ngram
listlengths
0
67.8k
[ "\"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"queryIdx_00\": { \"aggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgScannerTime\":", "0 } self.response3 = \"\"\" [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\",", "test that stats are properly collected self.assertDictEqual(self.stats2, r.get_stats()) def test_missing_stats_response(self): r = OpenTSDBResponse(self.response3)", "from a source other than Adobe, # then your use, modification, or distribution", "def test_missing_stats_response(self): r = OpenTSDBResponse(self.response3) # no error is raised, just logged self.assertTrue(not", "5.170471, \"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"rowsPostFilter\": 129,", "\"successfulScan\": 20, \"uidPairsResolved\": 0 } } ] \"\"\" self.response2_ret = [ { \"metric\":", "TestQuery(unittest.TestCase): def setUp(self): self.response1 = \"[]\" self.response2 = \"\"\" [ { \"metric\": \"this.metric\",", "{ \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\": \"email\" }, \"aggregateTags\": [ \"hostname\" ], \"dps\":", "use, modify, and distribute this file in # accordance with the terms of", "0 } } ] self.stats2 = { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076,", "\"avgScannerUidToStringTime\": 0, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"groupByTime\": 0, \"maxHBaseTime\": 5.170471, \"maxScannerUidToStringTime\":", "], \"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } } ] \"\"\"", "\"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } self.response3 = \"\"\" [ { \"metric\":", "\"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, 
\"processingPreWriteTime\": 8.480518, \"rowsPostFilter\": 129, \"rowsPreFilter\":", "accompanying # it. If you have received this file from a source other", "this file in # accordance with the terms of the Adobe license agreement", "\"groupByTime\": 0, \"maxHBaseTime\": 5.170471, \"maxScannerUidToStringTime\": 0, \"queryIndex\": 0, \"queryScanTime\": 5.436076, \"rowsPostFilter\": 129, \"rowsPreFilter\":", "def test_ok_empty_response(self): r = OpenTSDBResponse(self.response1) self.assertTrue(not r.get_stats()) def test_ok_normal_response(self): r = OpenTSDBResponse(self.response2) #", "\"[]\" self.response2 = \"\"\" [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\":", "# NOTICE: Adobe permits you to use, modify, and distribute this file in", "\"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\":", "\"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"groupByTime\": 0, \"maxHBaseTime\": 5.170471, \"maxScannerUidToStringTime\": 0, \"queryIndex\": 0, \"queryScanTime\":", "\"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"maxAggregationTime\": 0.806912, \"maxHBaseTime\": 5.170471, \"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\":", "\"1623619620\": 0 } } ] self.stats2 = { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\":", "other than Adobe, # then your use, modification, or distribution of it requires", "{ \"aggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145,", "{ \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"avgSerializationTime\": 0.808312,", "\"queryScanTime\": 5.436076, \"rowsPostFilter\": 129, 
\"rowsPreFilter\": 129, \"saltScannerMergeTime\": 0.163702, \"serializationTime\": 0.808312, \"successfulScan\": 20, \"uidPairsResolved\":", "} } ] \"\"\" def test_ok_empty_response(self): r = OpenTSDBResponse(self.response1) self.assertTrue(not r.get_stats()) def test_ok_normal_response(self):", "\"aggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\":", "\"maxHBaseTime\": 5.170471, \"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"rowsPostFilter\":", "with summary stripped p = json.dumps(self.response2_ret, sort_keys=True) # test that response summary is", "\"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } } ] self.stats2 =", "modify, and distribute this file in # accordance with the terms of the", "\"queryIndex\": 0, \"queryScanTime\": 5.436076, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"saltScannerMergeTime\": 0.163702, \"serializationTime\": 0.808312, \"successfulScan\":", "import unittest import json from protector.query.query import OpenTSDBQuery, OpenTSDBResponse import time class TestQuery(unittest.TestCase):", "\"1623619560\": 0, \"1623619620\": 0 } } ] \"\"\" def test_ok_empty_response(self): r = OpenTSDBResponse(self.response1)", "[ \"hostname\" ], \"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } }", "self.assertDictEqual(self.stats2, r.get_stats()) def test_missing_stats_response(self): r = OpenTSDBResponse(self.response3) # no error is raised, just", "{ \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } }, { \"statsSummary\": { \"avgAggregationTime\":", "0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0", "0 } }, { 
\"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\":", "], \"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } } ] self.stats2", "\"1623619560\": 0, \"1623619620\": 0 } } ] self.stats2 = { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\":", "], \"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } }, { \"metric\":", "0, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"groupByTime\": 0, \"maxHBaseTime\": 5.170471, \"maxScannerUidToStringTime\": 0,", "then your use, modification, or distribution of it requires the prior # written", "5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"queryIdx_00\": { \"aggregationTime\": 0.806912,", "stripped p = json.dumps(self.response2_ret, sort_keys=True) # test that response summary is correctly stripped", "145, \"emittedDPs\": 1440, \"maxAggregationTime\": 0.806912, \"maxHBaseTime\": 5.170471, \"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312,", "\"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\": \"email\" }, \"aggregateTags\": [ \"hostname\" ], \"dps\": {", "\"prod\", \"recipientDomain\": \"gmail.com\", \"channel\": \"email\" }, \"aggregateTags\": [ \"hostname\" ], \"dps\": { \"1623619500\":", "All Rights Reserved. 
# # NOTICE: Adobe permits you to use, modify, and", "If you have received this file from a source other than Adobe, #", "{ \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\": \"email\" }, \"aggregateTags\":", "stripped self.assertEqual(p, r.to_json(True)) # test that stats are properly collected self.assertDictEqual(self.stats2, r.get_stats()) def", "0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"avgSerializationTime\": 0.808312, \"dpsPostFilter\": 145,", "5.436076, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"saltScannerMergeTime\": 0.163702, \"serializationTime\": 0.808312, \"successfulScan\": 20, \"uidPairsResolved\": 0,", "\"hostname\" ], \"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } } ]", "of Adobe. # import unittest import json from protector.query.query import OpenTSDBQuery, OpenTSDBResponse import", "0, \"1623619620\": 0 } }, { \"statsSummary\": { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\":", "0 } } ] \"\"\" def test_ok_empty_response(self): r = OpenTSDBResponse(self.response1) self.assertTrue(not r.get_stats()) def", "self.stats2 = { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0,", "}, \"aggregateTags\": [ \"hostname\" ], \"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0", "\"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"groupByTime\": 0, \"maxHBaseTime\": 5.170471, \"maxScannerUidToStringTime\": 0, \"queryIndex\":", "OpenTSDBResponse(self.response2) # expected response with summary stripped p = json.dumps(self.response2_ret, sort_keys=True) # test", "r.get_stats()) def test_missing_stats_response(self): r = OpenTSDBResponse(self.response3) # no error is raised, just logged", "5.436076, 
\"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"avgSerializationTime\": 0.808312, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440,", "\"hostname\" ], \"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } }, {", "import OpenTSDBQuery, OpenTSDBResponse import time class TestQuery(unittest.TestCase): def setUp(self): self.response1 = \"[]\" self.response2", "3.888163, \"avgScannerUidToStringTime\": 0, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"groupByTime\": 0, \"maxHBaseTime\": 5.170471,", "or distribution of it requires the prior # written permission of Adobe. #", "file from a source other than Adobe, # then your use, modification, or", "= [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\": \"email\"", "] \"\"\" def test_ok_empty_response(self): r = OpenTSDBResponse(self.response1) self.assertTrue(not r.get_stats()) def test_ok_normal_response(self): r =", "20, \"uidPairsResolved\": 0, \"uidToStringTime\": 0.0255 }, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\":", "\"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"maxAggregationTime\": 0.806912, \"maxHBaseTime\": 5.170471, \"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\":", "\"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } } ] \"\"\" def", "from protector.query.query import OpenTSDBQuery, OpenTSDBResponse import time class TestQuery(unittest.TestCase): def setUp(self): self.response1 =", "OpenTSDBResponse import time class TestQuery(unittest.TestCase): def setUp(self): self.response1 = \"[]\" self.response2 = \"\"\"", "= { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"avgSerializationTime\":", "# it. 
If you have received this file from a source other than", "0.806912, \"maxHBaseTime\": 5.170471, \"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518,", "self.assertEqual(p, r.to_json(True)) # test that stats are properly collected self.assertDictEqual(self.stats2, r.get_stats()) def test_missing_stats_response(self):", "129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } } ] \"\"\" self.response2_ret =", "requires the prior # written permission of Adobe. # import unittest import json", "}, { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\": \"email\" },", "response with summary stripped p = json.dumps(self.response2_ret, sort_keys=True) # test that response summary", "0.806912, \"avgHBaseTime\": 3.874463, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440,", "\"emittedDPs\": 1440, \"maxAggregationTime\": 0.806912, \"maxHBaseTime\": 5.170471, \"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\":", "\"1623619620\": 0 } }, { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\",", "\"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"queryIdx_00\": { \"aggregationTime\": 0.806912, \"avgHBaseTime\":", "Copyright 2019 Adobe # All Rights Reserved. 
# # NOTICE: Adobe permits you", "self.response1 = \"[]\" self.response2 = \"\"\" [ { \"metric\": \"this.metric\", \"tags\": { \"env\":", "0, \"queryIndex\": 0, \"queryScanTime\": 5.436076, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"saltScannerMergeTime\": 0.163702, \"serializationTime\": 0.808312,", "}, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } } ] \"\"\"", "\"successfulScan\": 20, \"uidPairsResolved\": 0 } self.response3 = \"\"\" [ { \"metric\": \"this.metric\", \"tags\":", "Adobe. # import unittest import json from protector.query.query import OpenTSDBQuery, OpenTSDBResponse import time", "\"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"queryIdx_00\": { \"aggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\":", "\"rowsPreFilter\": 129, \"saltScannerMergeTime\": 0.163702, \"serializationTime\": 0.808312, \"successfulScan\": 20, \"uidPairsResolved\": 0, \"uidToStringTime\": 0.0255 },", "\"uidPairsResolved\": 0 } } ] \"\"\" self.response2_ret = [ { \"metric\": \"this.metric\", \"tags\":", "test_ok_empty_response(self): r = OpenTSDBResponse(self.response1) self.assertTrue(not r.get_stats()) def test_ok_normal_response(self): r = OpenTSDBResponse(self.response2) # expected", "\"avgSerializationTime\": 0.808312, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"maxAggregationTime\": 0.806912, \"maxHBaseTime\": 5.170471, \"maxQueryScanTime\":", "0, \"1623619620\": 0 } }, { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\":", "3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"avgSerializationTime\": 0.808312, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145,", "# All Rights Reserved. 
# # NOTICE: Adobe permits you to use, modify,", "\"\"\" [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\": \"email\"", "\"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } }, { \"statsSummary\": {", "129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } } ] \"\"\" self.response2_ret = [ {", "agreement accompanying # it. If you have received this file from a source", "\"uidPairsResolved\": 0 } self.response3 = \"\"\" [ { \"metric\": \"this.metric\", \"tags\": { \"env\":", "\"maxHBaseTime\": 5.170471, \"maxScannerUidToStringTime\": 0, \"queryIndex\": 0, \"queryScanTime\": 5.436076, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"saltScannerMergeTime\":", "file in # accordance with the terms of the Adobe license agreement accompanying", "} }, { \"statsSummary\": { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163,", "= OpenTSDBResponse(self.response2) # expected response with summary stripped p = json.dumps(self.response2_ret, sort_keys=True) #", "0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20,", "0.808312, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"maxAggregationTime\": 0.806912, \"maxHBaseTime\": 5.170471, \"maxQueryScanTime\": 5.436076,", "OpenTSDBResponse(self.response1) self.assertTrue(not r.get_stats()) def test_ok_normal_response(self): r = OpenTSDBResponse(self.response2) # expected response with summary", "sort_keys=True) # test that response summary is correctly stripped self.assertEqual(p, r.to_json(True)) # test", "class TestQuery(unittest.TestCase): def setUp(self): self.response1 = \"[]\" self.response2 = \"\"\" [ { \"metric\":", "json.dumps(self.response2_ret, sort_keys=True) # test that response summary is correctly stripped 
self.assertEqual(p, r.to_json(True)) #", "unittest import json from protector.query.query import OpenTSDBQuery, OpenTSDBResponse import time class TestQuery(unittest.TestCase): def", "your use, modification, or distribution of it requires the prior # written permission", "145, \"emittedDPs\": 1440, \"groupByTime\": 0, \"maxHBaseTime\": 5.170471, \"maxScannerUidToStringTime\": 0, \"queryIndex\": 0, \"queryScanTime\": 5.436076,", "distribute this file in # accordance with the terms of the Adobe license", "\"maxAggregationTime\": 0.806912, \"maxHBaseTime\": 5.170471, \"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\":", "Reserved. # # NOTICE: Adobe permits you to use, modify, and distribute this", "response summary is correctly stripped self.assertEqual(p, r.to_json(True)) # test that stats are properly", "0.163702, \"serializationTime\": 0.808312, \"successfulScan\": 20, \"uidPairsResolved\": 0, \"uidToStringTime\": 0.0255 }, \"rowsPostFilter\": 129, \"rowsPreFilter\":", "145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"groupByTime\": 0, \"maxHBaseTime\": 5.170471, \"maxScannerUidToStringTime\": 0, \"queryIndex\": 0,", "json from protector.query.query import OpenTSDBQuery, OpenTSDBResponse import time class TestQuery(unittest.TestCase): def setUp(self): self.response1", "} self.response3 = \"\"\" [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\":", "\"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\":", "0, \"1623619620\": 0 } } ] \"\"\" def test_ok_empty_response(self): r = OpenTSDBResponse(self.response1) self.assertTrue(not", "collected self.assertDictEqual(self.stats2, r.get_stats()) def test_missing_stats_response(self): r = OpenTSDBResponse(self.response3) # no 
error is raised,", "0.0255, \"processingPreWriteTime\": 8.480518, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } self.response3", "setUp(self): self.response1 = \"[]\" self.response2 = \"\"\" [ { \"metric\": \"this.metric\", \"tags\": {", "test_missing_stats_response(self): r = OpenTSDBResponse(self.response3) # no error is raised, just logged self.assertTrue(not r.get_stats())", "you to use, modify, and distribute this file in # accordance with the", "test that response summary is correctly stripped self.assertEqual(p, r.to_json(True)) # test that stats", "}, { \"statsSummary\": { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\":", "} ] \"\"\" def test_ok_empty_response(self): r = OpenTSDBResponse(self.response1) self.assertTrue(not r.get_stats()) def test_ok_normal_response(self): r", "\"gmail.com\", \"channel\": \"email\" }, \"aggregateTags\": [ \"hostname\" ], \"dps\": { \"1623619500\": 0, \"1623619560\":", "20, \"uidPairsResolved\": 0 } self.response3 = \"\"\" [ { \"metric\": \"this.metric\", \"tags\": {", "} }, { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\": \"email\"", "], \"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } }, { \"statsSummary\":", "0, \"queryScanTime\": 5.436076, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"saltScannerMergeTime\": 0.163702, \"serializationTime\": 0.808312, \"successfulScan\": 20,", "\"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } } ] \"\"\" self.response2_ret = [", "def test_ok_normal_response(self): r = OpenTSDBResponse(self.response2) # expected response with summary stripped p =", "= json.dumps(self.response2_ret, sort_keys=True) # test that response summary is correctly stripped self.assertEqual(p, r.to_json(True))", "{ \"1623619500\": 0, \"1623619560\": 
0, \"1623619620\": 0 } }, { \"metric\": \"this.metric\", \"tags\":", "\"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"avgSerializationTime\": 0.808312, \"dpsPostFilter\":", "[ \"hostname\" ], \"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } },", "2019 Adobe # All Rights Reserved. # # NOTICE: Adobe permits you to", "and distribute this file in # accordance with the terms of the Adobe", "# test that stats are properly collected self.assertDictEqual(self.stats2, r.get_stats()) def test_missing_stats_response(self): r =", "Adobe permits you to use, modify, and distribute this file in # accordance", "protector.query.query import OpenTSDBQuery, OpenTSDBResponse import time class TestQuery(unittest.TestCase): def setUp(self): self.response1 = \"[]\"", "expected response with summary stripped p = json.dumps(self.response2_ret, sort_keys=True) # test that response", "\"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"avgSerializationTime\": 0.808312, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\":", "3.874463, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"groupByTime\": 0,", "{ \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } } ] \"\"\" def test_ok_empty_response(self):", "\"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\": \"email\" }, \"aggregateTags\": [", "of it requires the prior # written permission of Adobe. 
# import unittest", "in # accordance with the terms of the Adobe license agreement accompanying #", "\"successfulScan\": 20, \"uidPairsResolved\": 0, \"uidToStringTime\": 0.0255 }, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20,", "0.0255 }, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } } ]", "\"serializationTime\": 0.808312, \"successfulScan\": 20, \"uidPairsResolved\": 0, \"uidToStringTime\": 0.0255 }, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129,", "r.get_stats()) def test_ok_normal_response(self): r = OpenTSDBResponse(self.response2) # expected response with summary stripped p", "\"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } } ] \"\"\" def test_ok_empty_response(self): r", "0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"queryIdx_00\": { \"aggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgScannerTime\": 3.888163,", "] self.stats2 = { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\":", "\"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\": \"email\" }, \"aggregateTags\": [ \"hostname\" ],", "Rights Reserved. # # NOTICE: Adobe permits you to use, modify, and distribute", "permits you to use, modify, and distribute this file in # accordance with", "] \"\"\" self.response2_ret = [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\":", "terms of the Adobe license agreement accompanying # it. 
If you have received", "source other than Adobe, # then your use, modification, or distribution of it", "\"recipientDomain\": \"gmail.com\", \"channel\": \"email\" }, \"aggregateTags\": [ \"hostname\" ], \"dps\": { \"1623619500\": 0,", "that stats are properly collected self.assertDictEqual(self.stats2, r.get_stats()) def test_missing_stats_response(self): r = OpenTSDBResponse(self.response3) #", "\"processingPreWriteTime\": 8.480518, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } self.response3 =", "use, modification, or distribution of it requires the prior # written permission of", "r = OpenTSDBResponse(self.response2) # expected response with summary stripped p = json.dumps(self.response2_ret, sort_keys=True)", "# accordance with the terms of the Adobe license agreement accompanying # it.", "5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129,", "# then your use, modification, or distribution of it requires the prior #", "is correctly stripped self.assertEqual(p, r.to_json(True)) # test that stats are properly collected self.assertDictEqual(self.stats2,", "of the Adobe license agreement accompanying # it. 
If you have received this", "\"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"queryIdx_00\": { \"aggregationTime\":", "0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"queryIdx_00\": { \"aggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463,", "= \"\"\" [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\":", "\"avgScannerUidToStringTime\": 0, \"avgSerializationTime\": 0.808312, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"maxAggregationTime\": 0.806912, \"maxHBaseTime\":", "129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } self.response3 = \"\"\" [ { \"metric\": \"this.metric\",", "the prior # written permission of Adobe. # import unittest import json from", "\"1623619620\": 0 } } ] \"\"\" def test_ok_empty_response(self): r = OpenTSDBResponse(self.response1) self.assertTrue(not r.get_stats())", "129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } self.response3 = \"\"\" [ {", "\"1623619560\": 0, \"1623619620\": 0 } }, { \"statsSummary\": { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463,", "you have received this file from a source other than Adobe, # then", "written permission of Adobe. 
# import unittest import json from protector.query.query import OpenTSDBQuery,", "r.to_json(True)) # test that stats are properly collected self.assertDictEqual(self.stats2, r.get_stats()) def test_missing_stats_response(self): r", "145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"maxAggregationTime\": 0.806912, \"maxHBaseTime\": 5.170471, \"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0,", "r = OpenTSDBResponse(self.response1) self.assertTrue(not r.get_stats()) def test_ok_normal_response(self): r = OpenTSDBResponse(self.response2) # expected response", "self.response3 = \"\"\" [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\",", "OpenTSDBQuery, OpenTSDBResponse import time class TestQuery(unittest.TestCase): def setUp(self): self.response1 = \"[]\" self.response2 =", "that response summary is correctly stripped self.assertEqual(p, r.to_json(True)) # test that stats are", "1440, \"groupByTime\": 0, \"maxHBaseTime\": 5.170471, \"maxScannerUidToStringTime\": 0, \"queryIndex\": 0, \"queryScanTime\": 5.436076, \"rowsPostFilter\": 129,", "0, \"1623619620\": 0 } } ] self.stats2 = { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463,", "0 } }, { \"statsSummary\": { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\":", "# # NOTICE: Adobe permits you to use, modify, and distribute this file", "\"maxHBaseTime\": 5.170471, \"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"queryIdx_00\":", "than Adobe, # then your use, modification, or distribution of it requires the", "20, \"uidPairsResolved\": 0 } } ] \"\"\" self.response2_ret = [ { \"metric\": \"this.metric\",", "\"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } }, { \"metric\": \"this.metric\", \"tags\": {", "\"avgHBaseTime\": 3.874463, 
\"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"groupByTime\":", "it requires the prior # written permission of Adobe. # import unittest import", "0, \"1623619560\": 0, \"1623619620\": 0 } }, { \"statsSummary\": { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\":", "# Copyright 2019 Adobe # All Rights Reserved. # # NOTICE: Adobe permits", "\"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"saltScannerMergeTime\": 0.163702, \"serializationTime\": 0.808312, \"successfulScan\": 20, \"uidPairsResolved\": 0, \"uidToStringTime\":", "0, \"1623619560\": 0, \"1623619620\": 0 } }, { \"metric\": \"this.metric\", \"tags\": { \"env\":", "\"saltScannerMergeTime\": 0.163702, \"serializationTime\": 0.808312, \"successfulScan\": 20, \"uidPairsResolved\": 0, \"uidToStringTime\": 0.0255 }, \"rowsPostFilter\": 129,", "correctly stripped self.assertEqual(p, r.to_json(True)) # test that stats are properly collected self.assertDictEqual(self.stats2, r.get_stats())", "\"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 }", "} } ] self.stats2 = { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\":", "\"uidToStringTime\": 0.0255 }, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } }", "self.response2_ret = [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\":", "modification, or distribution of it requires the prior # written permission of Adobe.", "\"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } } ] \"\"\" self.response2_ret", "# test that response summary is correctly stripped self.assertEqual(p, r.to_json(True)) # test that", "time class TestQuery(unittest.TestCase): def 
setUp(self): self.response1 = \"[]\" self.response2 = \"\"\" [ {", "129, \"rowsPreFilter\": 129, \"saltScannerMergeTime\": 0.163702, \"serializationTime\": 0.808312, \"successfulScan\": 20, \"uidPairsResolved\": 0, \"uidToStringTime\": 0.0255", "0.808312, \"successfulScan\": 20, \"uidPairsResolved\": 0, \"uidToStringTime\": 0.0255 }, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\":", "\"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"groupByTime\": 0, \"maxHBaseTime\":", "p = json.dumps(self.response2_ret, sort_keys=True) # test that response summary is correctly stripped self.assertEqual(p,", "this file from a source other than Adobe, # then your use, modification,", "it. If you have received this file from a source other than Adobe,", "5.170471, \"maxScannerUidToStringTime\": 0, \"queryIndex\": 0, \"queryScanTime\": 5.436076, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"saltScannerMergeTime\": 0.163702,", "self.assertTrue(not r.get_stats()) def test_ok_normal_response(self): r = OpenTSDBResponse(self.response2) # expected response with summary stripped", "8.480518, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } self.response3 = \"\"\"", "a source other than Adobe, # then your use, modification, or distribution of", "0, \"maxHBaseTime\": 5.170471, \"maxScannerUidToStringTime\": 0, \"queryIndex\": 0, \"queryScanTime\": 5.436076, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129,", "\"maxScannerUidToStringTime\": 0, \"queryIndex\": 0, \"queryScanTime\": 5.436076, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"saltScannerMergeTime\": 0.163702, \"serializationTime\":", "[ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\": \"email\" },", "the Adobe license agreement accompanying # it. 
If you have received this file", "received this file from a source other than Adobe, # then your use,", "\"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } } ] self.stats2 = { \"avgAggregationTime\":", "distribution of it requires the prior # written permission of Adobe. # import", "0, \"1623619560\": 0, \"1623619620\": 0 } } ] self.stats2 = { \"avgAggregationTime\": 0.806912,", "8.480518, \"queryIdx_00\": { \"aggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"dpsPostFilter\": 145,", "to use, modify, and distribute this file in # accordance with the terms", "Adobe, # then your use, modification, or distribution of it requires the prior", "import time class TestQuery(unittest.TestCase): def setUp(self): self.response1 = \"[]\" self.response2 = \"\"\" [", "} ] self.stats2 = { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163,", "\"email\" }, \"aggregateTags\": [ \"hostname\" ], \"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\":", "0, \"1623619560\": 0, \"1623619620\": 0 } } ] \"\"\" def test_ok_empty_response(self): r =", "\"1623619560\": 0, \"1623619620\": 0 } }, { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\",", "\"1623619620\": 0 } }, { \"statsSummary\": { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076,", "\"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } }, { \"statsSummary\": { \"avgAggregationTime\": 0.806912,", "{ \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } } ] self.stats2 = {", "\"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"avgSerializationTime\": 0.808312, \"dpsPostFilter\": 145, \"dpsPreFilter\":", "\"emittedDPs\": 1440, \"groupByTime\": 0, \"maxHBaseTime\": 5.170471, \"maxScannerUidToStringTime\": 0, \"queryIndex\": 0, 
\"queryScanTime\": 5.436076, \"rowsPostFilter\":", "have received this file from a source other than Adobe, # then your", "129, \"saltScannerMergeTime\": 0.163702, \"serializationTime\": 0.808312, \"successfulScan\": 20, \"uidPairsResolved\": 0, \"uidToStringTime\": 0.0255 }, \"rowsPostFilter\":", "def setUp(self): self.response1 = \"[]\" self.response2 = \"\"\" [ { \"metric\": \"this.metric\", \"tags\":", "prior # written permission of Adobe. # import unittest import json from protector.query.query", "import json from protector.query.query import OpenTSDBQuery, OpenTSDBResponse import time class TestQuery(unittest.TestCase): def setUp(self):", "\"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\", \"channel\": \"email\" }, \"aggregateTags\": [ \"hostname\"", "5.170471, \"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255, \"processingPreWriteTime\": 8.480518, \"queryIdx_00\": {", "\"channel\": \"email\" }, \"aggregateTags\": [ \"hostname\" ], \"dps\": { \"1623619500\": 0, \"1623619560\": 0,", "3.888163, \"avgScannerUidToStringTime\": 0, \"avgSerializationTime\": 0.808312, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"maxAggregationTime\": 0.806912,", "0.0255, \"processingPreWriteTime\": 8.480518, \"queryIdx_00\": { \"aggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0,", "accordance with the terms of the Adobe license agreement accompanying # it. 
If", "\"\"\" def test_ok_empty_response(self): r = OpenTSDBResponse(self.response1) self.assertTrue(not r.get_stats()) def test_ok_normal_response(self): r = OpenTSDBResponse(self.response2)", "= \"[]\" self.response2 = \"\"\" [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\",", "0 } } ] \"\"\" self.response2_ret = [ { \"metric\": \"this.metric\", \"tags\": {", "= OpenTSDBResponse(self.response1) self.assertTrue(not r.get_stats()) def test_ok_normal_response(self): r = OpenTSDBResponse(self.response2) # expected response with", "\"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"avgSerializationTime\": 0.808312, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"maxAggregationTime\":", "} ] \"\"\" self.response2_ret = [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\",", "the terms of the Adobe license agreement accompanying # it. If you have", "\"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 } self.response3 = \"\"\" [", "permission of Adobe. # import unittest import json from protector.query.query import OpenTSDBQuery, OpenTSDBResponse", "\"\"\" self.response2_ret = [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\",", "Adobe license agreement accompanying # it. If you have received this file from", "are properly collected self.assertDictEqual(self.stats2, r.get_stats()) def test_missing_stats_response(self): r = OpenTSDBResponse(self.response3) # no error", "# expected response with summary stripped p = json.dumps(self.response2_ret, sort_keys=True) # test that", "# written permission of Adobe. # import unittest import json from protector.query.query import", "with the terms of the Adobe license agreement accompanying # it. 
If you", "{ \"statsSummary\": { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0,", "# import unittest import json from protector.query.query import OpenTSDBQuery, OpenTSDBResponse import time class", "NOTICE: Adobe permits you to use, modify, and distribute this file in #", "\"aggregateTags\": [ \"hostname\" ], \"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 }", "self.response2 = \"\"\" [ { \"metric\": \"this.metric\", \"tags\": { \"env\": \"prod\", \"recipientDomain\": \"gmail.com\",", "Adobe # All Rights Reserved. # # NOTICE: Adobe permits you to use,", "\"queryIdx_00\": { \"aggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"dpsPostFilter\": 145, \"dpsPreFilter\":", "\"statsSummary\": { \"avgAggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgQueryScanTime\": 5.436076, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"avgSerializationTime\":", "stats are properly collected self.assertDictEqual(self.stats2, r.get_stats()) def test_missing_stats_response(self): r = OpenTSDBResponse(self.response3) # no", "test_ok_normal_response(self): r = OpenTSDBResponse(self.response2) # expected response with summary stripped p = json.dumps(self.response2_ret,", "summary is correctly stripped self.assertEqual(p, r.to_json(True)) # test that stats are properly collected", "properly collected self.assertDictEqual(self.stats2, r.get_stats()) def test_missing_stats_response(self): r = OpenTSDBResponse(self.response3) # no error is", "license agreement accompanying # it. 
If you have received this file from a", "summary stripped p = json.dumps(self.response2_ret, sort_keys=True) # test that response summary is correctly", "0, \"uidToStringTime\": 0.0255 }, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0 }", "\"dps\": { \"1623619500\": 0, \"1623619560\": 0, \"1623619620\": 0 } }, { \"metric\": \"this.metric\",", "\"uidPairsResolved\": 0, \"uidToStringTime\": 0.0255 }, \"rowsPostFilter\": 129, \"rowsPreFilter\": 129, \"successfulScan\": 20, \"uidPairsResolved\": 0", "1440, \"maxAggregationTime\": 0.806912, \"maxHBaseTime\": 5.170471, \"maxQueryScanTime\": 5.436076, \"maxScannerUidToStringTime\": 0, \"maxSerializationTime\": 0.808312, \"maxUidToStringTime\": 0.0255,", "\"processingPreWriteTime\": 8.480518, \"queryIdx_00\": { \"aggregationTime\": 0.806912, \"avgHBaseTime\": 3.874463, \"avgScannerTime\": 3.888163, \"avgScannerUidToStringTime\": 0, \"dpsPostFilter\":", "} } ] \"\"\" self.response2_ret = [ { \"metric\": \"this.metric\", \"tags\": { \"env\":", "0, \"avgSerializationTime\": 0.808312, \"dpsPostFilter\": 145, \"dpsPreFilter\": 145, \"emittedDPs\": 1440, \"maxAggregationTime\": 0.806912, \"maxHBaseTime\": 5.170471," ]
[ ") Dictionary of baseline values for desired outputs. \"\"\" # Set all initial", "in wrt: self.model.driver.add_parameter(item, low=-1e99, high=1e99, fd_step=stepsize) self.model.driver.case_outputs = outs self.model.driver.ffd_order = 1 gen.num_parameters", "self.model.list_outputs() for conn in self.model.list_connections(): if conn[0] in bdy_inputs or conn[1] in bdy_outputs:", "model. model: Assembly Parent assembly of the components we want to finite difference.", "a model and performs a finite difference. The cases are run with a", "self.model.list_connections(): if conn[0] in bdy_inputs or conn[1] in bdy_outputs: self.model.disconnect(conn[0], conn[1]) # Distribution", "if self.model.driver.distribution_generator.form == 'CENTRAL': delx = cases[icase][wrt] - cases[icase+1][wrt] for out in self.model.driver.case_outputs:", "List of variable paths to use as finite difference inputs. outs: list( string", "conn[0] in bdy_inputs or conn[1] in bdy_outputs: self.model.disconnect(conn[0], conn[1]) # Distribution driver to", "string Choose from 'CENTRAL', 'FORWARD', and \"BACKWARD'. Default is central differencing. ''' #", "Only first order is supported right now. form: string Choose from 'CENTRAL', 'FORWARD',", "form gen.order = order # Save a reference to the original model so", "order # Save a reference to the original model so that we can", "# Calculate finite differences. # FFAD mode is supported. self.model.driver.calc_derivatives(first=True) self.model.run() # Return", "outputs. stepsize: float Default stepsize to use. order: int Finite Difference order. Only", ") Dictionary of baseline values for input paramters. input_dict: dict( string : value", "Default is central differencing. ''' # Copy model. 
We need to null out", "#import StringIO from copy import deepcopy # pylint: disable-msg=E0611,F0401 from openmdao.lib.casehandlers.api import ListCaseRecorder", "!= 'CENTRAL': self.model.driver.distribution_generator.skip_baseline = True # Calculate finite differences. # FFAD mode is", "DistributionCaseDriver, FiniteDifferenceGenerator class FDhelper(object): ''' An object that takes a subsection of a", "wrt. Variables are intialized with init_vals. input_dict: dict( string : value ) Dictionary", "delx = cases[icase][wrt] - cases[icase+1][wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out]", "in bdy_inputs or conn[1] in bdy_outputs: self.model.disconnect(conn[0], conn[1]) # Distribution driver to drive", "self.model.driver.get_parameters().iteritems(): if self.model.driver.distribution_generator.form == 'CENTRAL': delx = cases[icase][wrt] - cases[icase+1][wrt] for out in", "list( string ) List of variable paths to use as finite difference inputs.", "we can increment the # execution counter as needed. self.copy_source = model #", "string : value ) Dictionary of baseline values for input paramters. input_dict: dict(", "cases[icase][wrt] - cases[icase+1][wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - cases[icase+1][out])/delx", "want to finite difference as a group. wrt: list( string ) List of", "float Default stepsize to use. order: int Finite Difference order. Only first order", "\"\"\" # Set all initial values for varname, value in input_dict.iteritems(): self.model.set(varname, value)", "on it.\"\"\" #import cPickle #import StringIO from copy import deepcopy # pylint: disable-msg=E0611,F0401", "self.model.driver.workflow.__iter__(): comp.exec_count = 0 comp.derivative_exec_count = 0 def run(self, input_dict, output_dict): \"\"\" Performs", "respect to. 
\"\"\" return self.model.driver.get_parameters().keys() def list_outs(self): \"\"\" Returns a list of variable", "if item not in comps + ['driver']: self.model.remove(item) # Remove all connections to", "self.model.driver.workflow.__iter__(): source_comp = self.copy_source.get(comp.name) source_comp.exec_count += comp.exec_count comp.exec_count = 0 source_comp.derivative_exec_count += comp.derivative_exec_count", "0 comp.derivative_exec_count = 0 def run(self, input_dict, output_dict): \"\"\" Performs finite difference of", "derivs[out] = {} for wrt, val in self.model.driver.get_parameters().iteritems(): if self.model.driver.distribution_generator.form == 'CENTRAL': delx", "original model so that we can increment the # execution counter as needed.", "= 0 def run(self, input_dict, output_dict): \"\"\" Performs finite difference of our submodel", "and a list of component names in that model. The model is deepcopied", "for desired outputs. \"\"\" # Set all initial values for varname, value in", "= len(wrt) gen.form = form gen.order = order # Save a reference to", "All but the needed comps are removed from the model. model: Assembly Parent", "counter as needed. self.copy_source = model # All execution counts should be reset", "all needed derivatives cases = self.model.driver.recorders[0].cases icase = 0 derivs = {} for", "''' # Copy model. 
We need to null out the reference to the", "self.copy_source.get(comp.name) source_comp.exec_count += comp.exec_count comp.exec_count = 0 source_comp.derivative_exec_count += comp.derivative_exec_count comp.derivative_exec_count = 0", "delx = cases[icase][wrt] - input_dict[wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out]", "in bdy_outputs: self.model.disconnect(conn[0], conn[1]) # Distribution driver to drive the finite difference calculation", "self.model.set(varname, value) self.model.driver.recorders = [ListCaseRecorder()] if self.model.driver.distribution_generator.form != 'CENTRAL': self.model.driver.distribution_generator.skip_baseline = True #", "a point distribution generator. Thus, we can take advantage of multiprocessing if it", "list( string ) List of variable paths to return as outputs. stepsize: float", "that model. The model is deepcopied to create a copy. All but the", "to wrt. Variables are intialized with init_vals. input_dict: dict( string : value )", "to create a copy. All but the needed comps are removed from the", "assembly of the components we want to finite difference. comps: list( string )", "can increment the # execution counter as needed. self.copy_source = model # All", "in self.model.list_containers(): if item not in comps + ['driver']: self.model.remove(item) # Remove all", "0 source_comp.derivative_exec_count += comp.derivative_exec_count comp.derivative_exec_count = 0 return derivs def list_wrt(self): \"\"\" Returns", "with respect to. 
\"\"\" return self.model.driver.get_parameters().keys() def list_outs(self): \"\"\" Returns a list of", "#import cPickle #import StringIO from copy import deepcopy # pylint: disable-msg=E0611,F0401 from openmdao.lib.casehandlers.api", "self.model.driver.distribution_generator.form == 'CENTRAL': delx = cases[icase][wrt] - cases[icase+1][wrt] for out in self.model.driver.case_outputs: derivs[out][wrt]", "wrt, val in self.model.driver.get_parameters().iteritems(): if self.model.driver.distribution_generator.form == 'CENTRAL': delx = cases[icase][wrt] - cases[icase+1][wrt]", "= model.parent model.parent = None try: self.model = deepcopy(model) finally: model.parent = save_parent", "values for desired outputs. \"\"\" # Set all initial values for varname, value", "deepcopied to create a copy. All but the needed comps are removed from", "self.model.driver.workflow.add(comps) for item in wrt: self.model.driver.add_parameter(item, low=-1e99, high=1e99, fd_step=stepsize) self.model.driver.case_outputs = outs self.model.driver.ffd_order", "first order is supported right now. form: string Choose from 'CENTRAL', 'FORWARD', and", "respect to wrt. Variables are intialized with init_vals. input_dict: dict( string : value", "'FORWARD', and \"BACKWARD'. Default is central differencing. ''' # Copy model. We need", "execution count from the copies to the originals. for comp in self.model.driver.workflow.__iter__(): source_comp", "item in self.model.list_containers(): if item not in comps + ['driver']: self.model.remove(item) # Remove", "Choose from 'CENTRAL', 'FORWARD', and \"BACKWARD'. Default is central differencing. ''' # Copy", "string ) List of variable paths to return as outputs. stepsize: float Default", "# Remove all connections to the assembly boundary bdy_inputs = self.model.list_inputs() bdy_outputs =", "derivs[out][wrt] = \\ (cases[icase][out] - cases[icase+1][out])/delx icase += 2 else: delx = cases[icase][wrt]", "init_vals. 
input_dict: dict( string : value ) Dictionary of baseline values for input", "dict( string : value ) Dictionary of baseline values for desired outputs. \"\"\"", "the model. model: Assembly Parent assembly of the components we want to finite", "in self.model.driver.case_outputs: derivs[out] = {} for wrt, val in self.model.driver.get_parameters().iteritems(): if self.model.driver.distribution_generator.form ==", "ListCaseRecorder from openmdao.lib.drivers.distributioncasedriver import \\ DistributionCaseDriver, FiniteDifferenceGenerator class FDhelper(object): ''' An object that", "0 derivs = {} for out in self.model.driver.case_outputs: derivs[out] = {} for wrt,", "drive the finite difference calculation self.model.add('driver', DistributionCaseDriver()) gen = FiniteDifferenceGenerator(self.model.driver) self.model.driver.distribution_generator = gen", "''' Takes a model and a list of component names in that model.", "for out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - output_dict[out])/delx icase += 1", "import ListCaseRecorder from openmdao.lib.drivers.distributioncasedriver import \\ DistributionCaseDriver, FiniteDifferenceGenerator class FDhelper(object): ''' An object", "list of component names in that model. The model is deepcopied to create", "parent before # we copy. save_parent = model.parent model.parent = None try: self.model", "difference. comps: list( string ) List of component names that we want to", "execution counts should be reset to zero. for comp in self.model.driver.workflow.__iter__(): comp.exec_count =", "gen.num_parameters = len(wrt) gen.form = form gen.order = order # Save a reference", "of component names that we want to finite difference as a group. 
wrt:", "calculation self.model.add('driver', DistributionCaseDriver()) gen = FiniteDifferenceGenerator(self.model.driver) self.model.driver.distribution_generator = gen self.model.driver.workflow.add(comps) for item in", "the reference to the parent before # we copy. save_parent = model.parent model.parent", "''' An object that takes a subsection of a model and performs a", "the copies to the originals. for comp in self.model.driver.workflow.__iter__(): source_comp = self.copy_source.get(comp.name) source_comp.exec_count", "counts should be reset to zero. for comp in self.model.driver.workflow.__iter__(): comp.exec_count = 0", "advantage of multiprocessing if it is available. ''' def __init__(self, model, comps, wrt,", "rid of the comps we don't need for item in self.model.list_containers(): if item", "\"\"\" return self.model.driver.get_parameters().keys() def list_outs(self): \"\"\" Returns a list of variable paths that", "= deepcopy(model) finally: model.parent = save_parent # Get rid of the comps we", "are differencing with respect to. \"\"\" return self.model.driver.get_parameters().keys() def list_outs(self): \"\"\" Returns a", "List of component names that we want to finite difference as a group.", "icase += 1 # Add the execution count from the copies to the", "the needed comps are removed from the model. model: Assembly Parent assembly of", "list( string ) List of component names that we want to finite difference", "of a model and perform finite difference on it.\"\"\" #import cPickle #import StringIO", "gen.form = form gen.order = order # Save a reference to the original", "from openmdao.lib.drivers.distributioncasedriver import \\ DistributionCaseDriver, FiniteDifferenceGenerator class FDhelper(object): ''' An object that takes", "a copy. All but the needed comps are removed from the model. model:", "def run(self, input_dict, output_dict): \"\"\" Performs finite difference of our submodel with respect", "comps are removed from the model. 
model: Assembly Parent assembly of the components", "openmdao.lib.casehandlers.api import ListCaseRecorder from openmdao.lib.drivers.distributioncasedriver import \\ DistributionCaseDriver, FiniteDifferenceGenerator class FDhelper(object): ''' An", "StringIO from copy import deepcopy # pylint: disable-msg=E0611,F0401 from openmdao.lib.casehandlers.api import ListCaseRecorder from", "the assembly boundary bdy_inputs = self.model.list_inputs() bdy_outputs = self.model.list_outputs() for conn in self.model.list_connections():", "Set all initial values for varname, value in input_dict.iteritems(): self.model.set(varname, value) self.model.driver.recorders =", "a subsection of a model and perform finite difference on it.\"\"\" #import cPickle", "self.model.driver.distribution_generator = gen self.model.driver.workflow.add(comps) for item in wrt: self.model.driver.add_parameter(item, low=-1e99, high=1e99, fd_step=stepsize) self.model.driver.case_outputs", "self.copy_source = model # All execution counts should be reset to zero. for", "0 return derivs def list_wrt(self): \"\"\" Returns a list of variable paths that", "multiprocessing if it is available. ''' def __init__(self, model, comps, wrt, outs, stepsize=1.0e-6,", "= 1 gen.num_parameters = len(wrt) gen.form = form gen.order = order # Save", "Performs finite difference of our submodel with respect to wrt. Variables are intialized", "and performs a finite difference. The cases are run with a point distribution", "Calculate finite differences. # FFAD mode is supported. self.model.driver.calc_derivatives(first=True) self.model.run() # Return all", "Distribution driver to drive the finite difference calculation self.model.add('driver', DistributionCaseDriver()) gen = FiniteDifferenceGenerator(self.model.driver)", ") List of variable paths to use as finite difference inputs. 
outs: list(", "cases = self.model.driver.recorders[0].cases icase = 0 derivs = {} for out in self.model.driver.case_outputs:", "comp in self.model.driver.workflow.__iter__(): comp.exec_count = 0 comp.derivative_exec_count = 0 def run(self, input_dict, output_dict):", "def list_outs(self): \"\"\" Returns a list of variable paths that we are differencing.", "derivatives cases = self.model.driver.recorders[0].cases icase = 0 derivs = {} for out in", "a subsection of a model and performs a finite difference. The cases are", "stepsize=1.0e-6, order=1, form='CENTRAL'): ''' Takes a model and a list of component names", "Dictionary of baseline values for desired outputs. \"\"\" # Set all initial values", "we copy. save_parent = model.parent model.parent = None try: self.model = deepcopy(model) finally:", "differences. # FFAD mode is supported. self.model.driver.calc_derivatives(first=True) self.model.run() # Return all needed derivatives", "model. The model is deepcopied to create a copy. All but the needed", "central differencing. ''' # Copy model. We need to null out the reference", "input_dict.iteritems(): self.model.set(varname, value) self.model.driver.recorders = [ListCaseRecorder()] if self.model.driver.distribution_generator.form != 'CENTRAL': self.model.driver.distribution_generator.skip_baseline = True", "to. \"\"\" return self.model.driver.get_parameters().keys() def list_outs(self): \"\"\" Returns a list of variable paths", "= cases[icase][wrt] - input_dict[wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] -", "deepcopy # pylint: disable-msg=E0611,F0401 from openmdao.lib.casehandlers.api import ListCaseRecorder from openmdao.lib.drivers.distributioncasedriver import \\ DistributionCaseDriver,", "Takes a model and a list of component names in that model. The", "output_dict): \"\"\" Performs finite difference of our submodel with respect to wrt. 
Variables", "= form gen.order = order # Save a reference to the original model", "(cases[icase][out] - output_dict[out])/delx icase += 1 # Add the execution count from the", "derivs def list_wrt(self): \"\"\" Returns a list of variable paths that we are", "= [ListCaseRecorder()] if self.model.driver.distribution_generator.form != 'CENTRAL': self.model.driver.distribution_generator.skip_baseline = True # Calculate finite differences.", "take a subsection of a model and perform finite difference on it.\"\"\" #import", "DistributionCaseDriver()) gen = FiniteDifferenceGenerator(self.model.driver) self.model.driver.distribution_generator = gen self.model.driver.workflow.add(comps) for item in wrt: self.model.driver.add_parameter(item,", ": value ) Dictionary of baseline values for desired outputs. \"\"\" # Set", "string ) List of component names that we want to finite difference as", "all initial values for varname, value in input_dict.iteritems(): self.model.set(varname, value) self.model.driver.recorders = [ListCaseRecorder()]", "boundary bdy_inputs = self.model.list_inputs() bdy_outputs = self.model.list_outputs() for conn in self.model.list_connections(): if conn[0]", "0 def run(self, input_dict, output_dict): \"\"\" Performs finite difference of our submodel with", "self.model.driver.calc_derivatives(first=True) self.model.run() # Return all needed derivatives cases = self.model.driver.recorders[0].cases icase = 0", "model.parent = None try: self.model = deepcopy(model) finally: model.parent = save_parent # Get", "self.model.driver.add_parameter(item, low=-1e99, high=1e99, fd_step=stepsize) self.model.driver.case_outputs = outs self.model.driver.ffd_order = 1 gen.num_parameters = len(wrt)", "source_comp.derivative_exec_count += comp.derivative_exec_count comp.derivative_exec_count = 0 return derivs def list_wrt(self): \"\"\" Returns a", "is supported. 
self.model.driver.calc_derivatives(first=True) self.model.run() # Return all needed derivatives cases = self.model.driver.recorders[0].cases icase", "form: string Choose from 'CENTRAL', 'FORWARD', and \"BACKWARD'. Default is central differencing. '''", "with init_vals. input_dict: dict( string : value ) Dictionary of baseline values for", "wrt: self.model.driver.add_parameter(item, low=-1e99, high=1e99, fd_step=stepsize) self.model.driver.case_outputs = outs self.model.driver.ffd_order = 1 gen.num_parameters =", "FiniteDifferenceGenerator(self.model.driver) self.model.driver.distribution_generator = gen self.model.driver.workflow.add(comps) for item in wrt: self.model.driver.add_parameter(item, low=-1e99, high=1e99, fd_step=stepsize)", "# Save a reference to the original model so that we can increment", "perform finite difference on it.\"\"\" #import cPickle #import StringIO from copy import deepcopy", "so that we can increment the # execution counter as needed. self.copy_source =", "out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - cases[icase+1][out])/delx icase += 2 else:", "= self.model.list_inputs() bdy_outputs = self.model.list_outputs() for conn in self.model.list_connections(): if conn[0] in bdy_inputs", "of variable paths to return as outputs. stepsize: float Default stepsize to use.", "finite difference on it.\"\"\" #import cPickle #import StringIO from copy import deepcopy #", "want to finite difference. comps: list( string ) List of component names that", "= 0 comp.derivative_exec_count = 0 def run(self, input_dict, output_dict): \"\"\" Performs finite difference", "a model and perform finite difference on it.\"\"\" #import cPickle #import StringIO from", "finite difference. comps: list( string ) List of component names that we want", "as a group. 
wrt: list( string ) List of variable paths to use", "['driver']: self.model.remove(item) # Remove all connections to the assembly boundary bdy_inputs = self.model.list_inputs()", "conn in self.model.list_connections(): if conn[0] in bdy_inputs or conn[1] in bdy_outputs: self.model.disconnect(conn[0], conn[1])", "if conn[0] in bdy_inputs or conn[1] in bdy_outputs: self.model.disconnect(conn[0], conn[1]) # Distribution driver", "+= 2 else: delx = cases[icase][wrt] - input_dict[wrt] for out in self.model.driver.case_outputs: derivs[out][wrt]", "self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - cases[icase+1][out])/delx icase += 2 else: delx =", "for wrt, val in self.model.driver.get_parameters().iteritems(): if self.model.driver.distribution_generator.form == 'CENTRAL': delx = cases[icase][wrt] -", "model and performs a finite difference. The cases are run with a point", "\"\"\" Returns a list of variable paths that we are differencing with respect", "connections to the assembly boundary bdy_inputs = self.model.list_inputs() bdy_outputs = self.model.list_outputs() for conn", "in input_dict.iteritems(): self.model.set(varname, value) self.model.driver.recorders = [ListCaseRecorder()] if self.model.driver.distribution_generator.form != 'CENTRAL': self.model.driver.distribution_generator.skip_baseline =", "Default stepsize to use. order: int Finite Difference order. Only first order is", "paths to return as outputs. stepsize: float Default stepsize to use. order: int", "use. order: int Finite Difference order. 
Only first order is supported right now.", "bdy_outputs = self.model.list_outputs() for conn in self.model.list_connections(): if conn[0] in bdy_inputs or conn[1]", "assembly boundary bdy_inputs = self.model.list_inputs() bdy_outputs = self.model.list_outputs() for conn in self.model.list_connections(): if", "derivs[out][wrt] = \\ (cases[icase][out] - output_dict[out])/delx icase += 1 # Add the execution", "are run with a point distribution generator. Thus, we can take advantage of", "2 else: delx = cases[icase][wrt] - input_dict[wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] =", "values for input paramters. input_dict: dict( string : value ) Dictionary of baseline", "pylint: disable-msg=E0611,F0401 from openmdao.lib.casehandlers.api import ListCaseRecorder from openmdao.lib.drivers.distributioncasedriver import \\ DistributionCaseDriver, FiniteDifferenceGenerator class", "the components we want to finite difference. comps: list( string ) List of", "count from the copies to the originals. for comp in self.model.driver.workflow.__iter__(): source_comp =", "in self.model.driver.workflow.__iter__(): source_comp = self.copy_source.get(comp.name) source_comp.exec_count += comp.exec_count comp.exec_count = 0 source_comp.derivative_exec_count +=", "paths that we are differencing with respect to. \"\"\" return self.model.driver.get_parameters().keys() def list_outs(self):", "of the comps we don't need for item in self.model.list_containers(): if item not", "derivs = {} for out in self.model.driver.case_outputs: derivs[out] = {} for wrt, val", "model.parent = save_parent # Get rid of the comps we don't need for", "can take advantage of multiprocessing if it is available. ''' def __init__(self, model,", "model # All execution counts should be reset to zero. for comp in", "finite difference of our submodel with respect to wrt. 
Variables are intialized with", "not in comps + ['driver']: self.model.remove(item) # Remove all connections to the assembly", "all connections to the assembly boundary bdy_inputs = self.model.list_inputs() bdy_outputs = self.model.list_outputs() for", "increment the # execution counter as needed. self.copy_source = model # All execution", "An object that takes a subsection of a model and performs a finite", "model: Assembly Parent assembly of the components we want to finite difference. comps:", "the execution count from the copies to the originals. for comp in self.model.driver.workflow.__iter__():", "self.model.run() # Return all needed derivatives cases = self.model.driver.recorders[0].cases icase = 0 derivs", "'CENTRAL': delx = cases[icase][wrt] - cases[icase+1][wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] = \\", "the finite difference calculation self.model.add('driver', DistributionCaseDriver()) gen = FiniteDifferenceGenerator(self.model.driver) self.model.driver.distribution_generator = gen self.model.driver.workflow.add(comps)", "a finite difference. The cases are run with a point distribution generator. Thus,", "to use. order: int Finite Difference order. Only first order is supported right", "Object that can take a subsection of a model and perform finite difference", "finite differences. # FFAD mode is supported. self.model.driver.calc_derivatives(first=True) self.model.run() # Return all needed", "model. We need to null out the reference to the parent before #", "should be reset to zero. for comp in self.model.driver.workflow.__iter__(): comp.exec_count = 0 comp.derivative_exec_count", "that we can increment the # execution counter as needed. self.copy_source = model", "= \\ (cases[icase][out] - cases[icase+1][out])/delx icase += 2 else: delx = cases[icase][wrt] -", "variable paths to return as outputs. stepsize: float Default stepsize to use. 
order:", "comp.exec_count comp.exec_count = 0 source_comp.derivative_exec_count += comp.derivative_exec_count comp.derivative_exec_count = 0 return derivs def", "difference. The cases are run with a point distribution generator. Thus, we can", "to the parent before # we copy. save_parent = model.parent model.parent = None", "difference of our submodel with respect to wrt. Variables are intialized with init_vals.", "Save a reference to the original model so that we can increment the", "= gen self.model.driver.workflow.add(comps) for item in wrt: self.model.driver.add_parameter(item, low=-1e99, high=1e99, fd_step=stepsize) self.model.driver.case_outputs =", "take advantage of multiprocessing if it is available. ''' def __init__(self, model, comps,", "for item in wrt: self.model.driver.add_parameter(item, low=-1e99, high=1e99, fd_step=stepsize) self.model.driver.case_outputs = outs self.model.driver.ffd_order =", "for item in self.model.list_containers(): if item not in comps + ['driver']: self.model.remove(item) #", "{} for out in self.model.driver.case_outputs: derivs[out] = {} for wrt, val in self.model.driver.get_parameters().iteritems():", "component names in that model. The model is deepcopied to create a copy.", "wrt: list( string ) List of variable paths to use as finite difference", "import \\ DistributionCaseDriver, FiniteDifferenceGenerator class FDhelper(object): ''' An object that takes a subsection", "comps, wrt, outs, stepsize=1.0e-6, order=1, form='CENTRAL'): ''' Takes a model and a list", "reset to zero. for comp in self.model.driver.workflow.__iter__(): comp.exec_count = 0 comp.derivative_exec_count = 0", "a model and a list of component names in that model. The model", "# Copy model. We need to null out the reference to the parent", "a list of component names in that model. 
The model is deepcopied to", "from openmdao.lib.casehandlers.api import ListCaseRecorder from openmdao.lib.drivers.distributioncasedriver import \\ DistributionCaseDriver, FiniteDifferenceGenerator class FDhelper(object): '''", "comps we don't need for item in self.model.list_containers(): if item not in comps", "of variable paths that we are differencing with respect to. \"\"\" return self.model.driver.get_parameters().keys()", "value in input_dict.iteritems(): self.model.set(varname, value) self.model.driver.recorders = [ListCaseRecorder()] if self.model.driver.distribution_generator.form != 'CENTRAL': self.model.driver.distribution_generator.skip_baseline", "comps: list( string ) List of component names that we want to finite", "Return all needed derivatives cases = self.model.driver.recorders[0].cases icase = 0 derivs = {}", "with a point distribution generator. Thus, we can take advantage of multiprocessing if", "needed derivatives cases = self.model.driver.recorders[0].cases icase = 0 derivs = {} for out", "= order # Save a reference to the original model so that we", "zero. for comp in self.model.driver.workflow.__iter__(): comp.exec_count = 0 comp.derivative_exec_count = 0 def run(self,", "== 'CENTRAL': delx = cases[icase][wrt] - cases[icase+1][wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] =", "save_parent # Get rid of the comps we don't need for item in", "from 'CENTRAL', 'FORWARD', and \"BACKWARD'. Default is central differencing. ''' # Copy model.", "be reset to zero. for comp in self.model.driver.workflow.__iter__(): comp.exec_count = 0 comp.derivative_exec_count =", "= None try: self.model = deepcopy(model) finally: model.parent = save_parent # Get rid", "\\ DistributionCaseDriver, FiniteDifferenceGenerator class FDhelper(object): ''' An object that takes a subsection of", "comp.derivative_exec_count = 0 def run(self, input_dict, output_dict): \"\"\" Performs finite difference of our", "are removed from the model. 
model: Assembly Parent assembly of the components we", "self.model.driver.get_parameters().keys() def list_outs(self): \"\"\" Returns a list of variable paths that we are", "subsection of a model and perform finite difference on it.\"\"\" #import cPickle #import", "'CENTRAL', 'FORWARD', and \"BACKWARD'. Default is central differencing. ''' # Copy model. We", "(cases[icase][out] - cases[icase+1][out])/delx icase += 2 else: delx = cases[icase][wrt] - input_dict[wrt] for", "cases[icase+1][wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - cases[icase+1][out])/delx icase +=", "copy. All but the needed comps are removed from the model. model: Assembly", "Dictionary of baseline values for input paramters. input_dict: dict( string : value )", "from copy import deepcopy # pylint: disable-msg=E0611,F0401 from openmdao.lib.casehandlers.api import ListCaseRecorder from openmdao.lib.drivers.distributioncasedriver", "component names that we want to finite difference as a group. wrt: list(", "we don't need for item in self.model.list_containers(): if item not in comps +", "the originals. for comp in self.model.driver.workflow.__iter__(): source_comp = self.copy_source.get(comp.name) source_comp.exec_count += comp.exec_count comp.exec_count", "difference on it.\"\"\" #import cPickle #import StringIO from copy import deepcopy # pylint:", "our submodel with respect to wrt. Variables are intialized with init_vals. input_dict: dict(", "for out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - cases[icase+1][out])/delx icase += 2", "val in self.model.driver.get_parameters().iteritems(): if self.model.driver.distribution_generator.form == 'CENTRAL': delx = cases[icase][wrt] - cases[icase+1][wrt] for", "removed from the model. model: Assembly Parent assembly of the components we want", "outputs. \"\"\" # Set all initial values for varname, value in input_dict.iteritems(): self.model.set(varname,", "it is available. 
''' def __init__(self, model, comps, wrt, outs, stepsize=1.0e-6, order=1, form='CENTRAL'):", "of baseline values for desired outputs. \"\"\" # Set all initial values for", "source_comp.exec_count += comp.exec_count comp.exec_count = 0 source_comp.derivative_exec_count += comp.derivative_exec_count comp.derivative_exec_count = 0 return", "components we want to finite difference. comps: list( string ) List of component", "names that we want to finite difference as a group. wrt: list( string", "in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - output_dict[out])/delx icase += 1 # Add", "names in that model. The model is deepcopied to create a copy. All", "difference calculation self.model.add('driver', DistributionCaseDriver()) gen = FiniteDifferenceGenerator(self.model.driver) self.model.driver.distribution_generator = gen self.model.driver.workflow.add(comps) for item", "for varname, value in input_dict.iteritems(): self.model.set(varname, value) self.model.driver.recorders = [ListCaseRecorder()] if self.model.driver.distribution_generator.form !=", "# All execution counts should be reset to zero. for comp in self.model.driver.workflow.__iter__():", "The cases are run with a point distribution generator. Thus, we can take", "paramters. input_dict: dict( string : value ) Dictionary of baseline values for desired", "conn[1] in bdy_outputs: self.model.disconnect(conn[0], conn[1]) # Distribution driver to drive the finite difference", "return as outputs. stepsize: float Default stepsize to use. order: int Finite Difference", "string ) List of variable paths to use as finite difference inputs. outs:", "to finite difference. comps: list( string ) List of component names that we", "don't need for item in self.model.list_containers(): if item not in comps + ['driver']:", "list_wrt(self): \"\"\" Returns a list of variable paths that we are differencing with", "available. 
''' def __init__(self, model, comps, wrt, outs, stepsize=1.0e-6, order=1, form='CENTRAL'): ''' Takes", "baseline values for input paramters. input_dict: dict( string : value ) Dictionary of", "needed comps are removed from the model. model: Assembly Parent assembly of the", "= cases[icase][wrt] - cases[icase+1][wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] -", "with respect to wrt. Variables are intialized with init_vals. input_dict: dict( string :", "copy. save_parent = model.parent model.parent = None try: self.model = deepcopy(model) finally: model.parent", "we can take advantage of multiprocessing if it is available. ''' def __init__(self,", "bdy_inputs = self.model.list_inputs() bdy_outputs = self.model.list_outputs() for conn in self.model.list_connections(): if conn[0] in", "stepsize to use. order: int Finite Difference order. Only first order is supported", "are intialized with init_vals. input_dict: dict( string : value ) Dictionary of baseline", "submodel with respect to wrt. Variables are intialized with init_vals. input_dict: dict( string", "generator. Thus, we can take advantage of multiprocessing if it is available. '''", "+ ['driver']: self.model.remove(item) # Remove all connections to the assembly boundary bdy_inputs =", "if self.model.driver.distribution_generator.form != 'CENTRAL': self.model.driver.distribution_generator.skip_baseline = True # Calculate finite differences. # FFAD", "else: delx = cases[icase][wrt] - input_dict[wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] = \\", "model is deepcopied to create a copy. All but the needed comps are", "gen.order = order # Save a reference to the original model so that", "Variables are intialized with init_vals. 
input_dict: dict( string : value ) Dictionary of", "\\ (cases[icase][out] - output_dict[out])/delx icase += 1 # Add the execution count from", "out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - output_dict[out])/delx icase += 1 #", ") List of component names that we want to finite difference as a", "is deepcopied to create a copy. All but the needed comps are removed", "len(wrt) gen.form = form gen.order = order # Save a reference to the", "<gh_stars>0 \"\"\" Object that can take a subsection of a model and perform", "out in self.model.driver.case_outputs: derivs[out] = {} for wrt, val in self.model.driver.get_parameters().iteritems(): if self.model.driver.distribution_generator.form", "initial values for varname, value in input_dict.iteritems(): self.model.set(varname, value) self.model.driver.recorders = [ListCaseRecorder()] if", "of variable paths to use as finite difference inputs. outs: list( string )", "as needed. self.copy_source = model # All execution counts should be reset to", "{} for wrt, val in self.model.driver.get_parameters().iteritems(): if self.model.driver.distribution_generator.form == 'CENTRAL': delx = cases[icase][wrt]", "string : value ) Dictionary of baseline values for desired outputs. \"\"\" #", "source_comp = self.copy_source.get(comp.name) source_comp.exec_count += comp.exec_count comp.exec_count = 0 source_comp.derivative_exec_count += comp.derivative_exec_count comp.derivative_exec_count", "cases[icase+1][out])/delx icase += 2 else: delx = cases[icase][wrt] - input_dict[wrt] for out in", "comps + ['driver']: self.model.remove(item) # Remove all connections to the assembly boundary bdy_inputs", "null out the reference to the parent before # we copy. save_parent =", "we are differencing with respect to. 
\"\"\" return self.model.driver.get_parameters().keys() def list_outs(self): \"\"\" Returns", "in self.model.driver.get_parameters().iteritems(): if self.model.driver.distribution_generator.form == 'CENTRAL': delx = cases[icase][wrt] - cases[icase+1][wrt] for out", "''' def __init__(self, model, comps, wrt, outs, stepsize=1.0e-6, order=1, form='CENTRAL'): ''' Takes a", "form='CENTRAL'): ''' Takes a model and a list of component names in that", "of baseline values for input paramters. input_dict: dict( string : value ) Dictionary", "use as finite difference inputs. outs: list( string ) List of variable paths", "order: int Finite Difference order. Only first order is supported right now. form:", "value ) Dictionary of baseline values for input paramters. input_dict: dict( string :", "return self.model.driver.get_parameters().keys() def list_outs(self): \"\"\" Returns a list of variable paths that we", "run with a point distribution generator. Thus, we can take advantage of multiprocessing", "= save_parent # Get rid of the comps we don't need for item", "self.model.add('driver', DistributionCaseDriver()) gen = FiniteDifferenceGenerator(self.model.driver) self.model.driver.distribution_generator = gen self.model.driver.workflow.add(comps) for item in wrt:", "to the original model so that we can increment the # execution counter", "Finite Difference order. Only first order is supported right now. form: string Choose", "= model # All execution counts should be reset to zero. for comp", ") List of variable paths to return as outputs. stepsize: float Default stepsize", "All execution counts should be reset to zero. for comp in self.model.driver.workflow.__iter__(): comp.exec_count", "save_parent = model.parent model.parent = None try: self.model = deepcopy(model) finally: model.parent =", "\"BACKWARD'. Default is central differencing. ''' # Copy model. We need to null", "but the needed comps are removed from the model. 
model: Assembly Parent assembly", "= {} for wrt, val in self.model.driver.get_parameters().iteritems(): if self.model.driver.distribution_generator.form == 'CENTRAL': delx =", "that can take a subsection of a model and perform finite difference on", "the # execution counter as needed. self.copy_source = model # All execution counts", "# pylint: disable-msg=E0611,F0401 from openmdao.lib.casehandlers.api import ListCaseRecorder from openmdao.lib.drivers.distributioncasedriver import \\ DistributionCaseDriver, FiniteDifferenceGenerator", "as outputs. stepsize: float Default stepsize to use. order: int Finite Difference order.", "to use as finite difference inputs. outs: list( string ) List of variable", "\"\"\" Object that can take a subsection of a model and perform finite", "from the model. model: Assembly Parent assembly of the components we want to", "= 0 return derivs def list_wrt(self): \"\"\" Returns a list of variable paths", "create a copy. All but the needed comps are removed from the model.", "driver to drive the finite difference calculation self.model.add('driver', DistributionCaseDriver()) gen = FiniteDifferenceGenerator(self.model.driver) self.model.driver.distribution_generator", "variable paths that we are differencing with respect to. \"\"\" return self.model.driver.get_parameters().keys() def", "finite difference. The cases are run with a point distribution generator. Thus, we", "item not in comps + ['driver']: self.model.remove(item) # Remove all connections to the", "item in wrt: self.model.driver.add_parameter(item, low=-1e99, high=1e99, fd_step=stepsize) self.model.driver.case_outputs = outs self.model.driver.ffd_order = 1", "inputs. 
outs: list( string ) List of variable paths to return as outputs.", "self.model = deepcopy(model) finally: model.parent = save_parent # Get rid of the comps", "- cases[icase+1][wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - cases[icase+1][out])/delx icase", "difference inputs. outs: list( string ) List of variable paths to return as", "the comps we don't need for item in self.model.list_containers(): if item not in", "The model is deepcopied to create a copy. All but the needed comps", "# Get rid of the comps we don't need for item in self.model.list_containers():", "needed. self.copy_source = model # All execution counts should be reset to zero.", "supported. self.model.driver.calc_derivatives(first=True) self.model.run() # Return all needed derivatives cases = self.model.driver.recorders[0].cases icase =", "from the copies to the originals. for comp in self.model.driver.workflow.__iter__(): source_comp = self.copy_source.get(comp.name)", "and perform finite difference on it.\"\"\" #import cPickle #import StringIO from copy import", "copies to the originals. for comp in self.model.driver.workflow.__iter__(): source_comp = self.copy_source.get(comp.name) source_comp.exec_count +=", "in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - cases[icase+1][out])/delx icase += 2 else: delx", "paths to use as finite difference inputs. outs: list( string ) List of", ": value ) Dictionary of baseline values for input paramters. input_dict: dict( string", "self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - output_dict[out])/delx icase += 1 # Add the", "out the reference to the parent before # we copy. save_parent = model.parent", "\"\"\" Returns a list of variable paths that we are differencing. \"\"\" return", "model so that we can increment the # execution counter as needed. 
self.copy_source", "the original model so that we can increment the # execution counter as", "baseline values for desired outputs. \"\"\" # Set all initial values for varname,", "order. Only first order is supported right now. form: string Choose from 'CENTRAL',", "deepcopy(model) finally: model.parent = save_parent # Get rid of the comps we don't", "self.model.driver.case_outputs: derivs[out] = {} for wrt, val in self.model.driver.get_parameters().iteritems(): if self.model.driver.distribution_generator.form == 'CENTRAL':", "input paramters. input_dict: dict( string : value ) Dictionary of baseline values for", "and \"BACKWARD'. Default is central differencing. ''' # Copy model. We need to", "+= comp.exec_count comp.exec_count = 0 source_comp.derivative_exec_count += comp.derivative_exec_count comp.derivative_exec_count = 0 return derivs", "of multiprocessing if it is available. ''' def __init__(self, model, comps, wrt, outs,", "to zero. for comp in self.model.driver.workflow.__iter__(): comp.exec_count = 0 comp.derivative_exec_count = 0 def", "of component names in that model. The model is deepcopied to create a", "value ) Dictionary of baseline values for desired outputs. \"\"\" # Set all", "self.model.list_containers(): if item not in comps + ['driver']: self.model.remove(item) # Remove all connections", "= self.model.list_outputs() for conn in self.model.list_connections(): if conn[0] in bdy_inputs or conn[1] in", "None try: self.model = deepcopy(model) finally: model.parent = save_parent # Get rid of", "input_dict[wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - output_dict[out])/delx icase +=", "comp.derivative_exec_count = 0 return derivs def list_wrt(self): \"\"\" Returns a list of variable", "# Distribution driver to drive the finite difference calculation self.model.add('driver', DistributionCaseDriver()) gen =", "int Finite Difference order. Only first order is supported right now. 
form: string", "self.model.driver.case_outputs = outs self.model.driver.ffd_order = 1 gen.num_parameters = len(wrt) gen.form = form gen.order", "# Add the execution count from the copies to the originals. for comp", "finite difference as a group. wrt: list( string ) List of variable paths", "to drive the finite difference calculation self.model.add('driver', DistributionCaseDriver()) gen = FiniteDifferenceGenerator(self.model.driver) self.model.driver.distribution_generator =", "to return as outputs. stepsize: float Default stepsize to use. order: int Finite", "+= comp.derivative_exec_count comp.derivative_exec_count = 0 return derivs def list_wrt(self): \"\"\" Returns a list", "Returns a list of variable paths that we are differencing. \"\"\" return self.model.driver.case_outputs", "supported right now. form: string Choose from 'CENTRAL', 'FORWARD', and \"BACKWARD'. Default is", "we want to finite difference as a group. wrt: list( string ) List", "\"\"\" Performs finite difference of our submodel with respect to wrt. Variables are", "as finite difference inputs. outs: list( string ) List of variable paths to", "return derivs def list_wrt(self): \"\"\" Returns a list of variable paths that we", "desired outputs. \"\"\" # Set all initial values for varname, value in input_dict.iteritems():", "comp.derivative_exec_count comp.derivative_exec_count = 0 return derivs def list_wrt(self): \"\"\" Returns a list of", "icase = 0 derivs = {} for out in self.model.driver.case_outputs: derivs[out] = {}", "outs self.model.driver.ffd_order = 1 gen.num_parameters = len(wrt) gen.form = form gen.order = order", "self.model.driver.distribution_generator.skip_baseline = True # Calculate finite differences. # FFAD mode is supported. self.model.driver.calc_derivatives(first=True)", "We need to null out the reference to the parent before # we", "in comps + ['driver']: self.model.remove(item) # Remove all connections to the assembly boundary", "finite difference inputs. 
outs: list( string ) List of variable paths to return", "cases are run with a point distribution generator. Thus, we can take advantage", "1 gen.num_parameters = len(wrt) gen.form = form gen.order = order # Save a", "order is supported right now. form: string Choose from 'CENTRAL', 'FORWARD', and \"BACKWARD'.", "value) self.model.driver.recorders = [ListCaseRecorder()] if self.model.driver.distribution_generator.form != 'CENTRAL': self.model.driver.distribution_generator.skip_baseline = True # Calculate", "dict( string : value ) Dictionary of baseline values for input paramters. input_dict:", "True # Calculate finite differences. # FFAD mode is supported. self.model.driver.calc_derivatives(first=True) self.model.run() #", "Thus, we can take advantage of multiprocessing if it is available. ''' def", "openmdao.lib.drivers.distributioncasedriver import \\ DistributionCaseDriver, FiniteDifferenceGenerator class FDhelper(object): ''' An object that takes a", "takes a subsection of a model and performs a finite difference. The cases", "input_dict: dict( string : value ) Dictionary of baseline values for input paramters.", "FFAD mode is supported. self.model.driver.calc_derivatives(first=True) self.model.run() # Return all needed derivatives cases =", "performs a finite difference. The cases are run with a point distribution generator.", "Copy model. We need to null out the reference to the parent before", "bdy_inputs or conn[1] in bdy_outputs: self.model.disconnect(conn[0], conn[1]) # Distribution driver to drive the", "input_dict, output_dict): \"\"\" Performs finite difference of our submodel with respect to wrt.", "outs, stepsize=1.0e-6, order=1, form='CENTRAL'): ''' Takes a model and a list of component", "self.model.remove(item) # Remove all connections to the assembly boundary bdy_inputs = self.model.list_inputs() bdy_outputs", "list_outs(self): \"\"\" Returns a list of variable paths that we are differencing. \"\"\"", "# FFAD mode is supported. 
self.model.driver.calc_derivatives(first=True) self.model.run() # Return all needed derivatives cases", "model.parent model.parent = None try: self.model = deepcopy(model) finally: model.parent = save_parent #", "List of variable paths to return as outputs. stepsize: float Default stepsize to", "= 0 derivs = {} for out in self.model.driver.case_outputs: derivs[out] = {} for", "run(self, input_dict, output_dict): \"\"\" Performs finite difference of our submodel with respect to", "wrt, outs, stepsize=1.0e-6, order=1, form='CENTRAL'): ''' Takes a model and a list of", "= 0 source_comp.derivative_exec_count += comp.derivative_exec_count comp.derivative_exec_count = 0 return derivs def list_wrt(self): \"\"\"", "to finite difference as a group. wrt: list( string ) List of variable", "comp.exec_count = 0 source_comp.derivative_exec_count += comp.derivative_exec_count comp.derivative_exec_count = 0 return derivs def list_wrt(self):", "to the assembly boundary bdy_inputs = self.model.list_inputs() bdy_outputs = self.model.list_outputs() for conn in", "that we want to finite difference as a group. wrt: list( string )", "mode is supported. self.model.driver.calc_derivatives(first=True) self.model.run() # Return all needed derivatives cases = self.model.driver.recorders[0].cases", "\\ (cases[icase][out] - cases[icase+1][out])/delx icase += 2 else: delx = cases[icase][wrt] - input_dict[wrt]", "1 # Add the execution count from the copies to the originals. for", "of our submodel with respect to wrt. Variables are intialized with init_vals. input_dict:", "= \\ (cases[icase][out] - output_dict[out])/delx icase += 1 # Add the execution count", "of the components we want to finite difference. comps: list( string ) List", "in self.model.list_connections(): if conn[0] in bdy_inputs or conn[1] in bdy_outputs: self.model.disconnect(conn[0], conn[1]) #", "= True # Calculate finite differences. # FFAD mode is supported. 
self.model.driver.calc_derivatives(first=True) self.model.run()", "the parent before # we copy. save_parent = model.parent model.parent = None try:", "object that takes a subsection of a model and performs a finite difference.", "comp.exec_count = 0 comp.derivative_exec_count = 0 def run(self, input_dict, output_dict): \"\"\" Performs finite", "varname, value in input_dict.iteritems(): self.model.set(varname, value) self.model.driver.recorders = [ListCaseRecorder()] if self.model.driver.distribution_generator.form != 'CENTRAL':", "values for varname, value in input_dict.iteritems(): self.model.set(varname, value) self.model.driver.recorders = [ListCaseRecorder()] if self.model.driver.distribution_generator.form", "can take a subsection of a model and perform finite difference on it.\"\"\"", "FiniteDifferenceGenerator class FDhelper(object): ''' An object that takes a subsection of a model", "try: self.model = deepcopy(model) finally: model.parent = save_parent # Get rid of the", "= {} for out in self.model.driver.case_outputs: derivs[out] = {} for wrt, val in", "before # we copy. save_parent = model.parent model.parent = None try: self.model =", "Assembly Parent assembly of the components we want to finite difference. comps: list(", "model and a list of component names in that model. The model is", "for out in self.model.driver.case_outputs: derivs[out] = {} for wrt, val in self.model.driver.get_parameters().iteritems(): if", "point distribution generator. Thus, we can take advantage of multiprocessing if it is", "differencing. ''' # Copy model. We need to null out the reference to", "gen self.model.driver.workflow.add(comps) for item in wrt: self.model.driver.add_parameter(item, low=-1e99, high=1e99, fd_step=stepsize) self.model.driver.case_outputs = outs", "difference as a group. wrt: list( string ) List of variable paths to", "is available. 
''' def __init__(self, model, comps, wrt, outs, stepsize=1.0e-6, order=1, form='CENTRAL'): '''", "= self.copy_source.get(comp.name) source_comp.exec_count += comp.exec_count comp.exec_count = 0 source_comp.derivative_exec_count += comp.derivative_exec_count comp.derivative_exec_count =", "if it is available. ''' def __init__(self, model, comps, wrt, outs, stepsize=1.0e-6, order=1,", "to the originals. for comp in self.model.driver.workflow.__iter__(): source_comp = self.copy_source.get(comp.name) source_comp.exec_count += comp.exec_count", "import deepcopy # pylint: disable-msg=E0611,F0401 from openmdao.lib.casehandlers.api import ListCaseRecorder from openmdao.lib.drivers.distributioncasedriver import \\", "for conn in self.model.list_connections(): if conn[0] in bdy_inputs or conn[1] in bdy_outputs: self.model.disconnect(conn[0],", "comp in self.model.driver.workflow.__iter__(): source_comp = self.copy_source.get(comp.name) source_comp.exec_count += comp.exec_count comp.exec_count = 0 source_comp.derivative_exec_count", "that we are differencing with respect to. \"\"\" return self.model.driver.get_parameters().keys() def list_outs(self): \"\"\"", "self.model.list_inputs() bdy_outputs = self.model.list_outputs() for conn in self.model.list_connections(): if conn[0] in bdy_inputs or", "to null out the reference to the parent before # we copy. save_parent", "in self.model.driver.workflow.__iter__(): comp.exec_count = 0 comp.derivative_exec_count = 0 def run(self, input_dict, output_dict): \"\"\"", "right now. form: string Choose from 'CENTRAL', 'FORWARD', and \"BACKWARD'. Default is central", "need to null out the reference to the parent before # we copy.", "model and perform finite difference on it.\"\"\" #import cPickle #import StringIO from copy", "now. form: string Choose from 'CENTRAL', 'FORWARD', and \"BACKWARD'. 
Default is central differencing.", "cPickle #import StringIO from copy import deepcopy # pylint: disable-msg=E0611,F0401 from openmdao.lib.casehandlers.api import", "def __init__(self, model, comps, wrt, outs, stepsize=1.0e-6, order=1, form='CENTRAL'): ''' Takes a model", "Add the execution count from the copies to the originals. for comp in", "class FDhelper(object): ''' An object that takes a subsection of a model and", "- input_dict[wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - output_dict[out])/delx icase", "bdy_outputs: self.model.disconnect(conn[0], conn[1]) # Distribution driver to drive the finite difference calculation self.model.add('driver',", "stepsize: float Default stepsize to use. order: int Finite Difference order. Only first", "reference to the parent before # we copy. save_parent = model.parent model.parent =", "self.model.driver.ffd_order = 1 gen.num_parameters = len(wrt) gen.form = form gen.order = order #", "FDhelper(object): ''' An object that takes a subsection of a model and performs", "= outs self.model.driver.ffd_order = 1 gen.num_parameters = len(wrt) gen.form = form gen.order =", "execution counter as needed. self.copy_source = model # All execution counts should be", "for input paramters. input_dict: dict( string : value ) Dictionary of baseline values", "reference to the original model so that we can increment the # execution", "conn[1]) # Distribution driver to drive the finite difference calculation self.model.add('driver', DistributionCaseDriver()) gen", "originals. for comp in self.model.driver.workflow.__iter__(): source_comp = self.copy_source.get(comp.name) source_comp.exec_count += comp.exec_count comp.exec_count =", "Returns a list of variable paths that we are differencing with respect to.", "Remove all connections to the assembly boundary bdy_inputs = self.model.list_inputs() bdy_outputs = self.model.list_outputs()", "group. 
wrt: list( string ) List of variable paths to use as finite", "it.\"\"\" #import cPickle #import StringIO from copy import deepcopy # pylint: disable-msg=E0611,F0401 from", "disable-msg=E0611,F0401 from openmdao.lib.casehandlers.api import ListCaseRecorder from openmdao.lib.drivers.distributioncasedriver import \\ DistributionCaseDriver, FiniteDifferenceGenerator class FDhelper(object):", "variable paths to use as finite difference inputs. outs: list( string ) List", "in that model. The model is deepcopied to create a copy. All but", "is central differencing. ''' # Copy model. We need to null out the", "differencing with respect to. \"\"\" return self.model.driver.get_parameters().keys() def list_outs(self): \"\"\" Returns a list", "__init__(self, model, comps, wrt, outs, stepsize=1.0e-6, order=1, form='CENTRAL'): ''' Takes a model and", "a reference to the original model so that we can increment the #", "def list_wrt(self): \"\"\" Returns a list of variable paths that we are differencing", "# Return all needed derivatives cases = self.model.driver.recorders[0].cases icase = 0 derivs =", "# we copy. save_parent = model.parent model.parent = None try: self.model = deepcopy(model)", "is supported right now. form: string Choose from 'CENTRAL', 'FORWARD', and \"BACKWARD'. Default", "a group. wrt: list( string ) List of variable paths to use as", "or conn[1] in bdy_outputs: self.model.disconnect(conn[0], conn[1]) # Distribution driver to drive the finite", "'CENTRAL': self.model.driver.distribution_generator.skip_baseline = True # Calculate finite differences. # FFAD mode is supported.", "low=-1e99, high=1e99, fd_step=stepsize) self.model.driver.case_outputs = outs self.model.driver.ffd_order = 1 gen.num_parameters = len(wrt) gen.form", "model, comps, wrt, outs, stepsize=1.0e-6, order=1, form='CENTRAL'): ''' Takes a model and a", "distribution generator. 
Thus, we can take advantage of multiprocessing if it is available.", "# Set all initial values for varname, value in input_dict.iteritems(): self.model.set(varname, value) self.model.driver.recorders", "+= 1 # Add the execution count from the copies to the originals.", "[ListCaseRecorder()] if self.model.driver.distribution_generator.form != 'CENTRAL': self.model.driver.distribution_generator.skip_baseline = True # Calculate finite differences. #", "fd_step=stepsize) self.model.driver.case_outputs = outs self.model.driver.ffd_order = 1 gen.num_parameters = len(wrt) gen.form = form", "# execution counter as needed. self.copy_source = model # All execution counts should", "self.model.driver.recorders[0].cases icase = 0 derivs = {} for out in self.model.driver.case_outputs: derivs[out] =", "subsection of a model and performs a finite difference. The cases are run", "Difference order. Only first order is supported right now. form: string Choose from", "a list of variable paths that we are differencing with respect to. \"\"\"", "icase += 2 else: delx = cases[icase][wrt] - input_dict[wrt] for out in self.model.driver.case_outputs:", "finally: model.parent = save_parent # Get rid of the comps we don't need", "need for item in self.model.list_containers(): if item not in comps + ['driver']: self.model.remove(item)", "intialized with init_vals. input_dict: dict( string : value ) Dictionary of baseline values", "copy import deepcopy # pylint: disable-msg=E0611,F0401 from openmdao.lib.casehandlers.api import ListCaseRecorder from openmdao.lib.drivers.distributioncasedriver import", "Parent assembly of the components we want to finite difference. 
comps: list( string", "= FiniteDifferenceGenerator(self.model.driver) self.model.driver.distribution_generator = gen self.model.driver.workflow.add(comps) for item in wrt: self.model.driver.add_parameter(item, low=-1e99, high=1e99,", "self.model.driver.distribution_generator.form != 'CENTRAL': self.model.driver.distribution_generator.skip_baseline = True # Calculate finite differences. # FFAD mode", "self.model.driver.recorders = [ListCaseRecorder()] if self.model.driver.distribution_generator.form != 'CENTRAL': self.model.driver.distribution_generator.skip_baseline = True # Calculate finite", "outs: list( string ) List of variable paths to return as outputs. stepsize:", "finite difference calculation self.model.add('driver', DistributionCaseDriver()) gen = FiniteDifferenceGenerator(self.model.driver) self.model.driver.distribution_generator = gen self.model.driver.workflow.add(comps) for", "- output_dict[out])/delx icase += 1 # Add the execution count from the copies", "input_dict: dict( string : value ) Dictionary of baseline values for desired outputs.", "list of variable paths that we are differencing with respect to. \"\"\" return", "self.model.disconnect(conn[0], conn[1]) # Distribution driver to drive the finite difference calculation self.model.add('driver', DistributionCaseDriver())", "we want to finite difference. 
comps: list( string ) List of component names", "output_dict[out])/delx icase += 1 # Add the execution count from the copies to", "- cases[icase+1][out])/delx icase += 2 else: delx = cases[icase][wrt] - input_dict[wrt] for out", "Get rid of the comps we don't need for item in self.model.list_containers(): if", "= self.model.driver.recorders[0].cases icase = 0 derivs = {} for out in self.model.driver.case_outputs: derivs[out]", "order=1, form='CENTRAL'): ''' Takes a model and a list of component names in", "high=1e99, fd_step=stepsize) self.model.driver.case_outputs = outs self.model.driver.ffd_order = 1 gen.num_parameters = len(wrt) gen.form =", "for comp in self.model.driver.workflow.__iter__(): comp.exec_count = 0 comp.derivative_exec_count = 0 def run(self, input_dict,", "gen = FiniteDifferenceGenerator(self.model.driver) self.model.driver.distribution_generator = gen self.model.driver.workflow.add(comps) for item in wrt: self.model.driver.add_parameter(item, low=-1e99,", "that takes a subsection of a model and performs a finite difference. The", "cases[icase][wrt] - input_dict[wrt] for out in self.model.driver.case_outputs: derivs[out][wrt] = \\ (cases[icase][out] - output_dict[out])/delx", "for comp in self.model.driver.workflow.__iter__(): source_comp = self.copy_source.get(comp.name) source_comp.exec_count += comp.exec_count comp.exec_count = 0", "of a model and performs a finite difference. The cases are run with" ]
[ "morepath from .app import App def run(): print('Running app...') morepath.autoscan() App.commit() morepath.run(App()) if", "import App def run(): print('Running app...') morepath.autoscan() App.commit() morepath.run(App()) if __name__ == '__main__':", "from .app import App def run(): print('Running app...') morepath.autoscan() App.commit() morepath.run(App()) if __name__", "App def run(): print('Running app...') morepath.autoscan() App.commit() morepath.run(App()) if __name__ == '__main__': run()", ".app import App def run(): print('Running app...') morepath.autoscan() App.commit() morepath.run(App()) if __name__ ==", "import morepath from .app import App def run(): print('Running app...') morepath.autoscan() App.commit() morepath.run(App())" ]
[ "orders: prop_name = order.property().decode('utf-8') if not (prop_name == key_prop_name and order.direction() is datastore_pb.Query_Order.ASCENDING):", "datastore_index.Normalize) orders from query max_query_components: limit on query complexity \"\"\" def BadRequest(message): raise", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "filter namespace is %s but query namespace is %s' % (key_prop_name, ref_val.name_space(), query.name_space()))", "len(filters) + len(orders) if query.has_ancestor(): num_components += 1 if num_components > max_query_components: BadRequest('query", "<= end_key: end_key = limit end_inclusive = False elif (op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or", "== datastore_types._KEY_SPECIAL_PROPERTY): remaining_orders.append(o) else: break if remaining_filters: raise BadRequestError( 'Only comparison filters on", "address. \"\"\" user_id_digest = md5.new(email.lower()).digest() user_id = '1' + ''.join(['%02d' % ord(x) for", "ancestor app is %s' % (query.app(), ancestor.app())) if query.name_space() != ancestor.name_space(): BadRequest('query namespace", "supported. ' 'Encountered both %s and %s') % (ineq_prop_name, prop_name)) if ineq_prop_name is", "Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0", "property is %s but the inequality filter ' 'is on %s' % (first_order_prop,", "allow filters and ascending-orders on __key__. Raises exceptions for illegal queries. Args: filters:", "= None for filter in filters: if filter.property_size() != 1: BadRequest('Filter has %d", "apiproxy_errors.ApplicationError( datastore_pb.Error.BAD_REQUEST, message) key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name = ( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if query.has_transaction(): if", "the query. 
\"\"\" if query.has_ancestor(): raise BadRequestError('ancestor queries not allowed') start_kind, start_inclusive, end_kind,", "orders[0].property().decode('utf-8') if first_order_prop != ineq_prop_name: BadRequest('The first sort property must be the same", "query app is %s' % (key_prop_name, ref_val.app(), query.app())) if ref_val.name_space() != query.name_space(): BadRequest('%s", "None start_inclusive = False end_key = None end_inclusive = False key_prop = datastore_types._KEY_SPECIAL_PROPERTY", "end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_KindKeyToString(start_kind), start_inclusive, _KindKeyToString(end_kind), end_inclusive) def _KindKeyToString(key):", "end_inclusive = False key_prop = datastore_types._KEY_SPECIAL_PROPERTY for f in filters: op = f.op()", "md5.new(email.lower()).digest() user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20] return", "namespace - kindless queries only filter on __key__ and only sort on __key__", "end_key is None or limit <= end_key: end_key = limit end_inclusive = False", "= False elif (op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if start_key is", "ID from an email address. Note that this is not the same user", "for all user properties in a set of filters. Args: filters: The normalized", "== datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if end_key is None or limit <", "the form '__kind__'/name. Args: key: a key for a __kind__ instance, or a", "this file except in compliance with the License. # You may obtain a", "= f.op() if not (f.property_size() == 1 and f.property(0).name() == key_prop and not", "false value. Returns: namespace specified by key, or key if key is a", "address. 
Note that this is not the same user ID found in the", "import datastore_index from google.appengine.datastore import datastore_pb from google.appengine.datastore import datastore_pb from google.appengine.runtime import", "the following conditions are violated: - transactional queries have an ancestor - queries", "Returns: The key range (start, start_inclusive, end, end_inclusive) requested in the query. \"\"\"", "FillUsersInQuery(filters): \"\"\"Fill in a synthetic user ID for all user properties in a", "[] for o in orders: if not (o.direction() == datastore_pb.Query_Order.ASCENDING and o.property() ==", "if not (o.direction() == datastore_pb.Query_Order.ASCENDING and o.property() == datastore_types._KEY_SPECIAL_PROPERTY): remaining_orders.append(o) else: break if", "order (if any) must be on X Args: query: query to validate filters:", "query. \"\"\" if query.has_ancestor(): raise BadRequestError('ancestor queries not allowed') start_kind, start_inclusive, end_kind, end_inclusive", "orders): \"\"\"Parse queries which only allow filters and ascending-orders on __key__. Raises exceptions", "but query app is %s' % (key_prop_name, ref_val.app(), query.app())) if ref_val.name_space() != query.name_space():", "a false value. Returns: kind specified by key, or key if key is", "key_path[1] raise BadRequestError('invalid Key for __kind__ table') def ParseNamespaceQuery(query, filters, orders): \"\"\"Parse __namespace__", "following conditions are violated: - transactional queries have an ancestor - queries that", "all orders except __key__ ascending') def ParseKeyFilteredQuery(filters, orders): \"\"\"Parse queries which only allow", "on __key__ and only sort on __key__ ascending - multiple inequality (<, <=,", "from query. 
\"\"\" for filter in filters: for property in filter.property_list(): FillUser(property) def", "if ineq_prop_name is None: ineq_prop_name = prop_name elif ineq_prop_name != prop_name: BadRequest(('Only one", "(first_order_prop, ineq_prop_name)) if not query.has_kind(): for filter in filters: prop_name = filter.property(0).name().decode('utf-8') if", "(len(key_path) == 2 and key_path[0] == '__kind__' and isinstance(key_path[1], basestring)): return key_path[1] raise", "ANY KIND, either express or implied. # See the License for the specific", "% (key_prop_name, ref_val.name_space(), query.name_space())) if (filter.op() in datastore_index.INEQUALITY_OPERATORS and prop_name != unapplied_log_timestamp_us_name): if", "(f.property_size() == 1 and f.property(0).name() == key_prop and not (op == datastore_pb.Query_Filter.IN or", "key_prop + ' supported') if remaining_orders: raise BadRequestError('Only ascending order on ' +", "unapplied_log_timestamp_us_name = ( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if query.has_transaction(): if not query.has_ancestor(): BadRequest('Only ancestor queries are", "(query.name_space(), ancestor.name_space())) ineq_prop_name = None for filter in filters: if filter.property_size() != 1:", "for f in filters: op = f.op() if not (f.property_size() == 1 and", "def _KindKeyToString(key): \"\"\"Extract kind name from __kind__ key. Raises an ApplicationError if the", "from __namespace__ key. Raises an ApplicationError if the key is not of the", "query.has_ancestor(): raise BadRequestError('ancestor queries not allowed') start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters,", "f.op() if not (f.property_size() == 1 and f.property(0).name() == key_prop and not (op", "a __namespace__ instance, or a false value. 
Returns: namespace specified by key, or", "prop_name elif ineq_prop_name != prop_name: BadRequest(('Only one inequality filter per query is supported.", "query.app(): BadRequest('%s filter app is %s but query app is %s' % (key_prop_name,", "filters: op = f.op() if not (f.property_size() == 1 and f.property(0).name() == key_prop", "limit < end_key: end_key = limit end_inclusive = True if op == datastore_pb.Query_Filter.GREATER_THAN:", "allowed') start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_KindKeyToString(start_kind), start_inclusive, _KindKeyToString(end_kind),", "from google.appengine.datastore import datastore_pb from google.appengine.datastore import datastore_pb from google.appengine.runtime import apiproxy_errors def", "a key') limit = datastore_types.FromReferenceProperty(val) if op == datastore_pb.Query_Filter.LESS_THAN: if end_key is None", "== datastore_pb.Query_Filter.EQUAL): if end_key is None or limit < end_key: end_key = limit", "in filter.property_list(): FillUser(property) def FillUser(property): \"\"\"Fill in a synthetic user ID for a", "%s' % (key_prop_name, ref_val.name_space(), query.name_space())) if (filter.op() in datastore_index.INEQUALITY_OPERATORS and prop_name != unapplied_log_timestamp_us_name):", "' 'to which the inequality filter is applied. In your query ' 'the", "(schema) queries. Raises exceptions for illegal queries. Args: query: A Query PB. filters:", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "query namespace is %s' % (key_prop_name, ref_val.name_space(), query.name_space())) if (filter.op() in datastore_index.INEQUALITY_OPERATORS and", "1: BadRequest('Filter has %d properties, expected 1' % filter.property_size()) prop = filter.property(0) prop_name", "end_inclusive = True if op == datastore_pb.Query_Filter.GREATER_THAN: if start_key is None or limit", "ancestor namespace is %s' % (query.name_space(), ancestor.name_space())) ineq_prop_name = None for filter in", "!= ancestor.name_space(): BadRequest('query namespace is %s but ancestor namespace is %s' % (query.name_space(),", "kindless queries only filter on __key__ and only sort on __key__ ascending -", "max_query_components) if query.has_ancestor(): ancestor = query.ancestor() if query.app() != ancestor.app(): BadRequest('query app is", "is None or limit >= start_key: start_key = limit start_inclusive = False elif", "filter in filters: prop_name = filter.property(0).name().decode('utf-8') if (prop_name != key_prop_name and prop_name !=", "and sqlite datastore stubs.\"\"\" import md5 from google.appengine.api import datastore_types from google.appengine.api.datastore_errors import", "op = f.op() if not (f.property_size() == 1 and f.property(0).name() == key_prop and", "and %s') % (ineq_prop_name, prop_name)) if ineq_prop_name is not None and orders: first_order_prop", "limit <= end_key: end_key = limit end_inclusive = False elif (op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL", "'__namespace__': if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID: return '' if isinstance(key_path[1], basestring): return key_path[1] raise", "string userid derived from the email address. 
\"\"\" user_id_digest = md5.new(email.lower()).digest() user_id =", "and namespace - kindless queries only filter on __key__ and only sort on", "must be on X Args: query: query to validate filters: normalized (by datastore_index.Normalize)", "filter on __key__ and only sort on __key__ ascending - multiple inequality (<,", "is %s but ancestor app is %s' % (query.app(), ancestor.app())) if query.name_space() !=", "filter.property_size() != 1: BadRequest('Filter has %d properties, expected 1' % filter.property_size()) prop =", "and order.direction() is datastore_pb.Query_Order.ASCENDING): BadRequest('kind is required for all orders except __key__ ascending')", "google.appengine.datastore import datastore_pb from google.appengine.datastore import datastore_pb from google.appengine.runtime import apiproxy_errors def ValidateQuery(query,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "ancestor = query.ancestor() if query.app() != ancestor.app(): BadRequest('query app is %s but ancestor", "ParseKeyFilteredQuery( filters, orders) return (_KindKeyToString(start_kind), start_inclusive, _KindKeyToString(end_kind), end_inclusive) def _KindKeyToString(key): \"\"\"Extract kind name", "o in orders: if not (o.direction() == datastore_pb.Query_Order.ASCENDING and o.property() == datastore_types._KEY_SPECIAL_PROPERTY): remaining_orders.append(o)", "email address. Returns: A string userid derived from the email address. \"\"\" user_id_digest", "BadRequest('The first sort property must be the same as the property ' 'to", "(_KindKeyToString(start_kind), start_inclusive, _KindKeyToString(end_kind), end_inclusive) def _KindKeyToString(key): \"\"\"Extract kind name from __kind__ key. Raises", "a user properties. Args: property: A Property which may have a user value.", "OF ANY KIND, either express or implied. 
# See the License for the", "when any of the following conditions are violated: - transactional queries have an", "multiple inequality (<, <=, >, >=) filters all applied to the same property", "if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID: return '' if isinstance(key_path[1], basestring): return key_path[1] raise BadRequestError('invalid", "not of the form '__namespace__'/name or '__namespace__'/_EMPTY_NAMESPACE_ID. Args: key: a key for a", "' + key_prop + ' supported') return (start_key, start_inclusive, end_key, end_inclusive) def ParseKindQuery(query,", "Args: key: a key for a __namespace__ instance, or a false value. Returns:", "import apiproxy_errors def ValidateQuery(query, filters, orders, max_query_components): \"\"\"Validate a datastore query with normalized", "in the query. \"\"\" remaining_filters = [] start_key = None start_inclusive = False", "filters') for order in orders: prop_name = order.property().decode('utf-8') if not (prop_name == key_prop_name", "sqlite datastore stubs.\"\"\" import md5 from google.appengine.api import datastore_types from google.appengine.api.datastore_errors import BadRequestError", "with normalized filters, orders. Raises an ApplicationError when any of the following conditions", "any of the following conditions are violated: - transactional queries have an ancestor", "of filters, orders, ancestor <= max_query_components) - ancestor (if any) app and namespace", "if an inequality filter on prop X is used, the first order (if", "for illegal queries. Args: query: A Query PB. filters: the normalized filters from", "start_inclusive, _KindKeyToString(end_kind), end_inclusive) def _KindKeyToString(key): \"\"\"Extract kind name from __kind__ key. 
Raises an", "and key_path[0] == '__kind__' and isinstance(key_path[1], basestring)): return key_path[1] raise BadRequestError('invalid Key for", "in user_id_digest])[:20] return user_id def FillUsersInQuery(filters): \"\"\"Fill in a synthetic user ID for", "instance, or a false value. Returns: kind specified by key, or key if", "datastore_types._KEY_SPECIAL_PROPERTY for f in filters: op = f.op() if not (f.property_size() == 1", "' 'Encountered both %s and %s') % (ineq_prop_name, prop_name)) if ineq_prop_name is not", "if start_key is None or limit >= start_key: start_key = limit start_inclusive =", "not None and orders: first_order_prop = orders[0].property().decode('utf-8') if first_order_prop != ineq_prop_name: BadRequest('The first", "ApplicationError when any of the following conditions are violated: - transactional queries have", "for all orders except __key__ ascending') def ParseKeyFilteredQuery(filters, orders): \"\"\"Parse queries which only", "must be the same as the property ' 'to which the inequality filter", "normalized filters from query. \"\"\" for filter in filters: for property in filter.property_list():", "# Copyright 2007 Google Inc. 
# # Licensed under the Apache License, Version", "BadRequestError( 'Only comparison filters on ' + key_prop + ' supported') if remaining_orders:", "prop_name != unapplied_log_timestamp_us_name): if ineq_prop_name is None: ineq_prop_name = prop_name elif ineq_prop_name !=", "ancestor total' % max_query_components) if query.has_ancestor(): ancestor = query.ancestor() if query.app() != ancestor.app():", "in filters: op = f.op() if not (f.property_size() == 1 and f.property(0).name() ==", "have more than %s filters' ' + sort orders ancestor total' % max_query_components)", "in orders: prop_name = order.property().decode('utf-8') if not (prop_name == key_prop_name and order.direction() is", "== datastore_types._EMPTY_NAMESPACE_ID: return '' if isinstance(key_path[1], basestring): return key_path[1] raise BadRequestError('invalid Key for", "file and sqlite datastore stubs.\"\"\" import md5 from google.appengine.api import datastore_types from google.appengine.api.datastore_errors", "__key__ ascending') def ParseKeyFilteredQuery(filters, orders): \"\"\"Parse queries which only allow filters and ascending-orders", "not prop.value().has_referencevalue(): BadRequest('%s filter value must be a Key' % key_prop_name) ref_val =", "== datastore_pb.Query_Filter.EXISTS)): remaining_filters.append(f) continue val = f.property(0).value() if not val.has_referencevalue(): raise BadRequestError('__key__ kind", "Note that this is not the same user ID found in the production", "the key is not of the form '__namespace__'/name or '__namespace__'/_EMPTY_NAMESPACE_ID. 
Args: key: a", "are not too large (sum of filters, orders, ancestor <= max_query_components) - ancestor", "= '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20] return user_id def", "unapplied_log_timestamp_us_name): if ineq_prop_name is None: ineq_prop_name = prop_name elif ineq_prop_name != prop_name: BadRequest(('Only", "if start_key is None or limit > start_key: start_key = limit start_inclusive =", "is None: ineq_prop_name = prop_name elif ineq_prop_name != prop_name: BadRequest(('Only one inequality filter", "inequality filter per query is supported. ' 'Encountered both %s and %s') %", "(prop_name != key_prop_name and prop_name != unapplied_log_timestamp_us_name): BadRequest('kind is required for non-__key__ filters')", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", ">= start_key: start_key = limit start_inclusive = False elif (op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or", "if query.has_transaction(): if not query.has_ancestor(): BadRequest('Only ancestor queries are allowed inside transactions.') num_components", "if not key: return key key_path = key.to_path() if len(key_path) == 2 and", "(op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if end_key is None or limit", "properties. Args: property: A Property which may have a user value. 
\"\"\" if", "google.appengine.datastore import datastore_pb from google.appengine.runtime import apiproxy_errors def ValidateQuery(query, filters, orders, max_query_components): \"\"\"Validate", "not (op == datastore_pb.Query_Filter.IN or op == datastore_pb.Query_Filter.EXISTS)): remaining_filters.append(f) continue val = f.property(0).value()", "= ParseKeyFilteredQuery( filters, orders) return (_NamespaceKeyToString(start_kind), start_inclusive, _NamespaceKeyToString(end_kind), end_inclusive) def _NamespaceKeyToString(key): \"\"\"Extract namespace", "def ParseKeyFilteredQuery(filters, orders): \"\"\"Parse queries which only allow filters and ascending-orders on __key__.", "BadRequestError('Only ascending order on ' + key_prop + ' supported') return (start_key, start_inclusive,", "as the property ' 'to which the inequality filter is applied. In your", "for non-__key__ filters') for order in orders: prop_name = order.property().decode('utf-8') if not (prop_name", "not the same user ID found in the production system. Args: email: An", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "start_inclusive, end_key, end_inclusive) def ParseKindQuery(query, filters, orders): \"\"\"Parse __kind__ (schema) queries. Raises exceptions", "query. \"\"\" for filter in filters: for property in filter.property_list(): FillUser(property) def FillUser(property):", "== 2 and key_path[0] == '__kind__' and isinstance(key_path[1], basestring)): return key_path[1] raise BadRequestError('invalid", "query with normalized filters, orders. 
Raises an ApplicationError when any of the following", "user_id_digest])[:20] return user_id def FillUsersInQuery(filters): \"\"\"Fill in a synthetic user ID for all", "queries are allowed inside transactions.') num_components = len(filters) + len(orders) if query.has_ancestor(): num_components", "filter.property(0) prop_name = prop.name().decode('utf-8') if prop_name == key_prop_name: if not prop.value().has_referencevalue(): BadRequest('%s filter", "if not query.has_kind(): for filter in filters: prop_name = filter.property(0).name().decode('utf-8') if (prop_name !=", "end_key: end_key = limit end_inclusive = True if op == datastore_pb.Query_Filter.GREATER_THAN: if start_key", "False elif (op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if start_key is None", "not allowed') start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_NamespaceKeyToString(start_kind), start_inclusive,", "the same app and namespace as the query - if an inequality filter", "for x in user_id_digest])[:20] return user_id def FillUsersInQuery(filters): \"\"\"Fill in a synthetic user", "None for filter in filters: if filter.property_size() != 1: BadRequest('Filter has %d properties,", "!= key_prop_name and prop_name != unapplied_log_timestamp_us_name): BadRequest('kind is required for non-__key__ filters') for", ">, >=) filters all applied to the same property - filters on __key__", "query complexity \"\"\" def BadRequest(message): raise apiproxy_errors.ApplicationError( datastore_pb.Error.BAD_REQUEST, message) key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name", "from google.appengine.datastore import datastore_pb from google.appengine.runtime import apiproxy_errors def ValidateQuery(query, filters, orders, max_query_components):", "> start_key: start_key = limit start_inclusive = True remaining_orders = [] for o", "user_id def FillUsersInQuery(filters): 
\"\"\"Fill in a synthetic user ID for all user properties", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "filters: The normalized filters from query. \"\"\" for filter in filters: for property", "Raises an ApplicationError if the key is not of the form '__namespace__'/name or", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "remaining_filters = [] start_key = None start_inclusive = False end_key = None end_inclusive", "key: return key key_path = key.to_path() if (len(key_path) == 2 and key_path[0] ==", "X Args: query: query to validate filters: normalized (by datastore_index.Normalize) filters from query", "value must be a Key' % key_prop_name) ref_val = prop.value().referencevalue() if ref_val.app() !=", "\"\"\"Extract kind name from __kind__ key. Raises an ApplicationError if the key is", "def SynthesizeUserId(email): \"\"\"Return a synthetic user ID from an email address. Note that", "def FillUser(property): \"\"\"Fill in a synthetic user ID for a user properties. Args:", "= order.property().decode('utf-8') if not (prop_name == key_prop_name and order.direction() is datastore_pb.Query_Order.ASCENDING): BadRequest('kind is", "inequality filter on prop X is used, the first order (if any) must", "the production system. Args: email: An email address. Returns: A string userid derived", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "by key, or key if key is a false value. \"\"\" if not", "ancestor (if any) app and namespace match query app and namespace - kindless", "per query is supported. ' 'Encountered both %s and %s') % (ineq_prop_name, prop_name))", "is not the same user ID found in the production system. Args: email:", "derived from the email address. 
\"\"\" user_id_digest = md5.new(email.lower()).digest() user_id = '1' +", "= key.to_path() if (len(key_path) == 2 and key_path[0] == '__kind__' and isinstance(key_path[1], basestring)):", "FillUser(property) def FillUser(property): \"\"\"Fill in a synthetic user ID for a user properties.", "return (_KindKeyToString(start_kind), start_inclusive, _KindKeyToString(end_kind), end_inclusive) def _KindKeyToString(key): \"\"\"Extract kind name from __kind__ key.", "BadRequest('Filter has %d properties, expected 1' % filter.property_size()) prop = filter.property(0) prop_name =", "required by applicable law or agreed to in writing, software # distributed under", "of a query. orders: the normalized orders of a query. Returns: The key", "supported') if remaining_orders: raise BadRequestError('Only ascending order on ' + key_prop + '", "a key for a __kind__ instance, or a false value. Returns: kind specified", "datastore stubs.\"\"\" import md5 from google.appengine.api import datastore_types from google.appengine.api.datastore_errors import BadRequestError from", "% ord(x) for x in user_id_digest])[:20] return user_id def FillUsersInQuery(filters): \"\"\"Fill in a", "[] start_key = None start_inclusive = False end_key = None end_inclusive = False", "key_prop and not (op == datastore_pb.Query_Filter.IN or op == datastore_pb.Query_Filter.EXISTS)): remaining_filters.append(f) continue val", "True if op == datastore_pb.Query_Filter.GREATER_THAN: if start_key is None or limit >= start_key:", "for illegal queries. Args: filters: the normalized filters of a query. orders: the", "range (start, start_inclusive, end, end_inclusive) requested in the query. \"\"\" if query.has_ancestor(): raise", "applicable law or agreed to in writing, software # distributed under the License", "orders): \"\"\"Parse __namespace__ queries. Raises exceptions for illegal queries. Args: query: A Query", "max_query_components: BadRequest('query is too large. 
may not have more than %s filters' '", "of the form '__namespace__'/name or '__namespace__'/_EMPTY_NAMESPACE_ID. Args: key: a key for a __namespace__", "% (query.name_space(), ancestor.name_space())) ineq_prop_name = None for filter in filters: if filter.property_size() !=", "Raises exceptions for illegal queries. Args: filters: the normalized filters of a query.", "in datastore_index.INEQUALITY_OPERATORS and prop_name != unapplied_log_timestamp_us_name): if ineq_prop_name is None: ineq_prop_name = prop_name", "remaining_orders.append(o) else: break if remaining_filters: raise BadRequestError( 'Only comparison filters on ' +", "one inequality filter per query is supported. ' 'Encountered both %s and %s')", "\"\"\"Fill in a synthetic user ID for all user properties in a set", "and ascending-orders on __key__. Raises exceptions for illegal queries. Args: filters: the normalized", "system. Args: email: An email address. Returns: A string userid derived from the", "= ( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if query.has_transaction(): if not query.has_ancestor(): BadRequest('Only ancestor queries are allowed", "queries not allowed') start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_NamespaceKeyToString(start_kind),", "< end_key: end_key = limit end_inclusive = True if op == datastore_pb.Query_Filter.GREATER_THAN: if", "orders from query. Returns: The kind range (start, start_inclusive, end, end_inclusive) requested in", "in filters: for property in filter.property_list(): FillUser(property) def FillUser(property): \"\"\"Fill in a synthetic", "may have a user value. 
\"\"\" if property.value().has_uservalue(): uid = SynthesizeUserId(property.value().uservalue().email()) if uid:", "datastore_pb.Query_Filter.EQUAL): if start_key is None or limit > start_key: start_key = limit start_inclusive", "or agreed to in writing, software # distributed under the License is distributed", "user ID for a user properties. Args: property: A Property which may have", "% (ineq_prop_name, prop_name)) if ineq_prop_name is not None and orders: first_order_prop = orders[0].property().decode('utf-8')", "form '__kind__'/name. Args: key: a key for a __kind__ instance, or a false", "the inequality filter is applied. In your query ' 'the first sort property", "prop_name == key_prop_name: if not prop.value().has_referencevalue(): BadRequest('%s filter value must be a Key'", "user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20] return user_id", "have a user value. \"\"\" if property.value().has_uservalue(): uid = SynthesizeUserId(property.value().uservalue().email()) if uid: property.mutable_value().mutable_uservalue().set_obfuscated_gaiaid(uid)", "key key_path = key.to_path() if (len(key_path) == 2 and key_path[0] == '__kind__' and", "is None or limit <= end_key: end_key = limit end_inclusive = False elif", "and key_path[0] == '__namespace__': if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID: return '' if isinstance(key_path[1], basestring):", "filters. Args: filters: The normalized filters from query. \"\"\" for filter in filters:", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "namespace is %s but query namespace is %s' % (key_prop_name, ref_val.name_space(), query.name_space())) if", "end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_NamespaceKeyToString(start_kind), start_inclusive, _NamespaceKeyToString(end_kind), end_inclusive) def _NamespaceKeyToString(key):", "the query - if an inequality filter on prop X is used, the", "than %s filters' ' + sort orders ancestor total' % max_query_components) if query.has_ancestor():", "ref_val.app(), query.app())) if ref_val.name_space() != query.name_space(): BadRequest('%s filter namespace is %s but query", "''.join(['%02d' % ord(x) for x in user_id_digest])[:20] return user_id def FillUsersInQuery(filters): \"\"\"Fill in", "an email address. Note that this is not the same user ID found", "% max_query_components) if query.has_ancestor(): ancestor = query.ancestor() if query.app() != ancestor.app(): BadRequest('query app", "if filter.property_size() != 1: BadRequest('Filter has %d properties, expected 1' % filter.property_size()) prop", "and orders: first_order_prop = orders[0].property().decode('utf-8') if first_order_prop != ineq_prop_name: BadRequest('The first sort property", "Property which may have a user value. \"\"\" if property.value().has_uservalue(): uid = SynthesizeUserId(property.value().uservalue().email())", "ineq_prop_name)) if not query.has_kind(): for filter in filters: prop_name = filter.property(0).name().decode('utf-8') if (prop_name", "Args: query: A Query PB. filters: the normalized filters from query. 
orders: the", "BadRequestError from google.appengine.datastore import datastore_index from google.appengine.datastore import datastore_pb from google.appengine.datastore import datastore_pb", "(filter.op() in datastore_index.INEQUALITY_OPERATORS and prop_name != unapplied_log_timestamp_us_name): if ineq_prop_name is None: ineq_prop_name =", "orders: if not (o.direction() == datastore_pb.Query_Order.ASCENDING and o.property() == datastore_types._KEY_SPECIAL_PROPERTY): remaining_orders.append(o) else: break", "!= 1: BadRequest('Filter has %d properties, expected 1' % filter.property_size()) prop = filter.property(0)", "ascending') def ParseKeyFilteredQuery(filters, orders): \"\"\"Parse queries which only allow filters and ascending-orders on", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "reference in the same app and namespace as the query - if an", "% filter.property_size()) prop = filter.property(0) prop_name = prop.name().decode('utf-8') if prop_name == key_prop_name: if", "datastore_pb.Query_Filter.EQUAL): if end_key is None or limit < end_key: end_key = limit end_inclusive", "start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_NamespaceKeyToString(start_kind), start_inclusive, _NamespaceKeyToString(end_kind), end_inclusive) def", "writing, software # distributed under the License is distributed on an \"AS IS\"", "License. 
# \"\"\"Utility functions shared between the file and sqlite datastore stubs.\"\"\" import", "from google.appengine.datastore import datastore_index from google.appengine.datastore import datastore_pb from google.appengine.datastore import datastore_pb from", "datastore_pb.Error.BAD_REQUEST, message) key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name = ( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if query.has_transaction(): if not", "order.property().decode('utf-8') if not (prop_name == key_prop_name and order.direction() is datastore_pb.Query_Order.ASCENDING): BadRequest('kind is required", "prop.value().has_referencevalue(): BadRequest('%s filter value must be a Key' % key_prop_name) ref_val = prop.value().referencevalue()", "val.has_referencevalue(): raise BadRequestError('__key__ kind must be compared to a key') limit = datastore_types.FromReferenceProperty(val)", "for o in orders: if not (o.direction() == datastore_pb.Query_Order.ASCENDING and o.property() == datastore_types._KEY_SPECIAL_PROPERTY):", "queries that are not too large (sum of filters, orders, ancestor <= max_query_components)", "'1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20] return user_id def FillUsersInQuery(filters):", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "is a false value. \"\"\" if not key: return key key_path = key.to_path()", "key_prop = datastore_types._KEY_SPECIAL_PROPERTY for f in filters: op = f.op() if not (f.property_size()", "License. 
# You may obtain a copy of the License at # #", "'is on %s' % (first_order_prop, ineq_prop_name)) if not query.has_kind(): for filter in filters:", "key_prop_name: if not prop.value().has_referencevalue(): BadRequest('%s filter value must be a Key' % key_prop_name)", "key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name = ( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if query.has_transaction(): if not query.has_ancestor(): BadRequest('Only", "that are not too large (sum of filters, orders, ancestor <= max_query_components) -", "not (prop_name == key_prop_name and order.direction() is datastore_pb.Query_Order.ASCENDING): BadRequest('kind is required for all", "queries which only allow filters and ascending-orders on __key__. Raises exceptions for illegal", "google.appengine.datastore import datastore_index from google.appengine.datastore import datastore_pb from google.appengine.datastore import datastore_pb from google.appengine.runtime", "%s' % (first_order_prop, ineq_prop_name)) if not query.has_kind(): for filter in filters: prop_name =", "query - if an inequality filter on prop X is used, the first", "message) key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name = ( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if query.has_transaction(): if not query.has_ancestor():", "end_inclusive) def _KindKeyToString(key): \"\"\"Extract kind name from __kind__ key. Raises an ApplicationError if", "2 and key_path[0] == '__kind__' and isinstance(key_path[1], basestring)): return key_path[1] raise BadRequestError('invalid Key", "_NamespaceKeyToString(key): \"\"\"Extract namespace name from __namespace__ key. Raises an ApplicationError if the key", "return '' if isinstance(key_path[1], basestring): return key_path[1] raise BadRequestError('invalid Key for __namespace__ table')", "end_inclusive) def ParseKindQuery(query, filters, orders): \"\"\"Parse __kind__ (schema) queries. 
Raises exceptions for illegal", "compliance with the License. # You may obtain a copy of the License", "(key_prop_name, ref_val.app(), query.app())) if ref_val.name_space() != query.name_space(): BadRequest('%s filter namespace is %s but", "the property ' 'to which the inequality filter is applied. In your query", "isinstance(key_path[1], basestring): return key_path[1] raise BadRequestError('invalid Key for __namespace__ table') def SynthesizeUserId(email): \"\"\"Return", "synthetic user ID from an email address. Note that this is not the", "else: break if remaining_filters: raise BadRequestError( 'Only comparison filters on ' + key_prop", "same property - filters on __key__ compare to a reference in the same", "requested in the query. \"\"\" if query.has_ancestor(): raise BadRequestError('ancestor queries not allowed') start_kind,", "from __kind__ key. Raises an ApplicationError if the key is not of the", "property: A Property which may have a user value. \"\"\" if property.value().has_uservalue(): uid", "false value. Returns: kind specified by key, or key if key is a", "namespace as the query - if an inequality filter on prop X is", "the same property - filters on __key__ compare to a reference in the", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "filters' ' + sort orders ancestor total' % max_query_components) if query.has_ancestor(): ancestor =", "namespace is %s but ancestor namespace is %s' % (query.name_space(), ancestor.name_space())) ineq_prop_name =", "__key__. Raises exceptions for illegal queries. Args: filters: the normalized filters of a", "app is %s but query app is %s' % (key_prop_name, ref_val.app(), query.app())) if", "too large. may not have more than %s filters' ' + sort orders", "a query. Returns: The key range (start, start_inclusive, end, end_inclusive) requested in the", "governing permissions and # limitations under the License. 
# \"\"\"Utility functions shared between", "<=, >, >=) filters all applied to the same property - filters on", "max_query_components: limit on query complexity \"\"\" def BadRequest(message): raise apiproxy_errors.ApplicationError( datastore_pb.Error.BAD_REQUEST, message) key_prop_name", "ineq_prop_name = prop_name elif ineq_prop_name != prop_name: BadRequest(('Only one inequality filter per query", "or limit > start_key: start_key = limit start_inclusive = True remaining_orders = []", "= [] start_key = None start_inclusive = False end_key = None end_inclusive =", "op == datastore_pb.Query_Filter.GREATER_THAN: if start_key is None or limit >= start_key: start_key =", "if remaining_orders: raise BadRequestError('Only ascending order on ' + key_prop + ' supported')", "md5 from google.appengine.api import datastore_types from google.appengine.api.datastore_errors import BadRequestError from google.appengine.datastore import datastore_index", "filters: normalized (by datastore_index.Normalize) filters from query orders: normalized (by datastore_index.Normalize) orders from", "ineq_prop_name != prop_name: BadRequest(('Only one inequality filter per query is supported. ' 'Encountered", "raise BadRequestError('__key__ kind must be compared to a key') limit = datastore_types.FromReferenceProperty(val) if", "'__namespace__'/_EMPTY_NAMESPACE_ID. Args: key: a key for a __namespace__ instance, or a false value.", "kind must be compared to a key') limit = datastore_types.FromReferenceProperty(val) if op ==", "limit end_inclusive = True if op == datastore_pb.Query_Filter.GREATER_THAN: if start_key is None or", "Query PB. filters: the normalized filters from query. 
orders: the normalized orders from", "namespace match query app and namespace - kindless queries only filter on __key__", "%s' % (query.name_space(), ancestor.name_space())) ineq_prop_name = None for filter in filters: if filter.property_size()", "start_inclusive, _NamespaceKeyToString(end_kind), end_inclusive) def _NamespaceKeyToString(key): \"\"\"Extract namespace name from __namespace__ key. Raises an", "orders): \"\"\"Parse __kind__ (schema) queries. Raises exceptions for illegal queries. Args: query: A", "\"\"\"Return a synthetic user ID from an email address. Note that this is", "filters, orders. Raises an ApplicationError when any of the following conditions are violated:", "ancestor <= max_query_components) - ancestor (if any) app and namespace match query app", "a false value. Returns: namespace specified by key, or key if key is", "inside transactions.') num_components = len(filters) + len(orders) if query.has_ancestor(): num_components += 1 if", "orders. Raises an ApplicationError when any of the following conditions are violated: -", "query: A Query PB. filters: the normalized filters from query. orders: the normalized", "not use this file except in compliance with the License. # You may", "stubs.\"\"\" import md5 from google.appengine.api import datastore_types from google.appengine.api.datastore_errors import BadRequestError from google.appengine.datastore", "properties, expected 1' % filter.property_size()) prop = filter.property(0) prop_name = prop.name().decode('utf-8') if prop_name", "key is not of the form '__kind__'/name. Args: key: a key for a", "== key_prop_name and order.direction() is datastore_pb.Query_Order.ASCENDING): BadRequest('kind is required for all orders except", "permissions and # limitations under the License. # \"\"\"Utility functions shared between the", "= md5.new(email.lower()).digest() user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20]", "in a set of filters. 
Args: filters: The normalized filters from query. \"\"\"", "== datastore_pb.Query_Filter.LESS_THAN: if end_key is None or limit <= end_key: end_key = limit", "License, Version 2.0 (the \"License\"); # you may not use this file except", "too large (sum of filters, orders, ancestor <= max_query_components) - ancestor (if any)", "any) must be on X Args: query: query to validate filters: normalized (by", "property - filters on __key__ compare to a reference in the same app", "more than %s filters' ' + sort orders ancestor total' % max_query_components) if", "property must be the same as the property ' 'to which the inequality", "form '__namespace__'/name or '__namespace__'/_EMPTY_NAMESPACE_ID. Args: key: a key for a __namespace__ instance, or", "normalized filters of a query. orders: the normalized orders of a query. Returns:", "An email address. Returns: A string userid derived from the email address. \"\"\"", "and prop_name != unapplied_log_timestamp_us_name): if ineq_prop_name is None: ineq_prop_name = prop_name elif ineq_prop_name", "normalized filters, orders. Raises an ApplicationError when any of the following conditions are", "datastore query with normalized filters, orders. Raises an ApplicationError when any of the", "num_components = len(filters) + len(orders) if query.has_ancestor(): num_components += 1 if num_components >", "if the key is not of the form '__kind__'/name. Args: key: a key", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "key. Raises an ApplicationError if the key is not of the form '__kind__'/name.", "filters: for property in filter.property_list(): FillUser(property) def FillUser(property): \"\"\"Fill in a synthetic user", "The kind range (start, start_inclusive, end, end_inclusive) requested in the query. 
\"\"\" if", "filters, orders) return (_NamespaceKeyToString(start_kind), start_inclusive, _NamespaceKeyToString(end_kind), end_inclusive) def _NamespaceKeyToString(key): \"\"\"Extract namespace name from", "is too large. may not have more than %s filters' ' + sort", "import datastore_pb from google.appengine.datastore import datastore_pb from google.appengine.runtime import apiproxy_errors def ValidateQuery(query, filters,", "if query.has_ancestor(): raise BadRequestError('ancestor queries not allowed') start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery(", "datastore_pb from google.appengine.datastore import datastore_pb from google.appengine.runtime import apiproxy_errors def ValidateQuery(query, filters, orders,", "be on X Args: query: query to validate filters: normalized (by datastore_index.Normalize) filters", "on __key__. Raises exceptions for illegal queries. Args: filters: the normalized filters of", "datastore_types._KEY_SPECIAL_PROPERTY): remaining_orders.append(o) else: break if remaining_filters: raise BadRequestError( 'Only comparison filters on '", "datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if query.has_transaction(): if not query.has_ancestor(): BadRequest('Only ancestor queries are allowed inside transactions.')", "validate filters: normalized (by datastore_index.Normalize) filters from query orders: normalized (by datastore_index.Normalize) orders", "'Only comparison filters on ' + key_prop + ' supported') if remaining_orders: raise", "BadRequest(message): raise apiproxy_errors.ApplicationError( datastore_pb.Error.BAD_REQUEST, message) key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name = ( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if", "# you may not use this file except in compliance with the License.", "a synthetic user ID from an email address. Note that this is not", "large. 
may not have more than %s filters' ' + sort orders ancestor", "= prop_name elif ineq_prop_name != prop_name: BadRequest(('Only one inequality filter per query is", "== datastore_pb.Query_Filter.GREATER_THAN: if start_key is None or limit >= start_key: start_key = limit", "key is not of the form '__namespace__'/name or '__namespace__'/_EMPTY_NAMESPACE_ID. Args: key: a key", "user properties. Args: property: A Property which may have a user value. \"\"\"", "all applied to the same property - filters on __key__ compare to a", "= len(filters) + len(orders) if query.has_ancestor(): num_components += 1 if num_components > max_query_components:", "return key_path[1] raise BadRequestError('invalid Key for __namespace__ table') def SynthesizeUserId(email): \"\"\"Return a synthetic", "agreed to in writing, software # distributed under the License is distributed on", "of a query. Returns: The key range (start, start_inclusive, end, end_inclusive) requested in", "namespace specified by key, or key if key is a false value. \"\"\"", "False key_prop = datastore_types._KEY_SPECIAL_PROPERTY for f in filters: op = f.op() if not", "elif ineq_prop_name != prop_name: BadRequest(('Only one inequality filter per query is supported. '", "kind name from __kind__ key. Raises an ApplicationError if the key is not", "email: An email address. Returns: A string userid derived from the email address.", "filter in filters: for property in filter.property_list(): FillUser(property) def FillUser(property): \"\"\"Fill in a", "key range (start, start_inclusive, end, end_inclusive) requested in the query. 
\"\"\" remaining_filters =", "first sort property must be the same as the property ' 'to which", "BadRequest('%s filter value must be a Key' % key_prop_name) ref_val = prop.value().referencevalue() if", "orders: normalized (by datastore_index.Normalize) orders from query max_query_components: limit on query complexity \"\"\"", "(the \"License\"); # you may not use this file except in compliance with", "Raises an ApplicationError if the key is not of the form '__kind__'/name. Args:", "normalized (by datastore_index.Normalize) filters from query orders: normalized (by datastore_index.Normalize) orders from query", "datastore_types.FromReferenceProperty(val) if op == datastore_pb.Query_Filter.LESS_THAN: if end_key is None or limit <= end_key:", "SynthesizeUserId(email): \"\"\"Return a synthetic user ID from an email address. Note that this", "for a user properties. Args: property: A Property which may have a user", "datastore_pb.Query_Order.ASCENDING): BadRequest('kind is required for all orders except __key__ ascending') def ParseKeyFilteredQuery(filters, orders):", "supported') return (start_key, start_inclusive, end_key, end_inclusive) def ParseKindQuery(query, filters, orders): \"\"\"Parse __kind__ (schema)", "userid derived from the email address. \"\"\" user_id_digest = md5.new(email.lower()).digest() user_id = '1'", "filter is applied. In your query ' 'the first sort property is %s", "filter.property_size()) prop = filter.property(0) prop_name = prop.name().decode('utf-8') if prop_name == key_prop_name: if not", "\"\"\" if query.has_ancestor(): raise BadRequestError('ancestor queries not allowed') start_kind, start_inclusive, end_kind, end_inclusive =", "not key: return key key_path = key.to_path() if (len(key_path) == 2 and key_path[0]", "a synthetic user ID for a user properties. Args: property: A Property which", "ParseKindQuery(query, filters, orders): \"\"\"Parse __kind__ (schema) queries. Raises exceptions for illegal queries. Args:", "the License. 
# \"\"\"Utility functions shared between the file and sqlite datastore stubs.\"\"\"", "# Unless required by applicable law or agreed to in writing, software #", "sort on __key__ ascending - multiple inequality (<, <=, >, >=) filters all", "is None or limit > start_key: start_key = limit start_inclusive = True remaining_orders", "PB. filters: the normalized filters from query. orders: the normalized orders from query.", "'__kind__'/name. Args: key: a key for a __kind__ instance, or a false value.", "by applicable law or agreed to in writing, software # distributed under the", "query. orders: the normalized orders of a query. Returns: The key range (start,", "name from __namespace__ key. Raises an ApplicationError if the key is not of", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "from query. orders: the normalized orders from query. Returns: The kind range (start,", "continue val = f.property(0).value() if not val.has_referencevalue(): raise BadRequestError('__key__ kind must be compared", "same user ID found in the production system. Args: email: An email address.", "Args: query: query to validate filters: normalized (by datastore_index.Normalize) filters from query orders:", "app is %s' % (key_prop_name, ref_val.app(), query.app())) if ref_val.name_space() != query.name_space(): BadRequest('%s filter", "the same as the property ' 'to which the inequality filter is applied.", "def _NamespaceKeyToString(key): \"\"\"Extract namespace name from __namespace__ key. Raises an ApplicationError if the", "return user_id def FillUsersInQuery(filters): \"\"\"Fill in a synthetic user ID for all user", "Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "+ key_prop + ' supported') return (start_key, start_inclusive, end_key, end_inclusive) def ParseKindQuery(query, filters,", "only allow filters and ascending-orders on __key__. Raises exceptions for illegal queries. 
Args:", "to a reference in the same app and namespace as the query -", "not query.has_kind(): for filter in filters: prop_name = filter.property(0).name().decode('utf-8') if (prop_name != key_prop_name", "Args: email: An email address. Returns: A string userid derived from the email", "in a synthetic user ID for a user properties. Args: property: A Property", "filter.property(0).name().decode('utf-8') if (prop_name != key_prop_name and prop_name != unapplied_log_timestamp_us_name): BadRequest('kind is required for", "sort property is %s but the inequality filter ' 'is on %s' %", "is %s' % (key_prop_name, ref_val.name_space(), query.name_space())) if (filter.op() in datastore_index.INEQUALITY_OPERATORS and prop_name !=", "if end_key is None or limit <= end_key: end_key = limit end_inclusive =", "have an ancestor - queries that are not too large (sum of filters,", "and namespace as the query - if an inequality filter on prop X", "if not prop.value().has_referencevalue(): BadRequest('%s filter value must be a Key' % key_prop_name) ref_val", "' supported') if remaining_orders: raise BadRequestError('Only ascending order on ' + key_prop +", "and isinstance(key_path[1], basestring)): return key_path[1] raise BadRequestError('invalid Key for __kind__ table') def ParseNamespaceQuery(query,", "== datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if start_key is None or limit >", "file except in compliance with the License. # You may obtain a copy", "and prop_name != unapplied_log_timestamp_us_name): BadRequest('kind is required for non-__key__ filters') for order in", "filters, orders, ancestor <= max_query_components) - ancestor (if any) app and namespace match", "expected 1' % filter.property_size()) prop = filter.property(0) prop_name = prop.name().decode('utf-8') if prop_name ==", "#!/usr/bin/env python # # Copyright 2007 Google Inc. 
# # Licensed under the", "be the same as the property ' 'to which the inequality filter is", "instance, or a false value. Returns: namespace specified by key, or key if", "query.app() != ancestor.app(): BadRequest('query app is %s but ancestor app is %s' %", "BadRequest('%s filter app is %s but query app is %s' % (key_prop_name, ref_val.app(),", "raise BadRequestError('ancestor queries not allowed') start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders)", "a __kind__ instance, or a false value. Returns: kind specified by key, or", "not allowed') start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_KindKeyToString(start_kind), start_inclusive,", "License for the specific language governing permissions and # limitations under the License.", "\"\"\" remaining_filters = [] start_key = None start_inclusive = False end_key = None", "not of the form '__kind__'/name. Args: key: a key for a __kind__ instance,", "= True remaining_orders = [] for o in orders: if not (o.direction() ==", "+ key_prop + ' supported') if remaining_orders: raise BadRequestError('Only ascending order on '", "key_path = key.to_path() if (len(key_path) == 2 and key_path[0] == '__kind__' and isinstance(key_path[1],", "an inequality filter on prop X is used, the first order (if any)", "is %s but query app is %s' % (key_prop_name, ref_val.app(), query.app())) if ref_val.name_space()", "== datastore_pb.Query_Order.ASCENDING and o.property() == datastore_types._KEY_SPECIAL_PROPERTY): remaining_orders.append(o) else: break if remaining_filters: raise BadRequestError(", "to in writing, software # distributed under the License is distributed on an", "query. 
\"\"\" remaining_filters = [] start_key = None start_inclusive = False end_key =", "order.direction() is datastore_pb.Query_Order.ASCENDING): BadRequest('kind is required for all orders except __key__ ascending') def", "not (f.property_size() == 1 and f.property(0).name() == key_prop and not (op == datastore_pb.Query_Filter.IN", "key_prop_name and order.direction() is datastore_pb.Query_Order.ASCENDING): BadRequest('kind is required for all orders except __key__", "!= unapplied_log_timestamp_us_name): if ineq_prop_name is None: ineq_prop_name = prop_name elif ineq_prop_name != prop_name:", "implied. # See the License for the specific language governing permissions and #", "orders from query max_query_components: limit on query complexity \"\"\" def BadRequest(message): raise apiproxy_errors.ApplicationError(", "first sort property is %s but the inequality filter ' 'is on %s'", "\"License\"); # you may not use this file except in compliance with the", "not (o.direction() == datastore_pb.Query_Order.ASCENDING and o.property() == datastore_types._KEY_SPECIAL_PROPERTY): remaining_orders.append(o) else: break if remaining_filters:", "The key range (start, start_inclusive, end, end_inclusive) requested in the query. \"\"\" remaining_filters", "end_inclusive) requested in the query. \"\"\" remaining_filters = [] start_key = None start_inclusive", "orders) return (_KindKeyToString(start_kind), start_inclusive, _KindKeyToString(end_kind), end_inclusive) def _KindKeyToString(key): \"\"\"Extract kind name from __kind__", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "for filter in filters: if filter.property_size() != 1: BadRequest('Filter has %d properties, expected", "app is %s but ancestor app is %s' % (query.app(), ancestor.app())) if query.name_space()", "(start_key, start_inclusive, end_key, end_inclusive) def ParseKindQuery(query, filters, orders): \"\"\"Parse __kind__ (schema) queries. 
Raises", "for property in filter.property_list(): FillUser(property) def FillUser(property): \"\"\"Fill in a synthetic user ID", "kind range (start, start_inclusive, end, end_inclusive) requested in the query. \"\"\" if query.has_ancestor():", "queries. Args: filters: the normalized filters of a query. orders: the normalized orders", "filters: the normalized filters of a query. orders: the normalized orders of a", "if not (prop_name == key_prop_name and order.direction() is datastore_pb.Query_Order.ASCENDING): BadRequest('kind is required for", "query max_query_components: limit on query complexity \"\"\" def BadRequest(message): raise apiproxy_errors.ApplicationError( datastore_pb.Error.BAD_REQUEST, message)", "datastore_pb.Query_Filter.GREATER_THAN: if start_key is None or limit >= start_key: start_key = limit start_inclusive", "__kind__ key. Raises an ApplicationError if the key is not of the form", "user ID found in the production system. Args: email: An email address. Returns:", "(by datastore_index.Normalize) orders from query max_query_components: limit on query complexity \"\"\" def BadRequest(message):", "under the License. # \"\"\"Utility functions shared between the file and sqlite datastore", "value. \"\"\" if not key: return key key_path = key.to_path() if (len(key_path) ==", "orders: the normalized orders of a query. Returns: The key range (start, start_inclusive,", "range (start, start_inclusive, end, end_inclusive) requested in the query. 
\"\"\" remaining_filters = []", "to the same property - filters on __key__ compare to a reference in", "+ ' supported') if remaining_orders: raise BadRequestError('Only ascending order on ' + key_prop", "ancestor.name_space(): BadRequest('query namespace is %s but ancestor namespace is %s' % (query.name_space(), ancestor.name_space()))", "import datastore_types from google.appengine.api.datastore_errors import BadRequestError from google.appengine.datastore import datastore_index from google.appengine.datastore import", "- ancestor (if any) app and namespace match query app and namespace -", "queries not allowed') start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_KindKeyToString(start_kind),", "is required for non-__key__ filters') for order in orders: prop_name = order.property().decode('utf-8') if", "query. Returns: The kind range (start, start_inclusive, end, end_inclusive) requested in the query.", "same as the property ' 'to which the inequality filter is applied. In", "or implied. # See the License for the specific language governing permissions and", "prop.value().referencevalue() if ref_val.app() != query.app(): BadRequest('%s filter app is %s but query app", "key is a false value. \"\"\" if not key: return key key_path =", "if (filter.op() in datastore_index.INEQUALITY_OPERATORS and prop_name != unapplied_log_timestamp_us_name): if ineq_prop_name is None: ineq_prop_name", "basestring): return key_path[1] raise BadRequestError('invalid Key for __namespace__ table') def SynthesizeUserId(email): \"\"\"Return a", "on ' + key_prop + ' supported') return (start_key, start_inclusive, end_key, end_inclusive) def", "Returns: kind specified by key, or key if key is a false value.", "= datastore_types._KEY_SPECIAL_PROPERTY for f in filters: op = f.op() if not (f.property_size() ==", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "from query. 
Returns: The kind range (start, start_inclusive, end, end_inclusive) requested in the", "start_key is None or limit >= start_key: start_key = limit start_inclusive = False", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "in a synthetic user ID for all user properties in a set of", "datastore_index from google.appengine.datastore import datastore_pb from google.appengine.datastore import datastore_pb from google.appengine.runtime import apiproxy_errors", "ref_val.name_space() != query.name_space(): BadRequest('%s filter namespace is %s but query namespace is %s'", "the inequality filter ' 'is on %s' % (first_order_prop, ineq_prop_name)) if not query.has_kind():", "BadRequestError('ancestor queries not allowed') start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return", "from query max_query_components: limit on query complexity \"\"\" def BadRequest(message): raise apiproxy_errors.ApplicationError( datastore_pb.Error.BAD_REQUEST,", "a datastore query with normalized filters, orders. Raises an ApplicationError when any of", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "filter per query is supported. ' 'Encountered both %s and %s') % (ineq_prop_name,", "\"\"\" for filter in filters: for property in filter.property_list(): FillUser(property) def FillUser(property): \"\"\"Fill", "in writing, software # distributed under the License is distributed on an \"AS", "normalized orders of a query. Returns: The key range (start, start_inclusive, end, end_inclusive)", "- if an inequality filter on prop X is used, the first order", "namespace name from __namespace__ key. Raises an ApplicationError if the key is not", "from the email address. 
\"\"\" user_id_digest = md5.new(email.lower()).digest() user_id = '1' + ''.join(['%02d'", "in filters: if filter.property_size() != 1: BadRequest('Filter has %d properties, expected 1' %", "def ValidateQuery(query, filters, orders, max_query_components): \"\"\"Validate a datastore query with normalized filters, orders.", "' + sort orders ancestor total' % max_query_components) if query.has_ancestor(): ancestor = query.ancestor()", "is %s' % (query.name_space(), ancestor.name_space())) ineq_prop_name = None for filter in filters: if", "datastore_types._KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name = ( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if query.has_transaction(): if not query.has_ancestor(): BadRequest('Only ancestor queries", "namespace is %s' % (query.name_space(), ancestor.name_space())) ineq_prop_name = None for filter in filters:", "query.name_space(): BadRequest('%s filter namespace is %s but query namespace is %s' % (key_prop_name,", "ref_val.app() != query.app(): BadRequest('%s filter app is %s but query app is %s'", "BadRequest('kind is required for all orders except __key__ ascending') def ParseKeyFilteredQuery(filters, orders): \"\"\"Parse", "(op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if start_key is None or limit", "on __key__ compare to a reference in the same app and namespace as", "and not (op == datastore_pb.Query_Filter.IN or op == datastore_pb.Query_Filter.EXISTS)): remaining_filters.append(f) continue val =", "illegal queries. Args: filters: the normalized filters of a query. orders: the normalized", "\"\"\"Fill in a synthetic user ID for a user properties. Args: property: A", "Raises exceptions for illegal queries. Args: query: A Query PB. filters: the normalized", "A Property which may have a user value. 
\"\"\" if property.value().has_uservalue(): uid =", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "filters and ascending-orders on __key__. Raises exceptions for illegal queries. Args: filters: the", "+ ' supported') return (start_key, start_inclusive, end_key, end_inclusive) def ParseKindQuery(query, filters, orders): \"\"\"Parse", "return (start_key, start_inclusive, end_key, end_inclusive) def ParseKindQuery(query, filters, orders): \"\"\"Parse __kind__ (schema) queries.", "kind specified by key, or key if key is a false value. \"\"\"", "start_key = None start_inclusive = False end_key = None end_inclusive = False key_prop", "is datastore_pb.Query_Order.ASCENDING): BadRequest('kind is required for all orders except __key__ ascending') def ParseKeyFilteredQuery(filters,", "on query complexity \"\"\" def BadRequest(message): raise apiproxy_errors.ApplicationError( datastore_pb.Error.BAD_REQUEST, message) key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY", "not val.has_referencevalue(): raise BadRequestError('__key__ kind must be compared to a key') limit =", "the normalized filters from query. orders: the normalized orders from query. Returns: The", "google.appengine.api import datastore_types from google.appengine.api.datastore_errors import BadRequestError from google.appengine.datastore import datastore_index from google.appengine.datastore", "of the following conditions are violated: - transactional queries have an ancestor -", "inequality filter is applied. In your query ' 'the first sort property is", "op == datastore_pb.Query_Filter.EXISTS)): remaining_filters.append(f) continue val = f.property(0).value() if not val.has_referencevalue(): raise BadRequestError('__key__", "key: a key for a __kind__ instance, or a false value. Returns: kind", "'__namespace__'/name or '__namespace__'/_EMPTY_NAMESPACE_ID. 
Args: key: a key for a __namespace__ instance, or a", "queries only filter on __key__ and only sort on __key__ ascending - multiple", "remaining_filters.append(f) continue val = f.property(0).value() if not val.has_referencevalue(): raise BadRequestError('__key__ kind must be", "= orders[0].property().decode('utf-8') if first_order_prop != ineq_prop_name: BadRequest('The first sort property must be the", "for __kind__ table') def ParseNamespaceQuery(query, filters, orders): \"\"\"Parse __namespace__ queries. Raises exceptions for", "orders, ancestor <= max_query_components) - ancestor (if any) app and namespace match query", "the file and sqlite datastore stubs.\"\"\" import md5 from google.appengine.api import datastore_types from", "orders, max_query_components): \"\"\"Validate a datastore query with normalized filters, orders. Raises an ApplicationError", "required for all orders except __key__ ascending') def ParseKeyFilteredQuery(filters, orders): \"\"\"Parse queries which", "only sort on __key__ ascending - multiple inequality (<, <=, >, >=) filters", "datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if start_key is None or limit > start_key:", "end_key = None end_inclusive = False key_prop = datastore_types._KEY_SPECIAL_PROPERTY for f in filters:", "return key key_path = key.to_path() if len(key_path) == 2 and key_path[0] == '__namespace__':", "or a false value. Returns: namespace specified by key, or key if key", "violated: - transactional queries have an ancestor - queries that are not too", "key_path[1] raise BadRequestError('invalid Key for __namespace__ table') def SynthesizeUserId(email): \"\"\"Return a synthetic user", "user_id_digest = md5.new(email.lower()).digest() user_id = '1' + ''.join(['%02d' % ord(x) for x in", "BadRequestError('invalid Key for __kind__ table') def ParseNamespaceQuery(query, filters, orders): \"\"\"Parse __namespace__ queries. 
Raises", "not query.has_ancestor(): BadRequest('Only ancestor queries are allowed inside transactions.') num_components = len(filters) +", "1 if num_components > max_query_components: BadRequest('query is too large. may not have more", "orders except __key__ ascending') def ParseKeyFilteredQuery(filters, orders): \"\"\"Parse queries which only allow filters", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "key_path[0] == '__kind__' and isinstance(key_path[1], basestring)): return key_path[1] raise BadRequestError('invalid Key for __kind__", "and o.property() == datastore_types._KEY_SPECIAL_PROPERTY): remaining_orders.append(o) else: break if remaining_filters: raise BadRequestError( 'Only comparison", "you may not use this file except in compliance with the License. #", "between the file and sqlite datastore stubs.\"\"\" import md5 from google.appengine.api import datastore_types", "from query orders: normalized (by datastore_index.Normalize) orders from query max_query_components: limit on query", "end_key: end_key = limit end_inclusive = False elif (op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or op", "datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if end_key is None or limit < end_key:", "the normalized orders of a query. Returns: The key range (start, start_inclusive, end,", "python # # Copyright 2007 Google Inc. # # Licensed under the Apache", "start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_KindKeyToString(start_kind), start_inclusive, _KindKeyToString(end_kind), end_inclusive)", "datastore_pb from google.appengine.runtime import apiproxy_errors def ValidateQuery(query, filters, orders, max_query_components): \"\"\"Validate a datastore", "ID found in the production system. Args: email: An email address. Returns: A", "(if any) app and namespace match query app and namespace - kindless queries", "properties in a set of filters. 
Args: filters: The normalized filters from query.", "non-__key__ filters') for order in orders: prop_name = order.property().decode('utf-8') if not (prop_name ==", "raise BadRequestError( 'Only comparison filters on ' + key_prop + ' supported') if", "if the key is not of the form '__namespace__'/name or '__namespace__'/_EMPTY_NAMESPACE_ID. Args: key:", "is required for all orders except __key__ ascending') def ParseKeyFilteredQuery(filters, orders): \"\"\"Parse queries", "key.to_path() if (len(key_path) == 2 and key_path[0] == '__kind__' and isinstance(key_path[1], basestring)): return", "filters: prop_name = filter.property(0).name().decode('utf-8') if (prop_name != key_prop_name and prop_name != unapplied_log_timestamp_us_name): BadRequest('kind", "\"\"\"Parse __namespace__ queries. Raises exceptions for illegal queries. Args: query: A Query PB.", "num_components > max_query_components: BadRequest('query is too large. may not have more than %s", "True remaining_orders = [] for o in orders: if not (o.direction() == datastore_pb.Query_Order.ASCENDING", "= f.property(0).value() if not val.has_referencevalue(): raise BadRequestError('__key__ kind must be compared to a", "limit = datastore_types.FromReferenceProperty(val) if op == datastore_pb.Query_Filter.LESS_THAN: if end_key is None or limit", "only filter on __key__ and only sort on __key__ ascending - multiple inequality", "== key_prop_name: if not prop.value().has_referencevalue(): BadRequest('%s filter value must be a Key' %", "are allowed inside transactions.') num_components = len(filters) + len(orders) if query.has_ancestor(): num_components +=", "(if any) must be on X Args: query: query to validate filters: normalized", "is %s but the inequality filter ' 'is on %s' % (first_order_prop, ineq_prop_name))", "BadRequest('kind is required for non-__key__ filters') for order in orders: prop_name = order.property().decode('utf-8')", "(prop_name == key_prop_name and order.direction() is 
datastore_pb.Query_Order.ASCENDING): BadRequest('kind is required for all orders", "if op == datastore_pb.Query_Filter.GREATER_THAN: if start_key is None or limit >= start_key: start_key", "datastore_pb.Query_Order.ASCENDING and o.property() == datastore_types._KEY_SPECIAL_PROPERTY): remaining_orders.append(o) else: break if remaining_filters: raise BadRequestError( 'Only", "if end_key is None or limit < end_key: end_key = limit end_inclusive =", "value. Returns: namespace specified by key, or key if key is a false", "start_key = limit start_inclusive = True remaining_orders = [] for o in orders:", "Args: filters: The normalized filters from query. \"\"\" for filter in filters: for", "use this file except in compliance with the License. # You may obtain", "+= 1 if num_components > max_query_components: BadRequest('query is too large. may not have", "(ineq_prop_name, prop_name)) if ineq_prop_name is not None and orders: first_order_prop = orders[0].property().decode('utf-8') if", "if not val.has_referencevalue(): raise BadRequestError('__key__ kind must be compared to a key') limit", "for a __kind__ instance, or a false value. Returns: kind specified by key,", "email address. 
Note that this is not the same user ID found in", "query.has_kind(): for filter in filters: prop_name = filter.property(0).name().decode('utf-8') if (prop_name != key_prop_name and", "is %s' % (key_prop_name, ref_val.app(), query.app())) if ref_val.name_space() != query.name_space(): BadRequest('%s filter namespace", "if not key: return key key_path = key.to_path() if (len(key_path) == 2 and", "key_prop_name and prop_name != unapplied_log_timestamp_us_name): BadRequest('kind is required for non-__key__ filters') for order", "%s' % (key_prop_name, ref_val.app(), query.app())) if ref_val.name_space() != query.name_space(): BadRequest('%s filter namespace is", "order on ' + key_prop + ' supported') return (start_key, start_inclusive, end_key, end_inclusive)", "- transactional queries have an ancestor - queries that are not too large", "transactions.') num_components = len(filters) + len(orders) if query.has_ancestor(): num_components += 1 if num_components", "def ParseKindQuery(query, filters, orders): \"\"\"Parse __kind__ (schema) queries. Raises exceptions for illegal queries.", "(start, start_inclusive, end, end_inclusive) requested in the query. \"\"\" if query.has_ancestor(): raise BadRequestError('ancestor", "and only sort on __key__ ascending - multiple inequality (<, <=, >, >=)", "normalized orders from query. 
Returns: The kind range (start, start_inclusive, end, end_inclusive) requested", "app and namespace match query app and namespace - kindless queries only filter", "ascending order on ' + key_prop + ' supported') return (start_key, start_inclusive, end_key,", "' supported') return (start_key, start_inclusive, end_key, end_inclusive) def ParseKindQuery(query, filters, orders): \"\"\"Parse __kind__", "an ApplicationError if the key is not of the form '__namespace__'/name or '__namespace__'/_EMPTY_NAMESPACE_ID.", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "len(orders) if query.has_ancestor(): num_components += 1 if num_components > max_query_components: BadRequest('query is too", "specific language governing permissions and # limitations under the License. # \"\"\"Utility functions", "inequality filter ' 'is on %s' % (first_order_prop, ineq_prop_name)) if not query.has_kind(): for", "' 'is on %s' % (first_order_prop, ineq_prop_name)) if not query.has_kind(): for filter in", "ID for all user properties in a set of filters. Args: filters: The", "filter.property_list(): FillUser(property) def FillUser(property): \"\"\"Fill in a synthetic user ID for a user", "if ref_val.name_space() != query.name_space(): BadRequest('%s filter namespace is %s but query namespace is", "= datastore_types._KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name = ( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if query.has_transaction(): if not query.has_ancestor(): BadRequest('Only ancestor", "prop X is used, the first order (if any) must be on X", "be a Key' % key_prop_name) ref_val = prop.value().referencevalue() if ref_val.app() != query.app(): BadRequest('%s", "for a __namespace__ instance, or a false value. Returns: namespace specified by key,", "and # limitations under the License. 
# \"\"\"Utility functions shared between the file", "isinstance(key_path[1], basestring)): return key_path[1] raise BadRequestError('invalid Key for __kind__ table') def ParseNamespaceQuery(query, filters,", "property in filter.property_list(): FillUser(property) def FillUser(property): \"\"\"Fill in a synthetic user ID for", "end_inclusive = False elif (op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if end_key", "Google Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "large (sum of filters, orders, ancestor <= max_query_components) - ancestor (if any) app", "on prop X is used, the first order (if any) must be on", "or limit <= end_key: end_key = limit end_inclusive = False elif (op ==", "value. \"\"\" if not key: return key key_path = key.to_path() if len(key_path) ==", "and namespace match query app and namespace - kindless queries only filter on", "_KindKeyToString(end_kind), end_inclusive) def _KindKeyToString(key): \"\"\"Extract kind name from __kind__ key. Raises an ApplicationError", "orders) return (_NamespaceKeyToString(start_kind), start_inclusive, _NamespaceKeyToString(end_kind), end_inclusive) def _NamespaceKeyToString(key): \"\"\"Extract namespace name from __namespace__", "2.0 (the \"License\"); # you may not use this file except in compliance", "is %s but ancestor namespace is %s' % (query.name_space(), ancestor.name_space())) ineq_prop_name = None", "= [] for o in orders: if not (o.direction() == datastore_pb.Query_Order.ASCENDING and o.property()", "prop_name != unapplied_log_timestamp_us_name): BadRequest('kind is required for non-__key__ filters') for order in orders:", "start_inclusive, end, end_inclusive) requested in the query. \"\"\" if query.has_ancestor(): raise BadRequestError('ancestor queries", "\"\"\"Extract namespace name from __namespace__ key. Raises an ApplicationError if the key is", "\"\"\"Parse __kind__ (schema) queries. 
Raises exceptions for illegal queries. Args: query: A Query", "key. Raises an ApplicationError if the key is not of the form '__namespace__'/name", "for the specific language governing permissions and # limitations under the License. #", "comparison filters on ' + key_prop + ' supported') if remaining_orders: raise BadRequestError('Only", "if prop_name == key_prop_name: if not prop.value().has_referencevalue(): BadRequest('%s filter value must be a", "!= query.app(): BadRequest('%s filter app is %s but query app is %s' %", "ParseKeyFilteredQuery(filters, orders): \"\"\"Parse queries which only allow filters and ascending-orders on __key__. Raises", "basestring)): return key_path[1] raise BadRequestError('invalid Key for __kind__ table') def ParseNamespaceQuery(query, filters, orders):", "google.appengine.runtime import apiproxy_errors def ValidateQuery(query, filters, orders, max_query_components): \"\"\"Validate a datastore query with", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "if (prop_name != key_prop_name and prop_name != unapplied_log_timestamp_us_name): BadRequest('kind is required for non-__key__", "orders of a query. Returns: The key range (start, start_inclusive, end, end_inclusive) requested", "start_inclusive, end, end_inclusive) requested in the query. \"\"\" remaining_filters = [] start_key =", "if len(key_path) == 2 and key_path[0] == '__namespace__': if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID: return", "BadRequest('%s filter namespace is %s but query namespace is %s' % (key_prop_name, ref_val.name_space(),", "query.has_transaction(): if not query.has_ancestor(): BadRequest('Only ancestor queries are allowed inside transactions.') num_components =", "filters all applied to the same property - filters on __key__ compare to", "the email address. \"\"\" user_id_digest = md5.new(email.lower()).digest() user_id = '1' + ''.join(['%02d' %", "synthetic user ID for all user properties in a set of filters. 
Args:", "(sum of filters, orders, ancestor <= max_query_components) - ancestor (if any) app and", "%s') % (ineq_prop_name, prop_name)) if ineq_prop_name is not None and orders: first_order_prop =", "filter app is %s but query app is %s' % (key_prop_name, ref_val.app(), query.app()))", "sort property must be the same as the property ' 'to which the", "== 2 and key_path[0] == '__namespace__': if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID: return '' if", "raise BadRequestError('Only ascending order on ' + key_prop + ' supported') return (start_key,", "ValidateQuery(query, filters, orders, max_query_components): \"\"\"Validate a datastore query with normalized filters, orders. Raises", "query orders: normalized (by datastore_index.Normalize) orders from query max_query_components: limit on query complexity", "required for non-__key__ filters') for order in orders: prop_name = order.property().decode('utf-8') if not", "the first order (if any) must be on X Args: query: query to", "total' % max_query_components) if query.has_ancestor(): ancestor = query.ancestor() if query.app() != ancestor.app(): BadRequest('query", "= limit end_inclusive = False elif (op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL):", "# # Unless required by applicable law or agreed to in writing, software", "( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if query.has_transaction(): if not query.has_ancestor(): BadRequest('Only ancestor queries are allowed inside", "or '__namespace__'/_EMPTY_NAMESPACE_ID. Args: key: a key for a __namespace__ instance, or a false", "ref_val.name_space(), query.name_space())) if (filter.op() in datastore_index.INEQUALITY_OPERATORS and prop_name != unapplied_log_timestamp_us_name): if ineq_prop_name is", "order in orders: prop_name = order.property().decode('utf-8') if not (prop_name == key_prop_name and order.direction()", "express or implied. 
# See the License for the specific language governing permissions", "if op == datastore_pb.Query_Filter.LESS_THAN: if end_key is None or limit <= end_key: end_key", "production system. Args: email: An email address. Returns: A string userid derived from", "an ApplicationError when any of the following conditions are violated: - transactional queries", "!= query.name_space(): BadRequest('%s filter namespace is %s but query namespace is %s' %", "2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the", "a Key' % key_prop_name) ref_val = prop.value().referencevalue() if ref_val.app() != query.app(): BadRequest('%s filter", "query.has_ancestor(): num_components += 1 if num_components > max_query_components: BadRequest('query is too large. may", "query is supported. ' 'Encountered both %s and %s') % (ineq_prop_name, prop_name)) if", "break if remaining_filters: raise BadRequestError( 'Only comparison filters on ' + key_prop +", "f.property(0).name() == key_prop and not (op == datastore_pb.Query_Filter.IN or op == datastore_pb.Query_Filter.EXISTS)): remaining_filters.append(f)", "end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_KindKeyToString(start_kind), start_inclusive, _KindKeyToString(end_kind), end_inclusive) def _KindKeyToString(key): \"\"\"Extract", "# # Copyright 2007 Google Inc. # # Licensed under the Apache License,", "- queries that are not too large (sum of filters, orders, ancestor <=", "Returns: The kind range (start, start_inclusive, end, end_inclusive) requested in the query. \"\"\"", "key: a key for a __namespace__ instance, or a false value. Returns: namespace", "for order in orders: prop_name = order.property().decode('utf-8') if not (prop_name == key_prop_name and", "either express or implied. 
# See the License for the specific language governing", "orders: first_order_prop = orders[0].property().decode('utf-8') if first_order_prop != ineq_prop_name: BadRequest('The first sort property must", "limit > start_key: start_key = limit start_inclusive = True remaining_orders = [] for", "None or limit >= start_key: start_key = limit start_inclusive = False elif (op", "2 and key_path[0] == '__namespace__': if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID: return '' if isinstance(key_path[1],", "'__kind__' and isinstance(key_path[1], basestring)): return key_path[1] raise BadRequestError('invalid Key for __kind__ table') def", "None and orders: first_order_prop = orders[0].property().decode('utf-8') if first_order_prop != ineq_prop_name: BadRequest('The first sort", "- filters on __key__ compare to a reference in the same app and", "key: return key key_path = key.to_path() if len(key_path) == 2 and key_path[0] ==", "in orders: if not (o.direction() == datastore_pb.Query_Order.ASCENDING and o.property() == datastore_types._KEY_SPECIAL_PROPERTY): remaining_orders.append(o) else:", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "def ParseNamespaceQuery(query, filters, orders): \"\"\"Parse __namespace__ queries. Raises exceptions for illegal queries. Args:", "' 'the first sort property is %s but the inequality filter ' 'is", "google.appengine.api.datastore_errors import BadRequestError from google.appengine.datastore import datastore_index from google.appengine.datastore import datastore_pb from google.appengine.datastore", "limit end_inclusive = False elif (op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if", "address. Returns: A string userid derived from the email address. 
\"\"\" user_id_digest =", "is %s but query namespace is %s' % (key_prop_name, ref_val.name_space(), query.name_space())) if (filter.op()", "= False elif (op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if end_key is", "ord(x) for x in user_id_digest])[:20] return user_id def FillUsersInQuery(filters): \"\"\"Fill in a synthetic", "ineq_prop_name is not None and orders: first_order_prop = orders[0].property().decode('utf-8') if first_order_prop != ineq_prop_name:", "ineq_prop_name: BadRequest('The first sort property must be the same as the property '", "key') limit = datastore_types.FromReferenceProperty(val) if op == datastore_pb.Query_Filter.LESS_THAN: if end_key is None or", "key for a __kind__ instance, or a false value. Returns: kind specified by", "!= prop_name: BadRequest(('Only one inequality filter per query is supported. ' 'Encountered both", "for __namespace__ table') def SynthesizeUserId(email): \"\"\"Return a synthetic user ID from an email", "key if key is a false value. 
\"\"\" if not key: return key", "datastore_pb.Query_Filter.EXISTS)): remaining_filters.append(f) continue val = f.property(0).value() if not val.has_referencevalue(): raise BadRequestError('__key__ kind must", "start_key: start_key = limit start_inclusive = False elif (op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or op", "must be compared to a key') limit = datastore_types.FromReferenceProperty(val) if op == datastore_pb.Query_Filter.LESS_THAN:", "= filter.property(0) prop_name = prop.name().decode('utf-8') if prop_name == key_prop_name: if not prop.value().has_referencevalue(): BadRequest('%s", "= None start_inclusive = False end_key = None end_inclusive = False key_prop =", "a reference in the same app and namespace as the query - if", "o.property() == datastore_types._KEY_SPECIAL_PROPERTY): remaining_orders.append(o) else: break if remaining_filters: raise BadRequestError( 'Only comparison filters", "!= ancestor.app(): BadRequest('query app is %s but ancestor app is %s' % (query.app(),", "the License. # You may obtain a copy of the License at #", "or key if key is a false value. \"\"\" if not key: return", "from google.appengine.api import datastore_types from google.appengine.api.datastore_errors import BadRequestError from google.appengine.datastore import datastore_index from", "prop_name)) if ineq_prop_name is not None and orders: first_order_prop = orders[0].property().decode('utf-8') if first_order_prop", "if not query.has_ancestor(): BadRequest('Only ancestor queries are allowed inside transactions.') num_components = len(filters)", "a false value. \"\"\" if not key: return key key_path = key.to_path() if", "which may have a user value. 
\"\"\" if property.value().has_uservalue(): uid = SynthesizeUserId(property.value().uservalue().email()) if", "app and namespace as the query - if an inequality filter on prop", "but ancestor namespace is %s' % (query.name_space(), ancestor.name_space())) ineq_prop_name = None for filter", "# \"\"\"Utility functions shared between the file and sqlite datastore stubs.\"\"\" import md5", "is applied. In your query ' 'the first sort property is %s but", "\"\"\" if not key: return key key_path = key.to_path() if (len(key_path) == 2", "if (len(key_path) == 2 and key_path[0] == '__kind__' and isinstance(key_path[1], basestring)): return key_path[1]", "BadRequest('query is too large. may not have more than %s filters' ' +", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "> max_query_components: BadRequest('query is too large. may not have more than %s filters'", "if not (f.property_size() == 1 and f.property(0).name() == key_prop and not (op ==", "all user properties in a set of filters. Args: filters: The normalized filters", "to a key') limit = datastore_types.FromReferenceProperty(val) if op == datastore_pb.Query_Filter.LESS_THAN: if end_key is", "X is used, the first order (if any) must be on X Args:", "complexity \"\"\" def BadRequest(message): raise apiproxy_errors.ApplicationError( datastore_pb.Error.BAD_REQUEST, message) key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name =", "len(key_path) == 2 and key_path[0] == '__namespace__': if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID: return ''", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "in filters: prop_name = filter.property(0).name().decode('utf-8') if (prop_name != key_prop_name and prop_name != unapplied_log_timestamp_us_name):", "ID for a user properties. 
Args: property: A Property which may have a", "is %s' % (query.app(), ancestor.app())) if query.name_space() != ancestor.name_space(): BadRequest('query namespace is %s", "start_inclusive = True remaining_orders = [] for o in orders: if not (o.direction()", "false value. \"\"\" if not key: return key key_path = key.to_path() if (len(key_path)", "queries have an ancestor - queries that are not too large (sum of", "prop = filter.property(0) prop_name = prop.name().decode('utf-8') if prop_name == key_prop_name: if not prop.value().has_referencevalue():", "filters, orders): \"\"\"Parse __namespace__ queries. Raises exceptions for illegal queries. Args: query: A", "ApplicationError if the key is not of the form '__namespace__'/name or '__namespace__'/_EMPTY_NAMESPACE_ID. Args:", "The normalized filters from query. \"\"\" for filter in filters: for property in", "op == datastore_pb.Query_Filter.LESS_THAN: if end_key is None or limit <= end_key: end_key =", "a synthetic user ID for all user properties in a set of filters.", "if ref_val.app() != query.app(): BadRequest('%s filter app is %s but query app is", "# limitations under the License. # \"\"\"Utility functions shared between the file and", "query.has_ancestor(): BadRequest('Only ancestor queries are allowed inside transactions.') num_components = len(filters) + len(orders)", "set of filters. Args: filters: The normalized filters from query. \"\"\" for filter", "or limit < end_key: end_key = limit end_inclusive = True if op ==", "%s but ancestor namespace is %s' % (query.name_space(), ancestor.name_space())) ineq_prop_name = None for", "same app and namespace as the query - if an inequality filter on", "filter in filters: if filter.property_size() != 1: BadRequest('Filter has %d properties, expected 1'", "which the inequality filter is applied. 
In your query ' 'the first sort", "an ancestor - queries that are not too large (sum of filters, orders,", "ancestor queries are allowed inside transactions.') num_components = len(filters) + len(orders) if query.has_ancestor():", "query.app())) if ref_val.name_space() != query.name_space(): BadRequest('%s filter namespace is %s but query namespace", "%s but query namespace is %s' % (key_prop_name, ref_val.name_space(), query.name_space())) if (filter.op() in", "(start, start_inclusive, end, end_inclusive) requested in the query. \"\"\" remaining_filters = [] start_key", "= limit end_inclusive = True if op == datastore_pb.Query_Filter.GREATER_THAN: if start_key is None", "limit >= start_key: start_key = limit start_inclusive = False elif (op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL", "op == datastore_pb.Query_Filter.EQUAL): if start_key is None or limit > start_key: start_key =", "query. orders: the normalized orders from query. Returns: The kind range (start, start_inclusive,", "query. Returns: The key range (start, start_inclusive, end, end_inclusive) requested in the query.", "sort orders ancestor total' % max_query_components) if query.has_ancestor(): ancestor = query.ancestor() if query.app()", "= datastore_types.FromReferenceProperty(val) if op == datastore_pb.Query_Filter.LESS_THAN: if end_key is None or limit <=", "as the query - if an inequality filter on prop X is used,", "table') def SynthesizeUserId(email): \"\"\"Return a synthetic user ID from an email address. Note", "A string userid derived from the email address. \"\"\" user_id_digest = md5.new(email.lower()).digest() user_id", "= query.ancestor() if query.app() != ancestor.app(): BadRequest('query app is %s but ancestor app", "%s but the inequality filter ' 'is on %s' % (first_order_prop, ineq_prop_name)) if", "ancestor.name_space())) ineq_prop_name = None for filter in filters: if filter.property_size() != 1: BadRequest('Filter", "'to which the inequality filter is applied. 
In your query ' 'the first", "import BadRequestError from google.appengine.datastore import datastore_index from google.appengine.datastore import datastore_pb from google.appengine.datastore import", "filter ' 'is on %s' % (first_order_prop, ineq_prop_name)) if not query.has_kind(): for filter", "if num_components > max_query_components: BadRequest('query is too large. may not have more than", "= False end_key = None end_inclusive = False key_prop = datastore_types._KEY_SPECIAL_PROPERTY for f", "% (query.app(), ancestor.app())) if query.name_space() != ancestor.name_space(): BadRequest('query namespace is %s but ancestor", "start_key: start_key = limit start_inclusive = True remaining_orders = [] for o in", "== '__kind__' and isinstance(key_path[1], basestring)): return key_path[1] raise BadRequestError('invalid Key for __kind__ table')", "a key for a __namespace__ instance, or a false value. Returns: namespace specified", "A Query PB. filters: the normalized filters from query. orders: the normalized orders", "name from __kind__ key. Raises an ApplicationError if the key is not of", "= limit start_inclusive = False elif (op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL):", "transactional queries have an ancestor - queries that are not too large (sum", "queries. Args: query: A Query PB. filters: the normalized filters from query. 
orders:", "raise BadRequestError('invalid Key for __kind__ table') def ParseNamespaceQuery(query, filters, orders): \"\"\"Parse __namespace__ queries.", "max_query_components) - ancestor (if any) app and namespace match query app and namespace", "%s and %s') % (ineq_prop_name, prop_name)) if ineq_prop_name is not None and orders:", "prop.name().decode('utf-8') if prop_name == key_prop_name: if not prop.value().has_referencevalue(): BadRequest('%s filter value must be", "normalized (by datastore_index.Normalize) orders from query max_query_components: limit on query complexity \"\"\" def", "an ApplicationError if the key is not of the form '__kind__'/name. Args: key:", "filters: the normalized filters from query. orders: the normalized orders from query. Returns:", "first order (if any) must be on X Args: query: query to validate", "with the License. # You may obtain a copy of the License at", "(<, <=, >, >=) filters all applied to the same property - filters", "__namespace__ instance, or a false value. 
Returns: namespace specified by key, or key", "def FillUsersInQuery(filters): \"\"\"Fill in a synthetic user ID for all user properties in", "f.property(0).value() if not val.has_referencevalue(): raise BadRequestError('__key__ kind must be compared to a key')", "%s' % (query.app(), ancestor.app())) if query.name_space() != ancestor.name_space(): BadRequest('query namespace is %s but", "= prop.name().decode('utf-8') if prop_name == key_prop_name: if not prop.value().has_referencevalue(): BadRequest('%s filter value must", "filters from query orders: normalized (by datastore_index.Normalize) orders from query max_query_components: limit on", "datastore_pb.Query_Filter.LESS_THAN: if end_key is None or limit <= end_key: end_key = limit end_inclusive", "% (key_prop_name, ref_val.app(), query.app())) if ref_val.name_space() != query.name_space(): BadRequest('%s filter namespace is %s", "be compared to a key') limit = datastore_types.FromReferenceProperty(val) if op == datastore_pb.Query_Filter.LESS_THAN: if", "ref_val = prop.value().referencevalue() if ref_val.app() != query.app(): BadRequest('%s filter app is %s but", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "start_inclusive = False end_key = None end_inclusive = False key_prop = datastore_types._KEY_SPECIAL_PROPERTY for", "_KindKeyToString(key): \"\"\"Extract kind name from __kind__ key. 
Raises an ApplicationError if the key", "+ ''.join(['%02d' % ord(x) for x in user_id_digest])[:20] return user_id def FillUsersInQuery(filters): \"\"\"Fill", "both %s and %s') % (ineq_prop_name, prop_name)) if ineq_prop_name is not None and", "+ sort orders ancestor total' % max_query_components) if query.has_ancestor(): ancestor = query.ancestor() if", "%d properties, expected 1' % filter.property_size()) prop = filter.property(0) prop_name = prop.name().decode('utf-8') if", "x in user_id_digest])[:20] return user_id def FillUsersInQuery(filters): \"\"\"Fill in a synthetic user ID", "first_order_prop != ineq_prop_name: BadRequest('The first sort property must be the same as the", "False end_key = None end_inclusive = False key_prop = datastore_types._KEY_SPECIAL_PROPERTY for f in", "1' % filter.property_size()) prop = filter.property(0) prop_name = prop.name().decode('utf-8') if prop_name == key_prop_name:", "= ParseKeyFilteredQuery( filters, orders) return (_KindKeyToString(start_kind), start_inclusive, _KindKeyToString(end_kind), end_inclusive) def _KindKeyToString(key): \"\"\"Extract kind", "__key__ ascending - multiple inequality (<, <=, >, >=) filters all applied to", "__kind__ table') def ParseNamespaceQuery(query, filters, orders): \"\"\"Parse __namespace__ queries. Raises exceptions for illegal", "but ancestor app is %s' % (query.app(), ancestor.app())) if query.name_space() != ancestor.name_space(): BadRequest('query", "found in the production system. Args: email: An email address. Returns: A string", "\"\"\"Parse queries which only allow filters and ascending-orders on __key__. Raises exceptions for", "__kind__ instance, or a false value. Returns: kind specified by key, or key", "Args: filters: the normalized filters of a query. 
orders: the normalized orders of", "ineq_prop_name is None: ineq_prop_name = prop_name elif ineq_prop_name != prop_name: BadRequest(('Only one inequality", "and f.property(0).name() == key_prop and not (op == datastore_pb.Query_Filter.IN or op == datastore_pb.Query_Filter.EXISTS)):", "law or agreed to in writing, software # distributed under the License is", "or op == datastore_pb.Query_Filter.EQUAL): if end_key is None or limit < end_key: end_key", "limit start_inclusive = False elif (op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if", "the License for the specific language governing permissions and # limitations under the", "raise apiproxy_errors.ApplicationError( datastore_pb.Error.BAD_REQUEST, message) key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name = ( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if query.has_transaction():", "None or limit > start_key: start_key = limit start_inclusive = True remaining_orders =", "= key.to_path() if len(key_path) == 2 and key_path[0] == '__namespace__': if key_path[1] ==", "your query ' 'the first sort property is %s but the inequality filter", "for filter in filters: for property in filter.property_list(): FillUser(property) def FillUser(property): \"\"\"Fill in", "datastore_index.Normalize) filters from query orders: normalized (by datastore_index.Normalize) orders from query max_query_components: limit", "if query.has_ancestor(): ancestor = query.ancestor() if query.app() != ancestor.app(): BadRequest('query app is %s", "FillUser(property): \"\"\"Fill in a synthetic user ID for a user properties. 
Args: property:", "except __key__ ascending') def ParseKeyFilteredQuery(filters, orders): \"\"\"Parse queries which only allow filters and", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "__namespace__ table') def SynthesizeUserId(email): \"\"\"Return a synthetic user ID from an email address.", "if query.has_ancestor(): num_components += 1 if num_components > max_query_components: BadRequest('query is too large.", "filters from query. orders: the normalized orders from query. Returns: The kind range", "but the inequality filter ' 'is on %s' % (first_order_prop, ineq_prop_name)) if not", "the query. \"\"\" remaining_filters = [] start_key = None start_inclusive = False end_key", "Key for __kind__ table') def ParseNamespaceQuery(query, filters, orders): \"\"\"Parse __namespace__ queries. Raises exceptions", "prop_name = order.property().decode('utf-8') if not (prop_name == key_prop_name and order.direction() is datastore_pb.Query_Order.ASCENDING): BadRequest('kind", "max_query_components): \"\"\"Validate a datastore query with normalized filters, orders. Raises an ApplicationError when", "query.name_space())) if (filter.op() in datastore_index.INEQUALITY_OPERATORS and prop_name != unapplied_log_timestamp_us_name): if ineq_prop_name is None:", "BadRequest(('Only one inequality filter per query is supported. 
' 'Encountered both %s and", "from google.appengine.api.datastore_errors import BadRequestError from google.appengine.datastore import datastore_index from google.appengine.datastore import datastore_pb from", "key.to_path() if len(key_path) == 2 and key_path[0] == '__namespace__': if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID:", "query: query to validate filters: normalized (by datastore_index.Normalize) filters from query orders: normalized", "- kindless queries only filter on __key__ and only sort on __key__ ascending", "__key__ and only sort on __key__ ascending - multiple inequality (<, <=, >,", "limitations under the License. # \"\"\"Utility functions shared between the file and sqlite", "\"\"\" def BadRequest(message): raise apiproxy_errors.ApplicationError( datastore_pb.Error.BAD_REQUEST, message) key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name = (", "filter on prop X is used, the first order (if any) must be", "== datastore_pb.Query_Filter.IN or op == datastore_pb.Query_Filter.EXISTS)): remaining_filters.append(f) continue val = f.property(0).value() if not", "%s but ancestor app is %s' % (query.app(), ancestor.app())) if query.name_space() != ancestor.name_space():", "%s filters' ' + sort orders ancestor total' % max_query_components) if query.has_ancestor(): ancestor", "!= unapplied_log_timestamp_us_name): BadRequest('kind is required for non-__key__ filters') for order in orders: prop_name", "key_path[0] == '__namespace__': if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID: return '' if isinstance(key_path[1], basestring): return", "datastore_index.INEQUALITY_OPERATORS and prop_name != unapplied_log_timestamp_us_name): if ineq_prop_name is None: ineq_prop_name = prop_name elif", "datastore_types from google.appengine.api.datastore_errors import BadRequestError from google.appengine.datastore import datastore_index from google.appengine.datastore import datastore_pb", "not too large (sum of filters, 
orders, ancestor <= max_query_components) - ancestor (if", "filters, orders, max_query_components): \"\"\"Validate a datastore query with normalized filters, orders. Raises an", "== '__namespace__': if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID: return '' if isinstance(key_path[1], basestring): return key_path[1]", "in the same app and namespace as the query - if an inequality", "of the form '__kind__'/name. Args: key: a key for a __kind__ instance, or", "end, end_inclusive) requested in the query. \"\"\" if query.has_ancestor(): raise BadRequestError('ancestor queries not", "Args: key: a key for a __kind__ instance, or a false value. Returns:", "return key_path[1] raise BadRequestError('invalid Key for __kind__ table') def ParseNamespaceQuery(query, filters, orders): \"\"\"Parse", "in compliance with the License. # You may obtain a copy of the", "language governing permissions and # limitations under the License. # \"\"\"Utility functions shared", "ancestor.app(): BadRequest('query app is %s but ancestor app is %s' % (query.app(), ancestor.app()))", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "ParseKeyFilteredQuery( filters, orders) return (_NamespaceKeyToString(start_kind), start_inclusive, _NamespaceKeyToString(end_kind), end_inclusive) def _NamespaceKeyToString(key): \"\"\"Extract namespace name", "the form '__namespace__'/name or '__namespace__'/_EMPTY_NAMESPACE_ID. Args: key: a key for a __namespace__ instance,", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "end, end_inclusive) requested in the query. \"\"\" remaining_filters = [] start_key = None", "key for a __namespace__ instance, or a false value. 
Returns: namespace specified by", "end_key = limit end_inclusive = False elif (op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or op ==", "if query.app() != ancestor.app(): BadRequest('query app is %s but ancestor app is %s'", "def BadRequest(message): raise apiproxy_errors.ApplicationError( datastore_pb.Error.BAD_REQUEST, message) key_prop_name = datastore_types._KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name = ( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY)", "start_inclusive = False elif (op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if start_key", "applied to the same property - filters on __key__ compare to a reference", "exceptions for illegal queries. Args: query: A Query PB. filters: the normalized filters", "'' if isinstance(key_path[1], basestring): return key_path[1] raise BadRequestError('invalid Key for __namespace__ table') def", "(query.app(), ancestor.app())) if query.name_space() != ancestor.name_space(): BadRequest('query namespace is %s but ancestor namespace", "\"\"\" user_id_digest = md5.new(email.lower()).digest() user_id = '1' + ''.join(['%02d' % ord(x) for x", "= True if op == datastore_pb.Query_Filter.GREATER_THAN: if start_key is None or limit >=", "if first_order_prop != ineq_prop_name: BadRequest('The first sort property must be the same as", "None end_inclusive = False key_prop = datastore_types._KEY_SPECIAL_PROPERTY for f in filters: op =", "remaining_orders: raise BadRequestError('Only ascending order on ' + key_prop + ' supported') return", "ParseNamespaceQuery(query, filters, orders): \"\"\"Parse __namespace__ queries. Raises exceptions for illegal queries. Args: query:", "queries. Raises exceptions for illegal queries. Args: query: A Query PB. 
filters: the", "See the License for the specific language governing permissions and # limitations under", "key_prop + ' supported') return (start_key, start_inclusive, end_key, end_inclusive) def ParseKindQuery(query, filters, orders):", "not key: return key key_path = key.to_path() if len(key_path) == 2 and key_path[0]", "this is not the same user ID found in the production system. Args:", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "shared between the file and sqlite datastore stubs.\"\"\" import md5 from google.appengine.api import", "prop_name: BadRequest(('Only one inequality filter per query is supported. ' 'Encountered both %s", "is not None and orders: first_order_prop = orders[0].property().decode('utf-8') if first_order_prop != ineq_prop_name: BadRequest('The", "key, or key if key is a false value. \"\"\" if not key:", "the same user ID found in the production system. Args: email: An email", "on __key__ ascending - multiple inequality (<, <=, >, >=) filters all applied", "email address. \"\"\" user_id_digest = md5.new(email.lower()).digest() user_id = '1' + ''.join(['%02d' % ord(x)", "ApplicationError if the key is not of the form '__kind__'/name. Args: key: a", "None or limit <= end_key: end_key = limit end_inclusive = False elif (op", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "query.has_ancestor(): ancestor = query.ancestor() if query.app() != ancestor.app(): BadRequest('query app is %s but", "is None or limit < end_key: end_key = limit end_inclusive = True if", "in the production system. Args: email: An email address. 
Returns: A string userid", "prop_name = filter.property(0).name().decode('utf-8') if (prop_name != key_prop_name and prop_name != unapplied_log_timestamp_us_name): BadRequest('kind is", "datastore_types._EMPTY_NAMESPACE_ID: return '' if isinstance(key_path[1], basestring): return key_path[1] raise BadRequestError('invalid Key for __namespace__", "is used, the first order (if any) must be on X Args: query:", "unapplied_log_timestamp_us_name): BadRequest('kind is required for non-__key__ filters') for order in orders: prop_name =", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "key_path = key.to_path() if len(key_path) == 2 and key_path[0] == '__namespace__': if key_path[1]", "elif (op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if start_key is None or", "limit on query complexity \"\"\" def BadRequest(message): raise apiproxy_errors.ApplicationError( datastore_pb.Error.BAD_REQUEST, message) key_prop_name =", "end_inclusive) requested in the query. \"\"\" if query.has_ancestor(): raise BadRequestError('ancestor queries not allowed')", "on ' + key_prop + ' supported') if remaining_orders: raise BadRequestError('Only ascending order", "orders: the normalized orders from query. Returns: The kind range (start, start_inclusive, end,", "if isinstance(key_path[1], basestring): return key_path[1] raise BadRequestError('invalid Key for __namespace__ table') def SynthesizeUserId(email):", "limit start_inclusive = True remaining_orders = [] for o in orders: if not", "Raises an ApplicationError when any of the following conditions are violated: - transactional", "end_key, end_inclusive) def ParseKindQuery(query, filters, orders): \"\"\"Parse __kind__ (schema) queries. 
Raises exceptions for", "(o.direction() == datastore_pb.Query_Order.ASCENDING and o.property() == datastore_types._KEY_SPECIAL_PROPERTY): remaining_orders.append(o) else: break if remaining_filters: raise", "__kind__ (schema) queries. Raises exceptions for illegal queries. Args: query: A Query PB.", "% key_prop_name) ref_val = prop.value().referencevalue() if ref_val.app() != query.app(): BadRequest('%s filter app is", "is supported. ' 'Encountered both %s and %s') % (ineq_prop_name, prop_name)) if ineq_prop_name", "a set of filters. Args: filters: The normalized filters from query. \"\"\" for", "= False key_prop = datastore_types._KEY_SPECIAL_PROPERTY for f in filters: op = f.op() if", "to validate filters: normalized (by datastore_index.Normalize) filters from query orders: normalized (by datastore_index.Normalize)", "ascending-orders on __key__. Raises exceptions for illegal queries. Args: filters: the normalized filters", "filters: if filter.property_size() != 1: BadRequest('Filter has %d properties, expected 1' % filter.property_size())", "end_key is None or limit < end_key: end_key = limit end_inclusive = True", "allowed inside transactions.') num_components = len(filters) + len(orders) if query.has_ancestor(): num_components += 1", "app is %s' % (query.app(), ancestor.app())) if query.name_space() != ancestor.name_space(): BadRequest('query namespace is", "BadRequest('query namespace is %s but ancestor namespace is %s' % (query.name_space(), ancestor.name_space())) ineq_prop_name", "'the first sort property is %s but the inequality filter ' 'is on", "property ' 'to which the inequality filter is applied. 
In your query '", "ancestor.app())) if query.name_space() != ancestor.name_space(): BadRequest('query namespace is %s but ancestor namespace is", "return (_NamespaceKeyToString(start_kind), start_inclusive, _NamespaceKeyToString(end_kind), end_inclusive) def _NamespaceKeyToString(key): \"\"\"Extract namespace name from __namespace__ key.", "\"\"\"Utility functions shared between the file and sqlite datastore stubs.\"\"\" import md5 from", "key_path[1] == datastore_types._EMPTY_NAMESPACE_ID: return '' if isinstance(key_path[1], basestring): return key_path[1] raise BadRequestError('invalid Key", "if key is a false value. \"\"\" if not key: return key key_path", "end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_NamespaceKeyToString(start_kind), start_inclusive, _NamespaceKeyToString(end_kind), end_inclusive) def _NamespaceKeyToString(key): \"\"\"Extract", "query.ancestor() if query.app() != ancestor.app(): BadRequest('query app is %s but ancestor app is", "op == datastore_pb.Query_Filter.EQUAL): if end_key is None or limit < end_key: end_key =", "namespace is %s' % (key_prop_name, ref_val.name_space(), query.name_space())) if (filter.op() in datastore_index.INEQUALITY_OPERATORS and prop_name", "user ID from an email address. Note that this is not the same", "BadRequestError('invalid Key for __namespace__ table') def SynthesizeUserId(email): \"\"\"Return a synthetic user ID from", "the key is not of the form '__kind__'/name. 
Args: key: a key for", "Key for __namespace__ table') def SynthesizeUserId(email): \"\"\"Return a synthetic user ID from an", "import md5 from google.appengine.api import datastore_types from google.appengine.api.datastore_errors import BadRequestError from google.appengine.datastore import", "Version 2.0 (the \"License\"); # you may not use this file except in", "inequality (<, <=, >, >=) filters all applied to the same property -", "if query.name_space() != ancestor.name_space(): BadRequest('query namespace is %s but ancestor namespace is %s'", "except in compliance with the License. # You may obtain a copy of", "(op == datastore_pb.Query_Filter.IN or op == datastore_pb.Query_Filter.EXISTS)): remaining_filters.append(f) continue val = f.property(0).value() if", "start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_KindKeyToString(start_kind), start_inclusive, _KindKeyToString(end_kind), end_inclusive) def", "on X Args: query: query to validate filters: normalized (by datastore_index.Normalize) filters from", "any) app and namespace match query app and namespace - kindless queries only", "illegal queries. Args: query: A Query PB. 
filters: the normalized filters from query.", "== datastore_pb.Query_Filter.EQUAL): if start_key is None or limit > start_key: start_key = limit", "remaining_orders = [] for o in orders: if not (o.direction() == datastore_pb.Query_Order.ASCENDING and", "first_order_prop = orders[0].property().decode('utf-8') if first_order_prop != ineq_prop_name: BadRequest('The first sort property must be", "on %s' % (first_order_prop, ineq_prop_name)) if not query.has_kind(): for filter in filters: prop_name", "query to validate filters: normalized (by datastore_index.Normalize) filters from query orders: normalized (by", "= filter.property(0).name().decode('utf-8') if (prop_name != key_prop_name and prop_name != unapplied_log_timestamp_us_name): BadRequest('kind is required", "filters, orders) return (_KindKeyToString(start_kind), start_inclusive, _KindKeyToString(end_kind), end_inclusive) def _KindKeyToString(key): \"\"\"Extract kind name from", "false value. \"\"\" if not key: return key key_path = key.to_path() if len(key_path)", "val = f.property(0).value() if not val.has_referencevalue(): raise BadRequestError('__key__ kind must be compared to", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "end_key = limit end_inclusive = True if op == datastore_pb.Query_Filter.GREATER_THAN: if start_key is", "from an email address. Note that this is not the same user ID", "must be a Key' % key_prop_name) ref_val = prop.value().referencevalue() if ref_val.app() != query.app():", "exceptions for illegal queries. Args: filters: the normalized filters of a query. 
orders:", "has %d properties, expected 1' % filter.property_size()) prop = filter.property(0) prop_name = prop.name().decode('utf-8')", "or op == datastore_pb.Query_Filter.EXISTS)): remaining_filters.append(f) continue val = f.property(0).value() if not val.has_referencevalue(): raise", ">=) filters all applied to the same property - filters on __key__ compare", "apiproxy_errors def ValidateQuery(query, filters, orders, max_query_components): \"\"\"Validate a datastore query with normalized filters,", "filters, orders): \"\"\"Parse __kind__ (schema) queries. Raises exceptions for illegal queries. Args: query:", "filters from query. \"\"\" for filter in filters: for property in filter.property_list(): FillUser(property)", "'Encountered both %s and %s') % (ineq_prop_name, prop_name)) if ineq_prop_name is not None", "key_prop_name) ref_val = prop.value().referencevalue() if ref_val.app() != query.app(): BadRequest('%s filter app is %s", "= limit start_inclusive = True remaining_orders = [] for o in orders: if", "match query app and namespace - kindless queries only filter on __key__ and", "_NamespaceKeyToString(end_kind), end_inclusive) def _NamespaceKeyToString(key): \"\"\"Extract namespace name from __namespace__ key. Raises an ApplicationError", "None or limit < end_key: end_key = limit end_inclusive = True if op", "\"\"\"Validate a datastore query with normalized filters, orders. Raises an ApplicationError when any", "Key' % key_prop_name) ref_val = prop.value().referencevalue() if ref_val.app() != query.app(): BadRequest('%s filter app", "%s but query app is %s' % (key_prop_name, ref_val.app(), query.app())) if ref_val.name_space() !=", "(key_prop_name, ref_val.name_space(), query.name_space())) if (filter.op() in datastore_index.INEQUALITY_OPERATORS and prop_name != unapplied_log_timestamp_us_name): if ineq_prop_name", "conditions are violated: - transactional queries have an ancestor - queries that are", "applied. 
In your query ' 'the first sort property is %s but the", "raise BadRequestError('invalid Key for __namespace__ table') def SynthesizeUserId(email): \"\"\"Return a synthetic user ID", "ascending - multiple inequality (<, <=, >, >=) filters all applied to the", "' + key_prop + ' supported') if remaining_orders: raise BadRequestError('Only ascending order on", "key key_path = key.to_path() if len(key_path) == 2 and key_path[0] == '__namespace__': if", "1 and f.property(0).name() == key_prop and not (op == datastore_pb.Query_Filter.IN or op ==", "which only allow filters and ascending-orders on __key__. Raises exceptions for illegal queries.", "filter value must be a Key' % key_prop_name) ref_val = prop.value().referencevalue() if ref_val.app()", "compared to a key') limit = datastore_types.FromReferenceProperty(val) if op == datastore_pb.Query_Filter.LESS_THAN: if end_key", "BadRequest('query app is %s but ancestor app is %s' % (query.app(), ancestor.app())) if", "== 1 and f.property(0).name() == key_prop and not (op == datastore_pb.Query_Filter.IN or op", "== key_prop and not (op == datastore_pb.Query_Filter.IN or op == datastore_pb.Query_Filter.EXISTS)): remaining_filters.append(f) continue", "query ' 'the first sort property is %s but the inequality filter '", "None: ineq_prop_name = prop_name elif ineq_prop_name != prop_name: BadRequest(('Only one inequality filter per", "start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_NamespaceKeyToString(start_kind), start_inclusive, _NamespaceKeyToString(end_kind), end_inclusive)", "end_inclusive) def _NamespaceKeyToString(key): \"\"\"Extract namespace name from __namespace__ key. 
Raises an ApplicationError if", "for filter in filters: prop_name = filter.property(0).name().decode('utf-8') if (prop_name != key_prop_name and prop_name", "allowed') start_kind, start_inclusive, end_kind, end_inclusive = ParseKeyFilteredQuery( filters, orders) return (_NamespaceKeyToString(start_kind), start_inclusive, _NamespaceKeyToString(end_kind),", "= prop.value().referencevalue() if ref_val.app() != query.app(): BadRequest('%s filter app is %s but query", "filters of a query. orders: the normalized orders of a query. Returns: The", "\"\"\" if not key: return key key_path = key.to_path() if len(key_path) == 2", "BadRequestError('__key__ kind must be compared to a key') limit = datastore_types.FromReferenceProperty(val) if op", "Args: property: A Property which may have a user value. \"\"\" if property.value().has_uservalue():", "- multiple inequality (<, <=, >, >=) filters all applied to the same", "but query namespace is %s' % (key_prop_name, ref_val.name_space(), query.name_space())) if (filter.op() in datastore_index.INEQUALITY_OPERATORS", "if ineq_prop_name is not None and orders: first_order_prop = orders[0].property().decode('utf-8') if first_order_prop !=", "Returns: namespace specified by key, or key if key is a false value.", "query.name_space() != ancestor.name_space(): BadRequest('query namespace is %s but ancestor namespace is %s' %", "if remaining_filters: raise BadRequestError( 'Only comparison filters on ' + key_prop + '", "or limit >= start_key: start_key = limit start_inclusive = False elif (op ==", "table') def ParseNamespaceQuery(query, filters, orders): \"\"\"Parse __namespace__ queries. Raises exceptions for illegal queries.", "__namespace__ queries. Raises exceptions for illegal queries. Args: query: A Query PB. 
filters:", "from google.appengine.runtime import apiproxy_errors def ValidateQuery(query, filters, orders, max_query_components): \"\"\"Validate a datastore query", "% (first_order_prop, ineq_prop_name)) if not query.has_kind(): for filter in filters: prop_name = filter.property(0).name().decode('utf-8')", "or op == datastore_pb.Query_Filter.EQUAL): if start_key is None or limit > start_key: start_key", "__key__ compare to a reference in the same app and namespace as the", "app and namespace - kindless queries only filter on __key__ and only sort", "In your query ' 'the first sort property is %s but the inequality", "user properties in a set of filters. Args: filters: The normalized filters from", "<= max_query_components) - ancestor (if any) app and namespace match query app and", "+ len(orders) if query.has_ancestor(): num_components += 1 if num_components > max_query_components: BadRequest('query is", "query app and namespace - kindless queries only filter on __key__ and only", "datastore_pb.Query_Filter.IN or op == datastore_pb.Query_Filter.EXISTS)): remaining_filters.append(f) continue val = f.property(0).value() if not val.has_referencevalue():", "__namespace__ key. Raises an ApplicationError if the key is not of the form", "start_key = limit start_inclusive = False elif (op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL or op ==", "(_NamespaceKeyToString(start_kind), start_inclusive, _NamespaceKeyToString(end_kind), end_inclusive) def _NamespaceKeyToString(key): \"\"\"Extract namespace name from __namespace__ key. Raises", "not have more than %s filters' ' + sort orders ancestor total' %", "= None end_inclusive = False key_prop = datastore_types._KEY_SPECIAL_PROPERTY for f in filters: op", "compare to a reference in the same app and namespace as the query", "is not of the form '__kind__'/name. Args: key: a key for a __kind__", "the specific language governing permissions and # limitations under the License. 
# \"\"\"Utility", "ineq_prop_name = None for filter in filters: if filter.property_size() != 1: BadRequest('Filter has", "return key key_path = key.to_path() if (len(key_path) == 2 and key_path[0] == '__kind__'", "f in filters: op = f.op() if not (f.property_size() == 1 and f.property(0).name()", "may not have more than %s filters' ' + sort orders ancestor total'", "start_key is None or limit > start_key: start_key = limit start_inclusive = True", "are violated: - transactional queries have an ancestor - queries that are not", "the normalized orders from query. Returns: The kind range (start, start_inclusive, end, end_inclusive)", "or a false value. Returns: kind specified by key, or key if key", "value. Returns: kind specified by key, or key if key is a false", "BadRequest('Only ancestor queries are allowed inside transactions.') num_components = len(filters) + len(orders) if", "that this is not the same user ID found in the production system.", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "!= ineq_prop_name: BadRequest('The first sort property must be the same as the property", "normalized filters from query. orders: the normalized orders from query. Returns: The kind", "orders ancestor total' % max_query_components) if query.has_ancestor(): ancestor = query.ancestor() if query.app() !=", "in the query. \"\"\" if query.has_ancestor(): raise BadRequestError('ancestor queries not allowed') start_kind, start_inclusive,", "user ID for all user properties in a set of filters. Args: filters:", "import datastore_pb from google.appengine.runtime import apiproxy_errors def ValidateQuery(query, filters, orders, max_query_components): \"\"\"Validate a", "requested in the query. \"\"\" remaining_filters = [] start_key = None start_inclusive =", "synthetic user ID for a user properties. Args: property: A Property which may", "of filters. Args: filters: The normalized filters from query. 
\"\"\" for filter in", "prop_name = prop.name().decode('utf-8') if prop_name == key_prop_name: if not prop.value().has_referencevalue(): BadRequest('%s filter value", "False elif (op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if end_key is None", "filters on ' + key_prop + ' supported') if remaining_orders: raise BadRequestError('Only ascending", "num_components += 1 if num_components > max_query_components: BadRequest('query is too large. may not", "Returns: A string userid derived from the email address. \"\"\" user_id_digest = md5.new(email.lower()).digest()", "(by datastore_index.Normalize) filters from query orders: normalized (by datastore_index.Normalize) orders from query max_query_components:", "the normalized filters of a query. orders: the normalized orders of a query.", "functions shared between the file and sqlite datastore stubs.\"\"\" import md5 from google.appengine.api", "used, the first order (if any) must be on X Args: query: query", "is not of the form '__namespace__'/name or '__namespace__'/_EMPTY_NAMESPACE_ID. Args: key: a key for", "a query. orders: the normalized orders of a query. Returns: The key range", "remaining_filters: raise BadRequestError( 'Only comparison filters on ' + key_prop + ' supported')", "filters on __key__ compare to a reference in the same app and namespace", "elif (op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL or op == datastore_pb.Query_Filter.EQUAL): if end_key is None or", "specified by key, or key if key is a false value. \"\"\" if", "ancestor - queries that are not too large (sum of filters, orders, ancestor" ]
[ "Generated by Django 3.2.9 on 2022-01-19 12:06 from django.db import migrations, models import", "12:06 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "[ ('shop', '0003_product_favourite_products'), ('cart', '0003_auto_20220119_0620'), ] operations = [ migrations.RemoveField( model_name='order', name='products', ),", "('cart', '0003_auto_20220119_0620'), ] operations = [ migrations.RemoveField( model_name='order', name='products', ), migrations.AddField( model_name='order', name='product',", "] operations = [ migrations.RemoveField( model_name='order', name='products', ), migrations.AddField( model_name='order', name='product', field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE,", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('shop', '0003_product_favourite_products'), ('cart',", "by Django 3.2.9 on 2022-01-19 12:06 from django.db import migrations, models import django.db.models.deletion", "<filename>cart/migrations/0004_auto_20220119_1206.py # Generated by Django 3.2.9 on 2022-01-19 12:06 from django.db import migrations,", "class Migration(migrations.Migration): dependencies = [ ('shop', '0003_product_favourite_products'), ('cart', '0003_auto_20220119_0620'), ] operations = [", "[ migrations.RemoveField( model_name='order', name='products', ), migrations.AddField( model_name='order', name='product', field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='shop.product'), ), ]", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('shop', '0003_product_favourite_products'), ('cart', '0003_auto_20220119_0620'),", "Django 3.2.9 on 2022-01-19 12:06 from django.db import migrations, models import django.db.models.deletion class", "models import django.db.models.deletion class 
Migration(migrations.Migration): dependencies = [ ('shop', '0003_product_favourite_products'), ('cart', '0003_auto_20220119_0620'), ]", "on 2022-01-19 12:06 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "Migration(migrations.Migration): dependencies = [ ('shop', '0003_product_favourite_products'), ('cart', '0003_auto_20220119_0620'), ] operations = [ migrations.RemoveField(", "dependencies = [ ('shop', '0003_product_favourite_products'), ('cart', '0003_auto_20220119_0620'), ] operations = [ migrations.RemoveField( model_name='order',", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('shop', '0003_product_favourite_products'),", "= [ migrations.RemoveField( model_name='order', name='products', ), migrations.AddField( model_name='order', name='product', field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='shop.product'), ),", "2022-01-19 12:06 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "# Generated by Django 3.2.9 on 2022-01-19 12:06 from django.db import migrations, models", "'0003_product_favourite_products'), ('cart', '0003_auto_20220119_0620'), ] operations = [ migrations.RemoveField( model_name='order', name='products', ), migrations.AddField( model_name='order',", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('shop', '0003_product_favourite_products'), ('cart', '0003_auto_20220119_0620'), ] operations =", "('shop', '0003_product_favourite_products'), ('cart', '0003_auto_20220119_0620'), ] operations = [ migrations.RemoveField( model_name='order', name='products', ), migrations.AddField(", "operations = [ migrations.RemoveField( model_name='order', name='products', ), migrations.AddField( model_name='order', name='product', 
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='shop.product'),", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('shop', '0003_product_favourite_products'), ('cart', '0003_auto_20220119_0620'), ] operations", "3.2.9 on 2022-01-19 12:06 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('shop',", "'0003_auto_20220119_0620'), ] operations = [ migrations.RemoveField( model_name='order', name='products', ), migrations.AddField( model_name='order', name='product', field=models.ForeignKey(default='',", "= [ ('shop', '0003_product_favourite_products'), ('cart', '0003_auto_20220119_0620'), ] operations = [ migrations.RemoveField( model_name='order', name='products'," ]
[ "from ..core.models import base_decorator from .models import datasync_backends datasync_backend = datasync_backends[\"us-east-1\"] mock_datasync =", "..core.models import base_decorator from .models import datasync_backends datasync_backend = datasync_backends[\"us-east-1\"] mock_datasync = base_decorator(datasync_backends)" ]
[ "self.colorLabel.setText(_translate(\"MainWindow\", \"Color:\")) self.label_2.setText(_translate(\"MainWindow\", \"Value (0.0~1.0):\")) self.valueLine.setText(_translate(\"MainWindow\", \"0.5\")) self.deleteContourBtn.setText(_translate(\"MainWindow\", \"Delete Contour\")) self.saveContourBtn.setText(_translate(\"MainWindow\", \"Save Contour\"))", "self.controlPanelLayout = QtWidgets.QVBoxLayout(self.controlPanelWidget) self.controlPanelLayout.setContentsMargins(20, 20, 20, 20) self.controlPanelLayout.setObjectName(\"controlPanelLayout\") self.openFileBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0))", "QtWidgets.QAction(MainWindow) self.actionOpen.setObjectName(\"actionOpen\") self.actionOpen_2 = QtWidgets.QAction(MainWindow) self.actionOpen_2.setObjectName(\"actionOpen_2\") self.actionSave = QtWidgets.QAction(MainWindow) self.actionSave.setObjectName(\"actionSave\") self.actionQuit = QtWidgets.QAction(MainWindow)", "QtWidgets.QFormLayout.LabelRole, self.label_2) self.valueLine = QtWidgets.QLineEdit(self.contourOptionWidget) self.valueLine.setObjectName(\"valueLine\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.valueLine) self.controlPanelLayout.addWidget(self.contourOptionWidget) self.deleteContourBtn = QtWidgets.QPushButton(self.controlPanelWidget)", "QtWidgets.QAction(MainWindow) self.actionQuit.setObjectName(\"actionQuit\") self.menuFile.addAction(self.actionOpen) self.menuFile.addAction(self.actionOpen_2) self.menuFile.addAction(self.actionSave) self.menuFile.addAction(self.actionQuit) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuEdit.menuAction()) self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self,", "self.actionOpen = QtWidgets.QAction(MainWindow) self.actionOpen.setObjectName(\"actionOpen\") self.actionOpen_2 = 
QtWidgets.QAction(MainWindow) self.actionOpen_2.setObjectName(\"actionOpen_2\") self.actionSave = QtWidgets.QAction(MainWindow) self.actionSave.setObjectName(\"actionSave\") self.actionQuit", "0)) self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.openFileBtn.setObjectName(\"openFileBtn\") self.controlPanelLayout.addWidget(self.openFileBtn) self.choosePointBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0)) self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.choosePointBtn.setObjectName(\"choosePointBtn\")", "self.actionSave = QtWidgets.QAction(MainWindow) self.actionSave.setObjectName(\"actionSave\") self.actionQuit = QtWidgets.QAction(MainWindow) self.actionQuit.setObjectName(\"actionQuit\") self.menuFile.addAction(self.actionOpen) self.menuFile.addAction(self.actionOpen_2) self.menuFile.addAction(self.actionSave) self.menuFile.addAction(self.actionQuit) self.menubar.addAction(self.menuFile.menuAction())", "file will be lost! 
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def", "QtWidgets.QWidget(self.controlPanelWidget) self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0)) self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215)) self.contourOptionWidget.setObjectName(\"contourOptionWidget\") self.contourOptionLayout = QtWidgets.QFormLayout(self.contourOptionWidget) self.contourOptionLayout.setContentsMargins(5, 5, 5, 5)", "self.mainLayout.setObjectName(\"mainLayout\") self.imageWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.imageWidget.setMinimumSize(QtCore.QSize(500, 300)) self.imageWidget.setObjectName(\"imageWidget\") self.imageLayout = QtWidgets.QVBoxLayout(self.imageWidget) self.imageLayout.setContentsMargins(20, 20, 20,", "self.statusBar.setObjectName(\"statusBar\") MainWindow.setStatusBar(self.statusBar) self.actionOpen = QtWidgets.QAction(MainWindow) self.actionOpen.setObjectName(\"actionOpen\") self.actionOpen_2 = QtWidgets.QAction(MainWindow) self.actionOpen_2.setObjectName(\"actionOpen_2\") self.actionSave = QtWidgets.QAction(MainWindow)", "self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0)) self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215)) self.contourOptionWidget.setObjectName(\"contourOptionWidget\") self.contourOptionLayout = QtWidgets.QFormLayout(self.contourOptionWidget) self.contourOptionLayout.setContentsMargins(5, 5, 5, 5) self.contourOptionLayout.setObjectName(\"contourOptionLayout\")", "self.centralwidget.setMinimumSize(QtCore.QSize(800, 400)) self.centralwidget.setObjectName(\"centralwidget\") self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget) self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 1386, 720)) self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\") self.mainLayout =", "self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget) self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 1386, 720)) 
self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\") self.mainLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) self.mainLayout.setContentsMargins(50, 50,", "QtWidgets.QLabel(self.contourOptionWidget) self.label_2.setObjectName(\"label_2\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2) self.valueLine = QtWidgets.QLineEdit(self.contourOptionWidget) self.valueLine.setObjectName(\"valueLine\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.valueLine) self.controlPanelLayout.addWidget(self.contourOptionWidget)", "self.thicknessComboBox) self.colorLabel = QtWidgets.QLabel(self.contourOptionWidget) self.colorLabel.setObjectName(\"colorLabel\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel) self.colorBtn = QtWidgets.QPushButton(self.contourOptionWidget) self.colorBtn.setText(\"\") self.colorBtn.setObjectName(\"colorBtn\")", "self.thicknessComboBox.setObjectName(\"thicknessComboBox\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox) self.colorLabel = QtWidgets.QLabel(self.contourOptionWidget) self.colorLabel.setObjectName(\"colorLabel\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole,", "self.menuFile.setTitle(_translate(\"MainWindow\", \"File\")) self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\")) self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\")) self.actionOpen.setText(_translate(\"MainWindow\", \"New\")) self.actionOpen_2.setText(_translate(\"MainWindow\", \"Open\")) self.actionSave.setText(_translate(\"MainWindow\", \"Save\")) self.actionQuit.setText(_translate(\"MainWindow\",", "self.deleteContourBtn.setObjectName(\"deleteContourBtn\") self.controlPanelLayout.addWidget(self.deleteContourBtn) self.saveContourBtn = 
QtWidgets.QPushButton(self.controlPanelWidget) self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.saveContourBtn.setObjectName(\"saveContourBtn\") self.controlPanelLayout.addWidget(self.saveContourBtn) self.mainLayout.addWidget(self.controlPanelWidget) MainWindow.setCentralWidget(self.centralwidget)", "coding: utf-8 -*- # Form implementation generated from reading ui file 'basic.ui' #", "\"Delete Contour\")) self.saveContourBtn.setText(_translate(\"MainWindow\", \"Save Contour\")) self.menuFile.setTitle(_translate(\"MainWindow\", \"File\")) self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\")) self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\")) self.actionOpen.setText(_translate(\"MainWindow\", \"New\"))", "self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\")) self.actionOpen.setText(_translate(\"MainWindow\", \"New\")) self.actionOpen_2.setText(_translate(\"MainWindow\", \"Open\")) self.actionSave.setText(_translate(\"MainWindow\", \"Save\")) self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\")) from Drawable import", "20) self.imageLayout.setObjectName(\"imageLayout\") self.imageLabel = DrawableQLabel(self.imageWidget) self.imageLabel.setObjectName(\"imageLabel\") self.imageLayout.addWidget(self.imageLabel, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter) self.mainLayout.addWidget(self.imageWidget) self.controlPanelWidget = QtWidgets.QWidget(self.horizontalLayoutWidget)", "(0.0~1.0):\")) self.valueLine.setText(_translate(\"MainWindow\", \"0.5\")) self.deleteContourBtn.setText(_translate(\"MainWindow\", \"Delete Contour\")) self.saveContourBtn.setText(_translate(\"MainWindow\", \"Save Contour\")) self.menuFile.setTitle(_translate(\"MainWindow\", \"File\")) self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\"))", "import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") 
MainWindow.resize(1331, 732) self.centralwidget", "self.controlPanelLayout.addWidget(self.openFileBtn) self.choosePointBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0)) self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.choosePointBtn.setObjectName(\"choosePointBtn\") self.controlPanelLayout.addWidget(self.choosePointBtn) self.findContourBtn = QtWidgets.QPushButton(self.controlPanelWidget)", "0)) self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.saveContourBtn.setObjectName(\"saveContourBtn\") self.controlPanelLayout.addWidget(self.saveContourBtn) self.mainLayout.addWidget(self.controlPanelWidget) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 1331,", "WARNING! All changes made in this file will be lost! from PyQt5 import", "= QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName(\"menuFile\") self.menuEdit = QtWidgets.QMenu(self.menubar) self.menuEdit.setObjectName(\"menuEdit\") self.menuHelp = QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName(\"menuHelp\") MainWindow.setMenuBar(self.menubar) self.statusBar", "# Form implementation generated from reading ui file 'basic.ui' # # Created by:", "= QtWidgets.QLabel(self.contourOptionWidget) self.colorLabel.setObjectName(\"colorLabel\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel) self.colorBtn = QtWidgets.QPushButton(self.contourOptionWidget) self.colorBtn.setText(\"\") self.colorBtn.setObjectName(\"colorBtn\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole,", "self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215)) self.contourOptionWidget.setObjectName(\"contourOptionWidget\") self.contourOptionLayout = QtWidgets.QFormLayout(self.contourOptionWidget) self.contourOptionLayout.setContentsMargins(5, 5, 5, 5) 
self.contourOptionLayout.setObjectName(\"contourOptionLayout\") self.thicknessLabel =", "in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class", "self.thicknessLabel = QtWidgets.QLabel(self.contourOptionWidget) self.thicknessLabel.setObjectName(\"thicknessLabel\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.thicknessLabel) self.thicknessComboBox = QtWidgets.QComboBox(self.contourOptionWidget) self.thicknessComboBox.setObjectName(\"thicknessComboBox\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\")", "will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self,", "self.findContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.findContourBtn.setObjectName(\"findContourBtn\") self.controlPanelLayout.addWidget(self.findContourBtn) self.contourOptionWidget = QtWidgets.QWidget(self.controlPanelWidget) self.contourOptionWidget.setMinimumSize(QtCore.QSize(200,", "QtWidgets.QWidget(self.horizontalLayoutWidget) self.imageWidget.setMinimumSize(QtCore.QSize(500, 300)) self.imageWidget.setObjectName(\"imageWidget\") self.imageLayout = QtWidgets.QVBoxLayout(self.imageWidget) self.imageLayout.setContentsMargins(20, 20, 20, 20) self.imageLayout.setObjectName(\"imageLayout\") self.imageLabel", "self.deleteContourBtn.setText(_translate(\"MainWindow\", \"Delete Contour\")) self.saveContourBtn.setText(_translate(\"MainWindow\", \"Save Contour\")) self.menuFile.setTitle(_translate(\"MainWindow\", \"File\")) self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\")) self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\")) self.actionOpen.setText(_translate(\"MainWindow\",", "self.label_2) self.valueLine = QtWidgets.QLineEdit(self.contourOptionWidget) self.valueLine.setObjectName(\"valueLine\") 
self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.valueLine) self.controlPanelLayout.addWidget(self.contourOptionWidget) self.deleteContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.deleteContourBtn.setMinimumSize(QtCore.QSize(200,", "# -*- coding: utf-8 -*- # Form implementation generated from reading ui file", "self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.valueLine) self.controlPanelLayout.addWidget(self.contourOptionWidget) self.deleteContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.deleteContourBtn.setObjectName(\"deleteContourBtn\") self.controlPanelLayout.addWidget(self.deleteContourBtn)", "QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(1331, 732) self.centralwidget =", "UI code generator 5.6 # # WARNING! 
All changes made in this file", "self.saveContourBtn.setObjectName(\"saveContourBtn\") self.controlPanelLayout.addWidget(self.saveContourBtn) self.mainLayout.addWidget(self.controlPanelWidget) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 1331, 21)) self.menubar.setObjectName(\"menubar\") self.menuFile", "16777215)) self.controlPanelWidget.setObjectName(\"controlPanelWidget\") self.controlPanelLayout = QtWidgets.QVBoxLayout(self.controlPanelWidget) self.controlPanelLayout.setContentsMargins(20, 20, 20, 20) self.controlPanelLayout.setObjectName(\"controlPanelLayout\") self.openFileBtn = QtWidgets.QPushButton(self.controlPanelWidget)", "self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.findContourBtn.setObjectName(\"findContourBtn\") self.controlPanelLayout.addWidget(self.findContourBtn) self.contourOptionWidget = QtWidgets.QWidget(self.controlPanelWidget) self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0)) self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215))", "= QtWidgets.QMenu(self.menubar) self.menuEdit.setObjectName(\"menuEdit\") self.menuHelp = QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName(\"menuHelp\") MainWindow.setMenuBar(self.menubar) self.statusBar = QtWidgets.QStatusBar(MainWindow) self.statusBar.setObjectName(\"statusBar\") MainWindow.setStatusBar(self.statusBar)", "self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.saveContourBtn.setObjectName(\"saveContourBtn\") self.controlPanelLayout.addWidget(self.saveContourBtn) self.mainLayout.addWidget(self.controlPanelWidget) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0,", "\"0.5\")) self.deleteContourBtn.setText(_translate(\"MainWindow\", 
\"Delete Contour\")) self.saveContourBtn.setText(_translate(\"MainWindow\", \"Save Contour\")) self.menuFile.setTitle(_translate(\"MainWindow\", \"File\")) self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\")) self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\"))", "= QtWidgets.QWidget(self.horizontalLayoutWidget) self.imageWidget.setMinimumSize(QtCore.QSize(500, 300)) self.imageWidget.setObjectName(\"imageWidget\") self.imageLayout = QtWidgets.QVBoxLayout(self.imageWidget) self.imageLayout.setContentsMargins(20, 20, 20, 20) self.imageLayout.setObjectName(\"imageLayout\")", "self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox) self.colorLabel = QtWidgets.QLabel(self.contourOptionWidget) self.colorLabel.setObjectName(\"colorLabel\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel) self.colorBtn", "self.menuFile.setObjectName(\"menuFile\") self.menuEdit = QtWidgets.QMenu(self.menubar) self.menuEdit.setObjectName(\"menuEdit\") self.menuHelp = QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName(\"menuHelp\") MainWindow.setMenuBar(self.menubar) self.statusBar = QtWidgets.QStatusBar(MainWindow)", "= QtWidgets.QAction(MainWindow) self.actionOpen_2.setObjectName(\"actionOpen_2\") self.actionSave = QtWidgets.QAction(MainWindow) self.actionSave.setObjectName(\"actionSave\") self.actionQuit = QtWidgets.QAction(MainWindow) self.actionQuit.setObjectName(\"actionQuit\") self.menuFile.addAction(self.actionOpen) self.menuFile.addAction(self.actionOpen_2)", "1331, 21)) self.menubar.setObjectName(\"menubar\") self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName(\"menuFile\") self.menuEdit = QtWidgets.QMenu(self.menubar) self.menuEdit.setObjectName(\"menuEdit\") self.menuHelp =", "utf-8 -*- # Form implementation generated from reading ui file 'basic.ui' # #", "lost! 
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\")", "self.label_2.setText(_translate(\"MainWindow\", \"Value (0.0~1.0):\")) self.valueLine.setText(_translate(\"MainWindow\", \"0.5\")) self.deleteContourBtn.setText(_translate(\"MainWindow\", \"Delete Contour\")) self.saveContourBtn.setText(_translate(\"MainWindow\", \"Save Contour\")) self.menuFile.setTitle(_translate(\"MainWindow\", \"File\"))", "self.menuEdit.setObjectName(\"menuEdit\") self.menuHelp = QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName(\"menuHelp\") MainWindow.setMenuBar(self.menubar) self.statusBar = QtWidgets.QStatusBar(MainWindow) self.statusBar.setObjectName(\"statusBar\") MainWindow.setStatusBar(self.statusBar) self.actionOpen =", "file 'basic.ui' # # Created by: PyQt5 UI code generator 5.6 # #", "self.saveContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.saveContourBtn.setObjectName(\"saveContourBtn\") self.controlPanelLayout.addWidget(self.saveContourBtn) self.mainLayout.addWidget(self.controlPanelWidget) MainWindow.setCentralWidget(self.centralwidget) self.menubar =", "QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(1331, 732) self.centralwidget = QtWidgets.QWidget(MainWindow)", "5.6 # # WARNING! All changes made in this file will be lost!", "code generator 5.6 # # WARNING! 
All changes made in this file will", "DrawableQLabel(self.imageWidget) self.imageLabel.setObjectName(\"imageLabel\") self.imageLayout.addWidget(self.imageLabel, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter) self.mainLayout.addWidget(self.imageWidget) self.controlPanelWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215)) self.controlPanelWidget.setObjectName(\"controlPanelWidget\") self.controlPanelLayout", "QtWidgets.QFormLayout.FieldRole, self.valueLine) self.controlPanelLayout.addWidget(self.contourOptionWidget) self.deleteContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.deleteContourBtn.setObjectName(\"deleteContourBtn\") self.controlPanelLayout.addWidget(self.deleteContourBtn) self.saveContourBtn", "by: PyQt5 UI code generator 5.6 # # WARNING! All changes made in", "self.valueLine.setObjectName(\"valueLine\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.valueLine) self.controlPanelLayout.addWidget(self.contourOptionWidget) self.deleteContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.deleteContourBtn.setObjectName(\"deleteContourBtn\")", "class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(1331, 732) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setMinimumSize(QtCore.QSize(800, 400))", "self.choosePointBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0)) self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.choosePointBtn.setObjectName(\"choosePointBtn\") self.controlPanelLayout.addWidget(self.choosePointBtn) 
self.findContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.findContourBtn.setMinimumSize(QtCore.QSize(200,", "self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\")) self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\")) self.actionOpen.setText(_translate(\"MainWindow\", \"New\")) self.actionOpen_2.setText(_translate(\"MainWindow\", \"Open\")) self.actionSave.setText(_translate(\"MainWindow\", \"Save\")) self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\")) from", "0)) self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215)) self.contourOptionWidget.setObjectName(\"contourOptionWidget\") self.contourOptionLayout = QtWidgets.QFormLayout(self.contourOptionWidget) self.contourOptionLayout.setContentsMargins(5, 5, 5, 5) self.contourOptionLayout.setObjectName(\"contourOptionLayout\") self.thicknessLabel", "\"Find Contour\")) self.thicknessLabel.setText(_translate(\"MainWindow\", \"Thickness:\")) self.thicknessComboBox.setItemText(0, _translate(\"MainWindow\", \"1\")) self.thicknessComboBox.setItemText(1, _translate(\"MainWindow\", \"2\")) self.thicknessComboBox.setItemText(2, _translate(\"MainWindow\", \"3\"))", "self.imageLayout.setContentsMargins(20, 20, 20, 20) self.imageLayout.setObjectName(\"imageLayout\") self.imageLabel = DrawableQLabel(self.imageWidget) self.imageLabel.setObjectName(\"imageLabel\") self.imageLayout.addWidget(self.imageLabel, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter) self.mainLayout.addWidget(self.imageWidget)", "20, 20, 20) self.imageLayout.setObjectName(\"imageLayout\") self.imageLabel = DrawableQLabel(self.imageWidget) self.imageLabel.setObjectName(\"imageLabel\") self.imageLayout.addWidget(self.imageLabel, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter) self.mainLayout.addWidget(self.imageWidget) self.controlPanelWidget", "self.actionOpen.setText(_translate(\"MainWindow\", \"New\")) self.actionOpen_2.setText(_translate(\"MainWindow\", \"Open\")) 
self.actionSave.setText(_translate(\"MainWindow\", \"Save\")) self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\")) from Drawable import DrawableQLabel if", "= QtWidgets.QLineEdit(self.contourOptionWidget) self.valueLine.setObjectName(\"valueLine\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.valueLine) self.controlPanelLayout.addWidget(self.contourOptionWidget) self.deleteContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.deleteContourBtn.setMaximumSize(QtCore.QSize(200,", "reading ui file 'basic.ui' # # Created by: PyQt5 UI code generator 5.6", "self.actionOpen_2.setObjectName(\"actionOpen_2\") self.actionSave = QtWidgets.QAction(MainWindow) self.actionSave.setObjectName(\"actionSave\") self.actionQuit = QtWidgets.QAction(MainWindow) self.actionQuit.setObjectName(\"actionQuit\") self.menuFile.addAction(self.actionOpen) self.menuFile.addAction(self.actionOpen_2) self.menuFile.addAction(self.actionSave) self.menuFile.addAction(self.actionQuit)", "Contour\")) self.menuFile.setTitle(_translate(\"MainWindow\", \"File\")) self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\")) self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\")) self.actionOpen.setText(_translate(\"MainWindow\", \"New\")) self.actionOpen_2.setText(_translate(\"MainWindow\", \"Open\")) self.actionSave.setText(_translate(\"MainWindow\", \"Save\"))", "self.mainLayout.setContentsMargins(50, 50, 50, 50) self.mainLayout.setObjectName(\"mainLayout\") self.imageWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.imageWidget.setMinimumSize(QtCore.QSize(500, 300)) self.imageWidget.setObjectName(\"imageWidget\") self.imageLayout =", "= QtWidgets.QWidget(MainWindow) self.centralwidget.setMinimumSize(QtCore.QSize(800, 400)) self.centralwidget.setObjectName(\"centralwidget\") self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget) 
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 1386, 720)) self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\")", "self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 1331, 21)) self.menubar.setObjectName(\"menubar\") self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName(\"menuFile\") self.menuEdit", "Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(1331, 732) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setMinimumSize(QtCore.QSize(800, 400)) self.centralwidget.setObjectName(\"centralwidget\")", "MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.imageLabel.setText(_translate(\"MainWindow\", \"Image will appear here\")) self.openFileBtn.setText(_translate(\"MainWindow\", \"Open", "self.mainLayout.addWidget(self.imageWidget) self.controlPanelWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215)) self.controlPanelWidget.setObjectName(\"controlPanelWidget\") self.controlPanelLayout = QtWidgets.QVBoxLayout(self.controlPanelWidget) self.controlPanelLayout.setContentsMargins(20, 20, 20,", "5) self.contourOptionLayout.setObjectName(\"contourOptionLayout\") self.thicknessLabel = QtWidgets.QLabel(self.contourOptionWidget) self.thicknessLabel.setObjectName(\"thicknessLabel\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.thicknessLabel) self.thicknessComboBox = QtWidgets.QComboBox(self.contourOptionWidget) self.thicknessComboBox.setObjectName(\"thicknessComboBox\")", "setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(1331, 732) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setMinimumSize(QtCore.QSize(800, 400)) self.centralwidget.setObjectName(\"centralwidget\") 
self.horizontalLayoutWidget =", "# WARNING! All changes made in this file will be lost! from PyQt5", "16777215)) self.deleteContourBtn.setObjectName(\"deleteContourBtn\") self.controlPanelLayout.addWidget(self.deleteContourBtn) self.saveContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.saveContourBtn.setObjectName(\"saveContourBtn\") self.controlPanelLayout.addWidget(self.saveContourBtn) self.mainLayout.addWidget(self.controlPanelWidget)", "self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.thicknessLabel) self.thicknessComboBox = QtWidgets.QComboBox(self.contourOptionWidget) self.thicknessComboBox.setObjectName(\"thicknessComboBox\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox)", "\"Image will appear here\")) self.openFileBtn.setText(_translate(\"MainWindow\", \"Open New Image\")) self.choosePointBtn.setText(_translate(\"MainWindow\", \"Choose a Point\")) self.findContourBtn.setText(_translate(\"MainWindow\",", "= QtWidgets.QLabel(self.contourOptionWidget) self.label_2.setObjectName(\"label_2\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2) self.valueLine = QtWidgets.QLineEdit(self.contourOptionWidget) self.valueLine.setObjectName(\"valueLine\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.valueLine)", "\"New\")) self.actionOpen_2.setText(_translate(\"MainWindow\", \"Open\")) self.actionSave.setText(_translate(\"MainWindow\", \"Save\")) self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\")) from Drawable import DrawableQLabel if __name__", "implementation generated from reading ui file 'basic.ui' # # Created by: PyQt5 UI", "New Image\")) 
self.choosePointBtn.setText(_translate(\"MainWindow\", \"Choose a Point\")) self.findContourBtn.setText(_translate(\"MainWindow\", \"Find Contour\")) self.thicknessLabel.setText(_translate(\"MainWindow\", \"Thickness:\")) self.thicknessComboBox.setItemText(0, _translate(\"MainWindow\",", "<filename>ui/ui.py # -*- coding: utf-8 -*- # Form implementation generated from reading ui", "self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.imageLabel.setText(_translate(\"MainWindow\", \"Image will", "QtWidgets.QFormLayout.LabelRole, self.colorLabel) self.colorBtn = QtWidgets.QPushButton(self.contourOptionWidget) self.colorBtn.setText(\"\") self.colorBtn.setObjectName(\"colorBtn\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorBtn) self.label_2 = QtWidgets.QLabel(self.contourOptionWidget)", "self.controlPanelLayout.addWidget(self.contourOptionWidget) self.deleteContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.deleteContourBtn.setObjectName(\"deleteContourBtn\") self.controlPanelLayout.addWidget(self.deleteContourBtn) self.saveContourBtn = QtWidgets.QPushButton(self.controlPanelWidget)", "QtWidgets.QPushButton(self.controlPanelWidget) self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.deleteContourBtn.setObjectName(\"deleteContourBtn\") self.controlPanelLayout.addWidget(self.deleteContourBtn) self.saveContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.saveContourBtn.setMaximumSize(QtCore.QSize(200,", "self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215)) 
self.openFileBtn.setObjectName(\"openFileBtn\") self.controlPanelLayout.addWidget(self.openFileBtn) self.choosePointBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0)) self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.choosePointBtn.setObjectName(\"choosePointBtn\") self.controlPanelLayout.addWidget(self.choosePointBtn)", "self.imageWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.imageWidget.setMinimumSize(QtCore.QSize(500, 300)) self.imageWidget.setObjectName(\"imageWidget\") self.imageLayout = QtWidgets.QVBoxLayout(self.imageWidget) self.imageLayout.setContentsMargins(20, 20, 20, 20)", "self.controlPanelLayout.setObjectName(\"controlPanelLayout\") self.openFileBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0)) self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.openFileBtn.setObjectName(\"openFileBtn\") self.controlPanelLayout.addWidget(self.openFileBtn) self.choosePointBtn = QtWidgets.QPushButton(self.controlPanelWidget)", "def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.imageLabel.setText(_translate(\"MainWindow\", \"Image will appear here\"))", "self.mainLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) self.mainLayout.setContentsMargins(50, 50, 50, 50) self.mainLayout.setObjectName(\"mainLayout\") self.imageWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.imageWidget.setMinimumSize(QtCore.QSize(500, 300))", "QtWidgets.QPushButton(self.contourOptionWidget) self.colorBtn.setText(\"\") self.colorBtn.setObjectName(\"colorBtn\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorBtn) self.label_2 = QtWidgets.QLabel(self.contourOptionWidget) self.label_2.setObjectName(\"label_2\") self.contourOptionLayout.setWidget(2, 
QtWidgets.QFormLayout.LabelRole, self.label_2)", "here\")) self.openFileBtn.setText(_translate(\"MainWindow\", \"Open New Image\")) self.choosePointBtn.setText(_translate(\"MainWindow\", \"Choose a Point\")) self.findContourBtn.setText(_translate(\"MainWindow\", \"Find Contour\")) self.thicknessLabel.setText(_translate(\"MainWindow\",", "= QtWidgets.QWidget(self.centralwidget) self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 1386, 720)) self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\") self.mainLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) self.mainLayout.setContentsMargins(50, 50, 50,", "self.imageWidget.setObjectName(\"imageWidget\") self.imageLayout = QtWidgets.QVBoxLayout(self.imageWidget) self.imageLayout.setContentsMargins(20, 20, 20, 20) self.imageLayout.setObjectName(\"imageLayout\") self.imageLabel = DrawableQLabel(self.imageWidget) self.imageLabel.setObjectName(\"imageLabel\")", "PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(1331, 732)", "\"Thickness:\")) self.thicknessComboBox.setItemText(0, _translate(\"MainWindow\", \"1\")) self.thicknessComboBox.setItemText(1, _translate(\"MainWindow\", \"2\")) self.thicknessComboBox.setItemText(2, _translate(\"MainWindow\", \"3\")) self.colorLabel.setText(_translate(\"MainWindow\", \"Color:\")) self.label_2.setText(_translate(\"MainWindow\",", "\"Quit\")) from Drawable import DrawableQLabel if __name__ == \"__main__\": import sys app =", "'basic.ui' # # Created by: PyQt5 UI code generator 5.6 # # WARNING!", "= DrawableQLabel(self.imageWidget) self.imageLabel.setObjectName(\"imageLabel\") self.imageLayout.addWidget(self.imageLabel, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter) self.mainLayout.addWidget(self.imageWidget) self.controlPanelWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215)) 
self.controlPanelWidget.setObjectName(\"controlPanelWidget\")", "= QtWidgets.QComboBox(self.contourOptionWidget) self.thicknessComboBox.setObjectName(\"thicknessComboBox\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox) self.colorLabel = QtWidgets.QLabel(self.contourOptionWidget) self.colorLabel.setObjectName(\"colorLabel\")", "self.thicknessComboBox.setItemText(1, _translate(\"MainWindow\", \"2\")) self.thicknessComboBox.setItemText(2, _translate(\"MainWindow\", \"3\")) self.colorLabel.setText(_translate(\"MainWindow\", \"Color:\")) self.label_2.setText(_translate(\"MainWindow\", \"Value (0.0~1.0):\")) self.valueLine.setText(_translate(\"MainWindow\", \"0.5\"))", "self.colorLabel.setObjectName(\"colorLabel\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel) self.colorBtn = QtWidgets.QPushButton(self.contourOptionWidget) self.colorBtn.setText(\"\") self.colorBtn.setObjectName(\"colorBtn\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorBtn) self.label_2", "self.menubar.addAction(self.menuEdit.menuAction()) self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.imageLabel.setText(_translate(\"MainWindow\",", "50) self.mainLayout.setObjectName(\"mainLayout\") self.imageWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.imageWidget.setMinimumSize(QtCore.QSize(500, 300)) self.imageWidget.setObjectName(\"imageWidget\") self.imageLayout = QtWidgets.QVBoxLayout(self.imageWidget) self.imageLayout.setContentsMargins(20, 20,", "import DrawableQLabel if __name__ == \"__main__\": import sys app = 
QtWidgets.QApplication(sys.argv) MainWindow =", "16777215)) self.openFileBtn.setObjectName(\"openFileBtn\") self.controlPanelLayout.addWidget(self.openFileBtn) self.choosePointBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0)) self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.choosePointBtn.setObjectName(\"choosePointBtn\") self.controlPanelLayout.addWidget(self.choosePointBtn) self.findContourBtn", "_translate(\"MainWindow\", \"2\")) self.thicknessComboBox.setItemText(2, _translate(\"MainWindow\", \"3\")) self.colorLabel.setText(_translate(\"MainWindow\", \"Color:\")) self.label_2.setText(_translate(\"MainWindow\", \"Value (0.0~1.0):\")) self.valueLine.setText(_translate(\"MainWindow\", \"0.5\")) self.deleteContourBtn.setText(_translate(\"MainWindow\",", "0)) self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.findContourBtn.setObjectName(\"findContourBtn\") self.controlPanelLayout.addWidget(self.findContourBtn) self.contourOptionWidget = QtWidgets.QWidget(self.controlPanelWidget) self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0)) self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215)) self.contourOptionWidget.setObjectName(\"contourOptionWidget\")", "self.choosePointBtn.setText(_translate(\"MainWindow\", \"Choose a Point\")) self.findContourBtn.setText(_translate(\"MainWindow\", \"Find Contour\")) self.thicknessLabel.setText(_translate(\"MainWindow\", \"Thickness:\")) self.thicknessComboBox.setItemText(0, _translate(\"MainWindow\", \"1\")) self.thicknessComboBox.setItemText(1,", "a Point\")) self.findContourBtn.setText(_translate(\"MainWindow\", \"Find Contour\")) self.thicknessLabel.setText(_translate(\"MainWindow\", \"Thickness:\")) self.thicknessComboBox.setItemText(0, _translate(\"MainWindow\", \"1\")) self.thicknessComboBox.setItemText(1, _translate(\"MainWindow\", \"2\"))", "16777215)) self.saveContourBtn.setObjectName(\"saveContourBtn\") 
self.controlPanelLayout.addWidget(self.saveContourBtn) self.mainLayout.addWidget(self.controlPanelWidget) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 1331, 21)) self.menubar.setObjectName(\"menubar\")", "_translate(\"MainWindow\", \"3\")) self.colorLabel.setText(_translate(\"MainWindow\", \"Color:\")) self.label_2.setText(_translate(\"MainWindow\", \"Value (0.0~1.0):\")) self.valueLine.setText(_translate(\"MainWindow\", \"0.5\")) self.deleteContourBtn.setText(_translate(\"MainWindow\", \"Delete Contour\")) self.saveContourBtn.setText(_translate(\"MainWindow\",", "__name__ == \"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui =", "self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0)) self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.choosePointBtn.setObjectName(\"choosePointBtn\") self.controlPanelLayout.addWidget(self.choosePointBtn) self.findContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215))", "300)) self.imageWidget.setObjectName(\"imageWidget\") self.imageLayout = QtWidgets.QVBoxLayout(self.imageWidget) self.imageLayout.setContentsMargins(20, 20, 20, 20) self.imageLayout.setObjectName(\"imageLayout\") self.imageLabel = DrawableQLabel(self.imageWidget)", "this file will be lost! 
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object):", "self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 1386, 720)) self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\") self.mainLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) self.mainLayout.setContentsMargins(50, 50, 50, 50) self.mainLayout.setObjectName(\"mainLayout\")", "self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox) self.colorLabel = QtWidgets.QLabel(self.contourOptionWidget) self.colorLabel.setObjectName(\"colorLabel\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel)", "QtWidgets.QPushButton(self.controlPanelWidget) self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.saveContourBtn.setObjectName(\"saveContourBtn\") self.controlPanelLayout.addWidget(self.saveContourBtn) self.mainLayout.addWidget(self.controlPanelWidget) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0,", "\"2\")) self.thicknessComboBox.setItemText(2, _translate(\"MainWindow\", \"3\")) self.colorLabel.setText(_translate(\"MainWindow\", \"Color:\")) self.label_2.setText(_translate(\"MainWindow\", \"Value (0.0~1.0):\")) self.valueLine.setText(_translate(\"MainWindow\", \"0.5\")) self.deleteContourBtn.setText(_translate(\"MainWindow\", \"Delete", "def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(1331, 732) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setMinimumSize(QtCore.QSize(800, 400)) self.centralwidget.setObjectName(\"centralwidget\") self.horizontalLayoutWidget", "= QtWidgets.QPushButton(self.contourOptionWidget) self.colorBtn.setText(\"\") 
self.colorBtn.setObjectName(\"colorBtn\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorBtn) self.label_2 = QtWidgets.QLabel(self.contourOptionWidget) self.label_2.setObjectName(\"label_2\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole,", "self.imageLabel.setObjectName(\"imageLabel\") self.imageLayout.addWidget(self.imageLabel, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter) self.mainLayout.addWidget(self.imageWidget) self.controlPanelWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215)) self.controlPanelWidget.setObjectName(\"controlPanelWidget\") self.controlPanelLayout =", "= QtWidgets.QPushButton(self.controlPanelWidget) self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.deleteContourBtn.setObjectName(\"deleteContourBtn\") self.controlPanelLayout.addWidget(self.deleteContourBtn) self.saveContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0))", "self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName(\"menuFile\") self.menuEdit = QtWidgets.QMenu(self.menubar) self.menuEdit.setObjectName(\"menuEdit\") self.menuHelp = QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName(\"menuHelp\") MainWindow.setMenuBar(self.menubar)", "self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.imageLabel.setText(_translate(\"MainWindow\", \"Image", "QtWidgets.QPushButton(self.controlPanelWidget) self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0)) self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.openFileBtn.setObjectName(\"openFileBtn\") 
self.controlPanelLayout.addWidget(self.openFileBtn) self.choosePointBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0)) self.choosePointBtn.setMaximumSize(QtCore.QSize(200,", "self.imageWidget.setMinimumSize(QtCore.QSize(500, 300)) self.imageWidget.setObjectName(\"imageWidget\") self.imageLayout = QtWidgets.QVBoxLayout(self.imageWidget) self.imageLayout.setContentsMargins(20, 20, 20, 20) self.imageLayout.setObjectName(\"imageLayout\") self.imageLabel =", "MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 1331, 21)) self.menubar.setObjectName(\"menubar\") self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName(\"menuFile\")", "self.imageLayout = QtWidgets.QVBoxLayout(self.imageWidget) self.imageLayout.setContentsMargins(20, 20, 20, 20) self.imageLayout.setObjectName(\"imageLayout\") self.imageLabel = DrawableQLabel(self.imageWidget) self.imageLabel.setObjectName(\"imageLabel\") self.imageLayout.addWidget(self.imageLabel,", "self.menuFile.addAction(self.actionSave) self.menuFile.addAction(self.actionQuit) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuEdit.menuAction()) self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate", "self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuEdit.menuAction()) self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))", "# Created by: PyQt5 UI code generator 5.6 # # WARNING! 
All changes", "self.colorLabel = QtWidgets.QLabel(self.contourOptionWidget) self.colorLabel.setObjectName(\"colorLabel\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel) self.colorBtn = QtWidgets.QPushButton(self.contourOptionWidget) self.colorBtn.setText(\"\") self.colorBtn.setObjectName(\"colorBtn\") self.contourOptionLayout.setWidget(1,", "Contour\")) self.thicknessLabel.setText(_translate(\"MainWindow\", \"Thickness:\")) self.thicknessComboBox.setItemText(0, _translate(\"MainWindow\", \"1\")) self.thicknessComboBox.setItemText(1, _translate(\"MainWindow\", \"2\")) self.thicknessComboBox.setItemText(2, _translate(\"MainWindow\", \"3\")) self.colorLabel.setText(_translate(\"MainWindow\",", "= QtWidgets.QPushButton(self.controlPanelWidget) self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.findContourBtn.setObjectName(\"findContourBtn\") self.controlPanelLayout.addWidget(self.findContourBtn) self.contourOptionWidget = QtWidgets.QWidget(self.controlPanelWidget) self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0))", "self.menuEdit = QtWidgets.QMenu(self.menubar) self.menuEdit.setObjectName(\"menuEdit\") self.menuHelp = QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName(\"menuHelp\") MainWindow.setMenuBar(self.menubar) self.statusBar = QtWidgets.QStatusBar(MainWindow) self.statusBar.setObjectName(\"statusBar\")", "\"Save\")) self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\")) from Drawable import DrawableQLabel if __name__ == \"__main__\": import sys", "self.deleteContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.deleteContourBtn.setObjectName(\"deleteContourBtn\") self.controlPanelLayout.addWidget(self.deleteContourBtn) self.saveContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) 
self.saveContourBtn.setMinimumSize(QtCore.QSize(200,", "\"Help\")) self.actionOpen.setText(_translate(\"MainWindow\", \"New\")) self.actionOpen_2.setText(_translate(\"MainWindow\", \"Open\")) self.actionSave.setText(_translate(\"MainWindow\", \"Save\")) self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\")) from Drawable import DrawableQLabel", "self.colorBtn = QtWidgets.QPushButton(self.contourOptionWidget) self.colorBtn.setText(\"\") self.colorBtn.setObjectName(\"colorBtn\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorBtn) self.label_2 = QtWidgets.QLabel(self.contourOptionWidget) self.label_2.setObjectName(\"label_2\") self.contourOptionLayout.setWidget(2,", "= QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 1331, 21)) self.menubar.setObjectName(\"menubar\") self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName(\"menuFile\") self.menuEdit =", "MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(1331, 732) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setMinimumSize(QtCore.QSize(800, 400)) self.centralwidget.setObjectName(\"centralwidget\") self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)", "MainWindow.resize(1331, 732) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setMinimumSize(QtCore.QSize(800, 400)) self.centralwidget.setObjectName(\"centralwidget\") self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget) self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1,", "20, 20) self.imageLayout.setObjectName(\"imageLayout\") self.imageLabel = DrawableQLabel(self.imageWidget) self.imageLabel.setObjectName(\"imageLabel\") self.imageLayout.addWidget(self.imageLabel, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter) self.mainLayout.addWidget(self.imageWidget) self.controlPanelWidget =", "16777215)) self.choosePointBtn.setObjectName(\"choosePointBtn\") 
self.controlPanelLayout.addWidget(self.choosePointBtn) self.findContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.findContourBtn.setObjectName(\"findContourBtn\") self.controlPanelLayout.addWidget(self.findContourBtn) self.contourOptionWidget", "\"File\")) self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\")) self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\")) self.actionOpen.setText(_translate(\"MainWindow\", \"New\")) self.actionOpen_2.setText(_translate(\"MainWindow\", \"Open\")) self.actionSave.setText(_translate(\"MainWindow\", \"Save\")) self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\"))", "from Drawable import DrawableQLabel if __name__ == \"__main__\": import sys app = QtWidgets.QApplication(sys.argv)", "self.controlPanelLayout.setContentsMargins(20, 20, 20, 20) self.controlPanelLayout.setObjectName(\"controlPanelLayout\") self.openFileBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0)) self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.openFileBtn.setObjectName(\"openFileBtn\")", "QtWidgets.QLineEdit(self.contourOptionWidget) self.valueLine.setObjectName(\"valueLine\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.valueLine) self.controlPanelLayout.addWidget(self.contourOptionWidget) self.deleteContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215))", "QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.imageLabel.setText(_translate(\"MainWindow\", \"Image will appear", "16777215)) 
self.findContourBtn.setObjectName(\"findContourBtn\") self.controlPanelLayout.addWidget(self.findContourBtn) self.contourOptionWidget = QtWidgets.QWidget(self.controlPanelWidget) self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0)) self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215)) self.contourOptionWidget.setObjectName(\"contourOptionWidget\") self.contourOptionLayout =", "self.colorBtn.setObjectName(\"colorBtn\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorBtn) self.label_2 = QtWidgets.QLabel(self.contourOptionWidget) self.label_2.setObjectName(\"label_2\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2) self.valueLine =", "= QtWidgets.QVBoxLayout(self.controlPanelWidget) self.controlPanelLayout.setContentsMargins(20, 20, 20, 20) self.controlPanelLayout.setObjectName(\"controlPanelLayout\") self.openFileBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0)) self.openFileBtn.setMaximumSize(QtCore.QSize(200,", "400)) self.centralwidget.setObjectName(\"centralwidget\") self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget) self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 1386, 720)) self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\") self.mainLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)", "self.controlPanelLayout.addWidget(self.findContourBtn) self.contourOptionWidget = QtWidgets.QWidget(self.controlPanelWidget) self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0)) self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215)) self.contourOptionWidget.setObjectName(\"contourOptionWidget\") self.contourOptionLayout = QtWidgets.QFormLayout(self.contourOptionWidget) self.contourOptionLayout.setContentsMargins(5,", "\"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() 
ui.setupUi(MainWindow)", "self.statusBar = QtWidgets.QStatusBar(MainWindow) self.statusBar.setObjectName(\"statusBar\") MainWindow.setStatusBar(self.statusBar) self.actionOpen = QtWidgets.QAction(MainWindow) self.actionOpen.setObjectName(\"actionOpen\") self.actionOpen_2 = QtWidgets.QAction(MainWindow) self.actionOpen_2.setObjectName(\"actionOpen_2\")", "self.imageLabel = DrawableQLabel(self.imageWidget) self.imageLabel.setObjectName(\"imageLabel\") self.imageLayout.addWidget(self.imageLabel, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter) self.mainLayout.addWidget(self.imageWidget) self.controlPanelWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215))", "MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.imageLabel.setText(_translate(\"MainWindow\", \"Image will appear here\")) self.openFileBtn.setText(_translate(\"MainWindow\", \"Open New Image\")) self.choosePointBtn.setText(_translate(\"MainWindow\", \"Choose", "= QtWidgets.QVBoxLayout(self.imageWidget) self.imageLayout.setContentsMargins(20, 20, 20, 20) self.imageLayout.setObjectName(\"imageLayout\") self.imageLabel = DrawableQLabel(self.imageWidget) self.imageLabel.setObjectName(\"imageLabel\") self.imageLayout.addWidget(self.imageLabel, 0,", "self.centralwidget.setObjectName(\"centralwidget\") self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget) self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 1386, 720)) self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\") self.mainLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) self.mainLayout.setContentsMargins(50,", "self.contourOptionWidget.setObjectName(\"contourOptionWidget\") self.contourOptionLayout = QtWidgets.QFormLayout(self.contourOptionWidget) self.contourOptionLayout.setContentsMargins(5, 5, 5, 5) self.contourOptionLayout.setObjectName(\"contourOptionLayout\") self.thicknessLabel = 
QtWidgets.QLabel(self.contourOptionWidget) self.thicknessLabel.setObjectName(\"thicknessLabel\")", "\"Value (0.0~1.0):\")) self.valueLine.setText(_translate(\"MainWindow\", \"0.5\")) self.deleteContourBtn.setText(_translate(\"MainWindow\", \"Delete Contour\")) self.saveContourBtn.setText(_translate(\"MainWindow\", \"Save Contour\")) self.menuFile.setTitle(_translate(\"MainWindow\", \"File\")) self.menuEdit.setTitle(_translate(\"MainWindow\",", "DrawableQLabel if __name__ == \"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow()", "generator 5.6 # # WARNING! All changes made in this file will be", "ui file 'basic.ui' # # Created by: PyQt5 UI code generator 5.6 #", "# # Created by: PyQt5 UI code generator 5.6 # # WARNING! All", "Image\")) self.choosePointBtn.setText(_translate(\"MainWindow\", \"Choose a Point\")) self.findContourBtn.setText(_translate(\"MainWindow\", \"Find Contour\")) self.thicknessLabel.setText(_translate(\"MainWindow\", \"Thickness:\")) self.thicknessComboBox.setItemText(0, _translate(\"MainWindow\", \"1\"))", "self.controlPanelLayout.addWidget(self.saveContourBtn) self.mainLayout.addWidget(self.controlPanelWidget) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 1331, 21)) self.menubar.setObjectName(\"menubar\") self.menuFile =", "self.findContourBtn.setText(_translate(\"MainWindow\", \"Find Contour\")) self.thicknessLabel.setText(_translate(\"MainWindow\", \"Thickness:\")) self.thicknessComboBox.setItemText(0, _translate(\"MainWindow\", \"1\")) self.thicknessComboBox.setItemText(1, _translate(\"MainWindow\", \"2\")) self.thicknessComboBox.setItemText(2, _translate(\"MainWindow\",", "QtWidgets.QFormLayout.FieldRole, self.colorBtn) self.label_2 = QtWidgets.QLabel(self.contourOptionWidget) self.label_2.setObjectName(\"label_2\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2) 
self.valueLine = QtWidgets.QLineEdit(self.contourOptionWidget) self.valueLine.setObjectName(\"valueLine\")", "self.menubar.setObjectName(\"menubar\") self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName(\"menuFile\") self.menuEdit = QtWidgets.QMenu(self.menubar) self.menuEdit.setObjectName(\"menuEdit\") self.menuHelp = QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName(\"menuHelp\")", "= QtWidgets.QFormLayout(self.contourOptionWidget) self.contourOptionLayout.setContentsMargins(5, 5, 5, 5) self.contourOptionLayout.setObjectName(\"contourOptionLayout\") self.thicknessLabel = QtWidgets.QLabel(self.contourOptionWidget) self.thicknessLabel.setObjectName(\"thicknessLabel\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole,", "QtWidgets.QLabel(self.contourOptionWidget) self.colorLabel.setObjectName(\"colorLabel\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel) self.colorBtn = QtWidgets.QPushButton(self.contourOptionWidget) self.colorBtn.setText(\"\") self.colorBtn.setObjectName(\"colorBtn\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorBtn)", "import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) MainWindow.show()", "= QtWidgets.QPushButton(self.controlPanelWidget) self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0)) self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.choosePointBtn.setObjectName(\"choosePointBtn\") self.controlPanelLayout.addWidget(self.choosePointBtn) self.findContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0))", "self.contourOptionLayout.setObjectName(\"contourOptionLayout\") self.thicknessLabel = QtWidgets.QLabel(self.contourOptionWidget) self.thicknessLabel.setObjectName(\"thicknessLabel\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, 
self.thicknessLabel) self.thicknessComboBox = QtWidgets.QComboBox(self.contourOptionWidget) self.thicknessComboBox.setObjectName(\"thicknessComboBox\") self.thicknessComboBox.addItem(\"\")", "QtWidgets.QFormLayout.LabelRole, self.thicknessLabel) self.thicknessComboBox = QtWidgets.QComboBox(self.contourOptionWidget) self.thicknessComboBox.setObjectName(\"thicknessComboBox\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox) self.colorLabel", "self.thicknessLabel) self.thicknessComboBox = QtWidgets.QComboBox(self.contourOptionWidget) self.thicknessComboBox.setObjectName(\"thicknessComboBox\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox) self.colorLabel =", "= QtWidgets.QWidget(self.horizontalLayoutWidget) self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215)) self.controlPanelWidget.setObjectName(\"controlPanelWidget\") self.controlPanelLayout = QtWidgets.QVBoxLayout(self.controlPanelWidget) self.controlPanelLayout.setContentsMargins(20, 20, 20, 20) self.controlPanelLayout.setObjectName(\"controlPanelLayout\")", "from reading ui file 'basic.ui' # # Created by: PyQt5 UI code generator", "QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName(\"menuHelp\") MainWindow.setMenuBar(self.menubar) self.statusBar = QtWidgets.QStatusBar(MainWindow) self.statusBar.setObjectName(\"statusBar\") MainWindow.setStatusBar(self.statusBar) self.actionOpen = QtWidgets.QAction(MainWindow) self.actionOpen.setObjectName(\"actionOpen\") self.actionOpen_2", "self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.deleteContourBtn.setObjectName(\"deleteContourBtn\") 
self.controlPanelLayout.addWidget(self.deleteContourBtn) self.saveContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215))", "self.thicknessComboBox = QtWidgets.QComboBox(self.contourOptionWidget) self.thicknessComboBox.setObjectName(\"thicknessComboBox\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox) self.colorLabel = QtWidgets.QLabel(self.contourOptionWidget)", "self.saveContourBtn.setText(_translate(\"MainWindow\", \"Save Contour\")) self.menuFile.setTitle(_translate(\"MainWindow\", \"File\")) self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\")) self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\")) self.actionOpen.setText(_translate(\"MainWindow\", \"New\")) self.actionOpen_2.setText(_translate(\"MainWindow\", \"Open\"))", "QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.imageLabel.setText(_translate(\"MainWindow\", \"Image will appear here\")) self.openFileBtn.setText(_translate(\"MainWindow\", \"Open New Image\")) self.choosePointBtn.setText(_translate(\"MainWindow\",", "\"Choose a Point\")) self.findContourBtn.setText(_translate(\"MainWindow\", \"Find Contour\")) self.thicknessLabel.setText(_translate(\"MainWindow\", \"Thickness:\")) self.thicknessComboBox.setItemText(0, _translate(\"MainWindow\", \"1\")) self.thicknessComboBox.setItemText(1, _translate(\"MainWindow\",", "20, 20) self.controlPanelLayout.setObjectName(\"controlPanelLayout\") self.openFileBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0)) self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.openFileBtn.setObjectName(\"openFileBtn\") 
self.controlPanelLayout.addWidget(self.openFileBtn) self.choosePointBtn", "Created by: PyQt5 UI code generator 5.6 # # WARNING! All changes made", "self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel) self.colorBtn = QtWidgets.QPushButton(self.contourOptionWidget) self.colorBtn.setText(\"\") self.colorBtn.setObjectName(\"colorBtn\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorBtn) self.label_2 =", "\"Edit\")) self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\")) self.actionOpen.setText(_translate(\"MainWindow\", \"New\")) self.actionOpen_2.setText(_translate(\"MainWindow\", \"Open\")) self.actionSave.setText(_translate(\"MainWindow\", \"Save\")) self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\")) from Drawable", "QtWidgets.QVBoxLayout(self.controlPanelWidget) self.controlPanelLayout.setContentsMargins(20, 20, 20, 20) self.controlPanelLayout.setObjectName(\"controlPanelLayout\") self.openFileBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0)) self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215))", "50, 50, 50) self.mainLayout.setObjectName(\"mainLayout\") self.imageWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.imageWidget.setMinimumSize(QtCore.QSize(500, 300)) self.imageWidget.setObjectName(\"imageWidget\") self.imageLayout = QtWidgets.QVBoxLayout(self.imageWidget)", "self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0)) self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.openFileBtn.setObjectName(\"openFileBtn\") self.controlPanelLayout.addWidget(self.openFileBtn) self.choosePointBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0)) self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215))", "self.actionOpen_2.setText(_translate(\"MainWindow\", \"Open\")) self.actionSave.setText(_translate(\"MainWindow\", \"Save\")) 
self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\")) from Drawable import DrawableQLabel if __name__ ==", "generated from reading ui file 'basic.ui' # # Created by: PyQt5 UI code", "self.actionSave.setText(_translate(\"MainWindow\", \"Save\")) self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\")) from Drawable import DrawableQLabel if __name__ == \"__main__\": import", "self.thicknessLabel.setObjectName(\"thicknessLabel\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.thicknessLabel) self.thicknessComboBox = QtWidgets.QComboBox(self.contourOptionWidget) self.thicknessComboBox.setObjectName(\"thicknessComboBox\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole,", "self.thicknessLabel.setText(_translate(\"MainWindow\", \"Thickness:\")) self.thicknessComboBox.setItemText(0, _translate(\"MainWindow\", \"1\")) self.thicknessComboBox.setItemText(1, _translate(\"MainWindow\", \"2\")) self.thicknessComboBox.setItemText(2, _translate(\"MainWindow\", \"3\")) self.colorLabel.setText(_translate(\"MainWindow\", \"Color:\"))", "self.thicknessComboBox.setItemText(0, _translate(\"MainWindow\", \"1\")) self.thicknessComboBox.setItemText(1, _translate(\"MainWindow\", \"2\")) self.thicknessComboBox.setItemText(2, _translate(\"MainWindow\", \"3\")) self.colorLabel.setText(_translate(\"MainWindow\", \"Color:\")) self.label_2.setText(_translate(\"MainWindow\", \"Value", "= QtWidgets.QStatusBar(MainWindow) self.statusBar.setObjectName(\"statusBar\") MainWindow.setStatusBar(self.statusBar) self.actionOpen = QtWidgets.QAction(MainWindow) self.actionOpen.setObjectName(\"actionOpen\") self.actionOpen_2 = QtWidgets.QAction(MainWindow) self.actionOpen_2.setObjectName(\"actionOpen_2\") self.actionSave", "_translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", 
\"MainWindow\")) self.imageLabel.setText(_translate(\"MainWindow\", \"Image will appear here\")) self.openFileBtn.setText(_translate(\"MainWindow\", \"Open New", "QtWidgets.QPushButton(self.controlPanelWidget) self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.findContourBtn.setObjectName(\"findContourBtn\") self.controlPanelLayout.addWidget(self.findContourBtn) self.contourOptionWidget = QtWidgets.QWidget(self.controlPanelWidget) self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0)) self.contourOptionWidget.setMaximumSize(QtCore.QSize(200,", "self.controlPanelWidget.setObjectName(\"controlPanelWidget\") self.controlPanelLayout = QtWidgets.QVBoxLayout(self.controlPanelWidget) self.controlPanelLayout.setContentsMargins(20, 20, 20, 20) self.controlPanelLayout.setObjectName(\"controlPanelLayout\") self.openFileBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.openFileBtn.setMinimumSize(QtCore.QSize(200,", "QtWidgets.QWidget(self.horizontalLayoutWidget) self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215)) self.controlPanelWidget.setObjectName(\"controlPanelWidget\") self.controlPanelLayout = QtWidgets.QVBoxLayout(self.controlPanelWidget) self.controlPanelLayout.setContentsMargins(20, 20, 20, 20) self.controlPanelLayout.setObjectName(\"controlPanelLayout\") self.openFileBtn", "self.controlPanelWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215)) self.controlPanelWidget.setObjectName(\"controlPanelWidget\") self.controlPanelLayout = QtWidgets.QVBoxLayout(self.controlPanelWidget) self.controlPanelLayout.setContentsMargins(20, 20, 20, 20)", "# # WARNING! All changes made in this file will be lost! 
from", "self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.findContourBtn.setObjectName(\"findContourBtn\") self.controlPanelLayout.addWidget(self.findContourBtn) self.contourOptionWidget = QtWidgets.QWidget(self.controlPanelWidget) self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0)) self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215)) self.contourOptionWidget.setObjectName(\"contourOptionWidget\") self.contourOptionLayout", "50, 50) self.mainLayout.setObjectName(\"mainLayout\") self.imageWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.imageWidget.setMinimumSize(QtCore.QSize(500, 300)) self.imageWidget.setObjectName(\"imageWidget\") self.imageLayout = QtWidgets.QVBoxLayout(self.imageWidget) self.imageLayout.setContentsMargins(20,", "QtWidgets.QAction(MainWindow) self.actionOpen_2.setObjectName(\"actionOpen_2\") self.actionSave = QtWidgets.QAction(MainWindow) self.actionSave.setObjectName(\"actionSave\") self.actionQuit = QtWidgets.QAction(MainWindow) self.actionQuit.setObjectName(\"actionQuit\") self.menuFile.addAction(self.actionOpen) self.menuFile.addAction(self.actionOpen_2) self.menuFile.addAction(self.actionSave)", "0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter) self.mainLayout.addWidget(self.imageWidget) self.controlPanelWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215)) self.controlPanelWidget.setObjectName(\"controlPanelWidget\") self.controlPanelLayout = QtWidgets.QVBoxLayout(self.controlPanelWidget) self.controlPanelLayout.setContentsMargins(20,", "sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) MainWindow.show() sys.exit(app.exec_())", "self.actionQuit = QtWidgets.QAction(MainWindow) self.actionQuit.setObjectName(\"actionQuit\") self.menuFile.addAction(self.actionOpen) self.menuFile.addAction(self.actionOpen_2) 
self.menuFile.addAction(self.actionSave) self.menuFile.addAction(self.actionQuit) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuEdit.menuAction()) self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow)", "5, 5) self.contourOptionLayout.setObjectName(\"contourOptionLayout\") self.thicknessLabel = QtWidgets.QLabel(self.contourOptionWidget) self.thicknessLabel.setObjectName(\"thicknessLabel\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.thicknessLabel) self.thicknessComboBox = QtWidgets.QComboBox(self.contourOptionWidget)", "self.contourOptionWidget = QtWidgets.QWidget(self.controlPanelWidget) self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0)) self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215)) self.contourOptionWidget.setObjectName(\"contourOptionWidget\") self.contourOptionLayout = QtWidgets.QFormLayout(self.contourOptionWidget) self.contourOptionLayout.setContentsMargins(5, 5,", "self.menuHelp.setObjectName(\"menuHelp\") MainWindow.setMenuBar(self.menubar) self.statusBar = QtWidgets.QStatusBar(MainWindow) self.statusBar.setObjectName(\"statusBar\") MainWindow.setStatusBar(self.statusBar) self.actionOpen = QtWidgets.QAction(MainWindow) self.actionOpen.setObjectName(\"actionOpen\") self.actionOpen_2 =", "QtWidgets.QVBoxLayout(self.imageWidget) self.imageLayout.setContentsMargins(20, 20, 20, 20) self.imageLayout.setObjectName(\"imageLayout\") self.imageLabel = DrawableQLabel(self.imageWidget) self.imageLabel.setObjectName(\"imageLabel\") self.imageLayout.addWidget(self.imageLabel, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter)", "QtWidgets.QAction(MainWindow) self.actionSave.setObjectName(\"actionSave\") self.actionQuit = QtWidgets.QAction(MainWindow) self.actionQuit.setObjectName(\"actionQuit\") self.menuFile.addAction(self.actionOpen) self.menuFile.addAction(self.actionOpen_2) 
self.menuFile.addAction(self.actionSave) self.menuFile.addAction(self.actionQuit) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuEdit.menuAction()) self.menubar.addAction(self.menuHelp.menuAction())", "= QtWidgets.QPushButton(self.controlPanelWidget) self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.saveContourBtn.setObjectName(\"saveContourBtn\") self.controlPanelLayout.addWidget(self.saveContourBtn) self.mainLayout.addWidget(self.controlPanelWidget) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow)", "QtWidgets.QMenu(self.menubar) self.menuEdit.setObjectName(\"menuEdit\") self.menuHelp = QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName(\"menuHelp\") MainWindow.setMenuBar(self.menubar) self.statusBar = QtWidgets.QStatusBar(MainWindow) self.statusBar.setObjectName(\"statusBar\") MainWindow.setStatusBar(self.statusBar) self.actionOpen", "self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2) self.valueLine = QtWidgets.QLineEdit(self.contourOptionWidget) self.valueLine.setObjectName(\"valueLine\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.valueLine) self.controlPanelLayout.addWidget(self.contourOptionWidget) self.deleteContourBtn =", "QtWidgets.QStatusBar(MainWindow) self.statusBar.setObjectName(\"statusBar\") MainWindow.setStatusBar(self.statusBar) self.actionOpen = QtWidgets.QAction(MainWindow) self.actionOpen.setObjectName(\"actionOpen\") self.actionOpen_2 = QtWidgets.QAction(MainWindow) self.actionOpen_2.setObjectName(\"actionOpen_2\") self.actionSave =", "self.contourOptionLayout.setContentsMargins(5, 5, 5, 5) self.contourOptionLayout.setObjectName(\"contourOptionLayout\") self.thicknessLabel = QtWidgets.QLabel(self.contourOptionWidget) self.thicknessLabel.setObjectName(\"thicknessLabel\") self.contourOptionLayout.setWidget(0, 
QtWidgets.QFormLayout.LabelRole, self.thicknessLabel) self.thicknessComboBox", "= QtWidgets.QAction(MainWindow) self.actionQuit.setObjectName(\"actionQuit\") self.menuFile.addAction(self.actionOpen) self.menuFile.addAction(self.actionOpen_2) self.menuFile.addAction(self.actionSave) self.menuFile.addAction(self.actionQuit) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuEdit.menuAction()) self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def", "\"1\")) self.thicknessComboBox.setItemText(1, _translate(\"MainWindow\", \"2\")) self.thicknessComboBox.setItemText(2, _translate(\"MainWindow\", \"3\")) self.colorLabel.setText(_translate(\"MainWindow\", \"Color:\")) self.label_2.setText(_translate(\"MainWindow\", \"Value (0.0~1.0):\")) self.valueLine.setText(_translate(\"MainWindow\",", "\"Open New Image\")) self.choosePointBtn.setText(_translate(\"MainWindow\", \"Choose a Point\")) self.findContourBtn.setText(_translate(\"MainWindow\", \"Find Contour\")) self.thicknessLabel.setText(_translate(\"MainWindow\", \"Thickness:\")) self.thicknessComboBox.setItemText(0,", "0)) self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.choosePointBtn.setObjectName(\"choosePointBtn\") self.controlPanelLayout.addWidget(self.choosePointBtn) self.findContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.findContourBtn.setObjectName(\"findContourBtn\")", "\"3\")) self.colorLabel.setText(_translate(\"MainWindow\", \"Color:\")) self.label_2.setText(_translate(\"MainWindow\", \"Value (0.0~1.0):\")) self.valueLine.setText(_translate(\"MainWindow\", \"0.5\")) self.deleteContourBtn.setText(_translate(\"MainWindow\", \"Delete Contour\")) self.saveContourBtn.setText(_translate(\"MainWindow\", \"Save", 
"self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.deleteContourBtn.setObjectName(\"deleteContourBtn\") self.controlPanelLayout.addWidget(self.deleteContourBtn) self.saveContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.saveContourBtn.setObjectName(\"saveContourBtn\") self.controlPanelLayout.addWidget(self.saveContourBtn)", "= QtWidgets.QLabel(self.contourOptionWidget) self.thicknessLabel.setObjectName(\"thicknessLabel\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.thicknessLabel) self.thicknessComboBox = QtWidgets.QComboBox(self.contourOptionWidget) self.thicknessComboBox.setObjectName(\"thicknessComboBox\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\")", "self.controlPanelLayout.addWidget(self.deleteContourBtn) self.saveContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.saveContourBtn.setObjectName(\"saveContourBtn\") self.controlPanelLayout.addWidget(self.saveContourBtn) self.mainLayout.addWidget(self.controlPanelWidget) MainWindow.setCentralWidget(self.centralwidget) self.menubar", "= QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) self.mainLayout.setContentsMargins(50, 50, 50, 50) self.mainLayout.setObjectName(\"mainLayout\") self.imageWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.imageWidget.setMinimumSize(QtCore.QSize(500, 300)) self.imageWidget.setObjectName(\"imageWidget\")", "will appear here\")) self.openFileBtn.setText(_translate(\"MainWindow\", \"Open New Image\")) self.choosePointBtn.setText(_translate(\"MainWindow\", \"Choose a Point\")) self.findContourBtn.setText(_translate(\"MainWindow\", \"Find", "QtWidgets.QWidget(self.centralwidget) 
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 1386, 720)) self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\") self.mainLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) self.mainLayout.setContentsMargins(50, 50, 50, 50)", "= QtWidgets.QAction(MainWindow) self.actionSave.setObjectName(\"actionSave\") self.actionQuit = QtWidgets.QAction(MainWindow) self.actionQuit.setObjectName(\"actionQuit\") self.menuFile.addAction(self.actionOpen) self.menuFile.addAction(self.actionOpen_2) self.menuFile.addAction(self.actionSave) self.menuFile.addAction(self.actionQuit) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuEdit.menuAction())", "20, 20, 20) self.controlPanelLayout.setObjectName(\"controlPanelLayout\") self.openFileBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0)) self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.openFileBtn.setObjectName(\"openFileBtn\") self.controlPanelLayout.addWidget(self.openFileBtn)", "self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.choosePointBtn.setObjectName(\"choosePointBtn\") self.controlPanelLayout.addWidget(self.choosePointBtn) self.findContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.findContourBtn.setObjectName(\"findContourBtn\") self.controlPanelLayout.addWidget(self.findContourBtn)", "1386, 720)) self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\") self.mainLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) self.mainLayout.setContentsMargins(50, 50, 50, 50) self.mainLayout.setObjectName(\"mainLayout\") self.imageWidget =", "from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(1331,", 
"self.thicknessComboBox.addItem(\"\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox) self.colorLabel = QtWidgets.QLabel(self.contourOptionWidget) self.colorLabel.setObjectName(\"colorLabel\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel) self.colorBtn =", "QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) self.mainLayout.setContentsMargins(50, 50, 50, 50) self.mainLayout.setObjectName(\"mainLayout\") self.imageWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.imageWidget.setMinimumSize(QtCore.QSize(500, 300)) self.imageWidget.setObjectName(\"imageWidget\") self.imageLayout", "QtWidgets.QLabel(self.contourOptionWidget) self.thicknessLabel.setObjectName(\"thicknessLabel\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.thicknessLabel) self.thicknessComboBox = QtWidgets.QComboBox(self.contourOptionWidget) self.thicknessComboBox.setObjectName(\"thicknessComboBox\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.contourOptionLayout.setWidget(0,", "self.menubar.setGeometry(QtCore.QRect(0, 0, 1331, 21)) self.menubar.setObjectName(\"menubar\") self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName(\"menuFile\") self.menuEdit = QtWidgets.QMenu(self.menubar) self.menuEdit.setObjectName(\"menuEdit\")", "QtWidgets.QPushButton(self.controlPanelWidget) self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0)) self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.choosePointBtn.setObjectName(\"choosePointBtn\") self.controlPanelLayout.addWidget(self.choosePointBtn) self.findContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.findContourBtn.setMaximumSize(QtCore.QSize(200,", "QtWidgets.QFormLayout(self.contourOptionWidget) self.contourOptionLayout.setContentsMargins(5, 5, 5, 5) 
self.contourOptionLayout.setObjectName(\"contourOptionLayout\") self.thicknessLabel = QtWidgets.QLabel(self.contourOptionWidget) self.thicknessLabel.setObjectName(\"thicknessLabel\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.thicknessLabel)", "self.mainLayout.addWidget(self.controlPanelWidget) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 1331, 21)) self.menubar.setObjectName(\"menubar\") self.menuFile = QtWidgets.QMenu(self.menubar)", "QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 1331, 21)) self.menubar.setObjectName(\"menubar\") self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName(\"menuFile\") self.menuEdit = QtWidgets.QMenu(self.menubar)", "QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(1331, 732) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setMinimumSize(QtCore.QSize(800,", "0)) self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.deleteContourBtn.setObjectName(\"deleteContourBtn\") self.controlPanelLayout.addWidget(self.deleteContourBtn) self.saveContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.saveContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.saveContourBtn.setObjectName(\"saveContourBtn\")", "if __name__ == \"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui", "All changes made in this file will be lost! 
from PyQt5 import QtCore,", "Form implementation generated from reading ui file 'basic.ui' # # Created by: PyQt5", "QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox) self.colorLabel = QtWidgets.QLabel(self.contourOptionWidget) self.colorLabel.setObjectName(\"colorLabel\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel) self.colorBtn = QtWidgets.QPushButton(self.contourOptionWidget) self.colorBtn.setText(\"\")", "changes made in this file will be lost! from PyQt5 import QtCore, QtGui,", "self.openFileBtn.setObjectName(\"openFileBtn\") self.controlPanelLayout.addWidget(self.openFileBtn) self.choosePointBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0)) self.choosePointBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.choosePointBtn.setObjectName(\"choosePointBtn\") self.controlPanelLayout.addWidget(self.choosePointBtn) self.findContourBtn =", "Point\")) self.findContourBtn.setText(_translate(\"MainWindow\", \"Find Contour\")) self.thicknessLabel.setText(_translate(\"MainWindow\", \"Thickness:\")) self.thicknessComboBox.setItemText(0, _translate(\"MainWindow\", \"1\")) self.thicknessComboBox.setItemText(1, _translate(\"MainWindow\", \"2\")) self.thicknessComboBox.setItemText(2,", "self.actionQuit.setObjectName(\"actionQuit\") self.menuFile.addAction(self.actionOpen) self.menuFile.addAction(self.actionOpen_2) self.menuFile.addAction(self.actionSave) self.menuFile.addAction(self.actionQuit) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuEdit.menuAction()) self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow):", "720)) self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\") self.mainLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) self.mainLayout.setContentsMargins(50, 50, 50, 50) 
self.mainLayout.setObjectName(\"mainLayout\") self.imageWidget = QtWidgets.QWidget(self.horizontalLayoutWidget)", "be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow):", "self.choosePointBtn.setObjectName(\"choosePointBtn\") self.controlPanelLayout.addWidget(self.choosePointBtn) self.findContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.findContourBtn.setObjectName(\"findContourBtn\") self.controlPanelLayout.addWidget(self.findContourBtn) self.contourOptionWidget =", "MainWindow.setStatusBar(self.statusBar) self.actionOpen = QtWidgets.QAction(MainWindow) self.actionOpen.setObjectName(\"actionOpen\") self.actionOpen_2 = QtWidgets.QAction(MainWindow) self.actionOpen_2.setObjectName(\"actionOpen_2\") self.actionSave = QtWidgets.QAction(MainWindow) self.actionSave.setObjectName(\"actionSave\")", "self.valueLine) self.controlPanelLayout.addWidget(self.contourOptionWidget) self.deleteContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.deleteContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.deleteContourBtn.setObjectName(\"deleteContourBtn\") self.controlPanelLayout.addWidget(self.deleteContourBtn) self.saveContourBtn =", "self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setMinimumSize(QtCore.QSize(800, 400)) self.centralwidget.setObjectName(\"centralwidget\") self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget) self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 1386, 720))", "self.actionOpen.setObjectName(\"actionOpen\") self.actionOpen_2 = QtWidgets.QAction(MainWindow) self.actionOpen_2.setObjectName(\"actionOpen_2\") self.actionSave = QtWidgets.QAction(MainWindow) self.actionSave.setObjectName(\"actionSave\") self.actionQuit = 
QtWidgets.QAction(MainWindow) self.actionQuit.setObjectName(\"actionQuit\")", "self.thicknessComboBox.setItemText(2, _translate(\"MainWindow\", \"3\")) self.colorLabel.setText(_translate(\"MainWindow\", \"Color:\")) self.label_2.setText(_translate(\"MainWindow\", \"Value (0.0~1.0):\")) self.valueLine.setText(_translate(\"MainWindow\", \"0.5\")) self.deleteContourBtn.setText(_translate(\"MainWindow\", \"Delete Contour\"))", "retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.imageLabel.setText(_translate(\"MainWindow\", \"Image will appear here\")) self.openFileBtn.setText(_translate(\"MainWindow\",", "0, 1331, 21)) self.menubar.setObjectName(\"menubar\") self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName(\"menuFile\") self.menuEdit = QtWidgets.QMenu(self.menubar) self.menuEdit.setObjectName(\"menuEdit\") self.menuHelp", "self.colorBtn.setText(\"\") self.colorBtn.setObjectName(\"colorBtn\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorBtn) self.label_2 = QtWidgets.QLabel(self.contourOptionWidget) self.label_2.setObjectName(\"label_2\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2) self.valueLine", "-1, 1386, 720)) self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\") self.mainLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) self.mainLayout.setContentsMargins(50, 50, 50, 50) self.mainLayout.setObjectName(\"mainLayout\") self.imageWidget", "self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215)) self.controlPanelWidget.setObjectName(\"controlPanelWidget\") self.controlPanelLayout = QtWidgets.QVBoxLayout(self.controlPanelWidget) self.controlPanelLayout.setContentsMargins(20, 20, 20, 20) self.controlPanelLayout.setObjectName(\"controlPanelLayout\") self.openFileBtn =", "self.valueLine = QtWidgets.QLineEdit(self.contourOptionWidget) 
self.valueLine.setObjectName(\"valueLine\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.valueLine) self.controlPanelLayout.addWidget(self.contourOptionWidget) self.deleteContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.deleteContourBtn.setMinimumSize(QtCore.QSize(200, 0))", "732) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setMinimumSize(QtCore.QSize(800, 400)) self.centralwidget.setObjectName(\"centralwidget\") self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget) self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 1386,", "= QtWidgets.QAction(MainWindow) self.actionOpen.setObjectName(\"actionOpen\") self.actionOpen_2 = QtWidgets.QAction(MainWindow) self.actionOpen_2.setObjectName(\"actionOpen_2\") self.actionSave = QtWidgets.QAction(MainWindow) self.actionSave.setObjectName(\"actionSave\") self.actionQuit =", "\"Save Contour\")) self.menuFile.setTitle(_translate(\"MainWindow\", \"File\")) self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\")) self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\")) self.actionOpen.setText(_translate(\"MainWindow\", \"New\")) self.actionOpen_2.setText(_translate(\"MainWindow\", \"Open\")) self.actionSave.setText(_translate(\"MainWindow\",", "Drawable import DrawableQLabel if __name__ == \"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow", "\"MainWindow\")) self.imageLabel.setText(_translate(\"MainWindow\", \"Image will appear here\")) self.openFileBtn.setText(_translate(\"MainWindow\", \"Open New Image\")) self.choosePointBtn.setText(_translate(\"MainWindow\", \"Choose a", "self.findContourBtn.setObjectName(\"findContourBtn\") self.controlPanelLayout.addWidget(self.findContourBtn) self.contourOptionWidget = QtWidgets.QWidget(self.controlPanelWidget) self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0)) self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215)) 
self.contourOptionWidget.setObjectName(\"contourOptionWidget\") self.contourOptionLayout = QtWidgets.QFormLayout(self.contourOptionWidget)", "MainWindow.setMenuBar(self.menubar) self.statusBar = QtWidgets.QStatusBar(MainWindow) self.statusBar.setObjectName(\"statusBar\") MainWindow.setStatusBar(self.statusBar) self.actionOpen = QtWidgets.QAction(MainWindow) self.actionOpen.setObjectName(\"actionOpen\") self.actionOpen_2 = QtWidgets.QAction(MainWindow)", "self.colorBtn) self.label_2 = QtWidgets.QLabel(self.contourOptionWidget) self.label_2.setObjectName(\"label_2\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2) self.valueLine = QtWidgets.QLineEdit(self.contourOptionWidget) self.valueLine.setObjectName(\"valueLine\") self.contourOptionLayout.setWidget(2,", "-*- # Form implementation generated from reading ui file 'basic.ui' # # Created", "self.valueLine.setText(_translate(\"MainWindow\", \"0.5\")) self.deleteContourBtn.setText(_translate(\"MainWindow\", \"Delete Contour\")) self.saveContourBtn.setText(_translate(\"MainWindow\", \"Save Contour\")) self.menuFile.setTitle(_translate(\"MainWindow\", \"File\")) self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\")) self.menuHelp.setTitle(_translate(\"MainWindow\",", "PyQt5 UI code generator 5.6 # # WARNING! 
All changes made in this", "5, 5, 5) self.contourOptionLayout.setObjectName(\"contourOptionLayout\") self.thicknessLabel = QtWidgets.QLabel(self.contourOptionWidget) self.thicknessLabel.setObjectName(\"thicknessLabel\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.thicknessLabel) self.thicknessComboBox =", "20) self.controlPanelLayout.setObjectName(\"controlPanelLayout\") self.openFileBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0)) self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.openFileBtn.setObjectName(\"openFileBtn\") self.controlPanelLayout.addWidget(self.openFileBtn) self.choosePointBtn =", "self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\") self.mainLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) self.mainLayout.setContentsMargins(50, 50, 50, 50) self.mainLayout.setObjectName(\"mainLayout\") self.imageWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.imageWidget.setMinimumSize(QtCore.QSize(500,", "self.openFileBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0)) self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.openFileBtn.setObjectName(\"openFileBtn\") self.controlPanelLayout.addWidget(self.openFileBtn) self.choosePointBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.choosePointBtn.setMinimumSize(QtCore.QSize(200,", "MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(1331, 732) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setMinimumSize(QtCore.QSize(800, 400)) self.centralwidget.setObjectName(\"centralwidget\") self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget) self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1,", "self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox) self.colorLabel = QtWidgets.QLabel(self.contourOptionWidget) 
self.colorLabel.setObjectName(\"colorLabel\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel) self.colorBtn = QtWidgets.QPushButton(self.contourOptionWidget)", "_translate(\"MainWindow\", \"1\")) self.thicknessComboBox.setItemText(1, _translate(\"MainWindow\", \"2\")) self.thicknessComboBox.setItemText(2, _translate(\"MainWindow\", \"3\")) self.colorLabel.setText(_translate(\"MainWindow\", \"Color:\")) self.label_2.setText(_translate(\"MainWindow\", \"Value (0.0~1.0):\"))", "self.colorLabel) self.colorBtn = QtWidgets.QPushButton(self.contourOptionWidget) self.colorBtn.setText(\"\") self.colorBtn.setObjectName(\"colorBtn\") self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorBtn) self.label_2 = QtWidgets.QLabel(self.contourOptionWidget) self.label_2.setObjectName(\"label_2\")", "= QtWidgets.QWidget(self.controlPanelWidget) self.contourOptionWidget.setMinimumSize(QtCore.QSize(200, 0)) self.contourOptionWidget.setMaximumSize(QtCore.QSize(200, 16777215)) self.contourOptionWidget.setObjectName(\"contourOptionWidget\") self.contourOptionLayout = QtWidgets.QFormLayout(self.contourOptionWidget) self.contourOptionLayout.setContentsMargins(5, 5, 5,", "QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName(\"menuFile\") self.menuEdit = QtWidgets.QMenu(self.menubar) self.menuEdit.setObjectName(\"menuEdit\") self.menuHelp = QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName(\"menuHelp\") MainWindow.setMenuBar(self.menubar) self.statusBar =", "= QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName(\"menuHelp\") MainWindow.setMenuBar(self.menubar) self.statusBar = QtWidgets.QStatusBar(MainWindow) self.statusBar.setObjectName(\"statusBar\") MainWindow.setStatusBar(self.statusBar) self.actionOpen = QtWidgets.QAction(MainWindow) self.actionOpen.setObjectName(\"actionOpen\")", "-*- coding: utf-8 -*- # Form implementation generated from reading ui file 'basic.ui'", "self.label_2 = 
QtWidgets.QLabel(self.contourOptionWidget) self.label_2.setObjectName(\"label_2\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2) self.valueLine = QtWidgets.QLineEdit(self.contourOptionWidget) self.valueLine.setObjectName(\"valueLine\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole,", "\"Open\")) self.actionSave.setText(_translate(\"MainWindow\", \"Save\")) self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\")) from Drawable import DrawableQLabel if __name__ == \"__main__\":", "self.menuHelp = QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName(\"menuHelp\") MainWindow.setMenuBar(self.menubar) self.statusBar = QtWidgets.QStatusBar(MainWindow) self.statusBar.setObjectName(\"statusBar\") MainWindow.setStatusBar(self.statusBar) self.actionOpen = QtWidgets.QAction(MainWindow)", "self.contourOptionLayout = QtWidgets.QFormLayout(self.contourOptionWidget) self.contourOptionLayout.setContentsMargins(5, 5, 5, 5) self.contourOptionLayout.setObjectName(\"contourOptionLayout\") self.thicknessLabel = QtWidgets.QLabel(self.contourOptionWidget) self.thicknessLabel.setObjectName(\"thicknessLabel\") self.contourOptionLayout.setWidget(0,", "self.imageLayout.addWidget(self.imageLabel, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter) self.mainLayout.addWidget(self.imageWidget) self.controlPanelWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215)) self.controlPanelWidget.setObjectName(\"controlPanelWidget\") self.controlPanelLayout = QtWidgets.QVBoxLayout(self.controlPanelWidget)", "self.menuFile.addAction(self.actionOpen_2) self.menuFile.addAction(self.actionSave) self.menuFile.addAction(self.actionQuit) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuEdit.menuAction()) self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def 
retranslateUi(self, MainWindow): _translate =", "== \"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow()", "QtWidgets.QComboBox(self.contourOptionWidget) self.thicknessComboBox.setObjectName(\"thicknessComboBox\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.thicknessComboBox.addItem(\"\") self.contourOptionLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.thicknessComboBox) self.colorLabel = QtWidgets.QLabel(self.contourOptionWidget) self.colorLabel.setObjectName(\"colorLabel\") self.contourOptionLayout.setWidget(1,", "self.menuFile.addAction(self.actionQuit) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuEdit.menuAction()) self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\",", "self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\")) from Drawable import DrawableQLabel if __name__ == \"__main__\": import sys app", "QtWidgets.QWidget(MainWindow) self.centralwidget.setMinimumSize(QtCore.QSize(800, 400)) self.centralwidget.setObjectName(\"centralwidget\") self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget) self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 1386, 720)) self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\") self.mainLayout", "self.label_2.setObjectName(\"label_2\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2) self.valueLine = QtWidgets.QLineEdit(self.contourOptionWidget) self.valueLine.setObjectName(\"valueLine\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.valueLine) self.controlPanelLayout.addWidget(self.contourOptionWidget) self.deleteContourBtn", "Contour\")) 
self.saveContourBtn.setText(_translate(\"MainWindow\", \"Save Contour\")) self.menuFile.setTitle(_translate(\"MainWindow\", \"File\")) self.menuEdit.setTitle(_translate(\"MainWindow\", \"Edit\")) self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\")) self.actionOpen.setText(_translate(\"MainWindow\", \"New\")) self.actionOpen_2.setText(_translate(\"MainWindow\",", "self.contourOptionLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorBtn) self.label_2 = QtWidgets.QLabel(self.contourOptionWidget) self.label_2.setObjectName(\"label_2\") self.contourOptionLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2) self.valueLine = QtWidgets.QLineEdit(self.contourOptionWidget)", "\"Color:\")) self.label_2.setText(_translate(\"MainWindow\", \"Value (0.0~1.0):\")) self.valueLine.setText(_translate(\"MainWindow\", \"0.5\")) self.deleteContourBtn.setText(_translate(\"MainWindow\", \"Delete Contour\")) self.saveContourBtn.setText(_translate(\"MainWindow\", \"Save Contour\")) self.menuFile.setTitle(_translate(\"MainWindow\",", "= QtWidgets.QPushButton(self.controlPanelWidget) self.openFileBtn.setMinimumSize(QtCore.QSize(200, 0)) self.openFileBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.openFileBtn.setObjectName(\"openFileBtn\") self.controlPanelLayout.addWidget(self.openFileBtn) self.choosePointBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.choosePointBtn.setMinimumSize(QtCore.QSize(200, 0))", "= QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\")) self.imageLabel.setText(_translate(\"MainWindow\", \"Image will appear here\")) self.openFileBtn.setText(_translate(\"MainWindow\", \"Open New Image\"))", "self.controlPanelLayout.addWidget(self.choosePointBtn) self.findContourBtn = QtWidgets.QPushButton(self.controlPanelWidget) self.findContourBtn.setMinimumSize(QtCore.QSize(200, 0)) self.findContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) 
self.findContourBtn.setObjectName(\"findContourBtn\") self.controlPanelLayout.addWidget(self.findContourBtn) self.contourOptionWidget = QtWidgets.QWidget(self.controlPanelWidget)", "self.actionOpen_2 = QtWidgets.QAction(MainWindow) self.actionOpen_2.setObjectName(\"actionOpen_2\") self.actionSave = QtWidgets.QAction(MainWindow) self.actionSave.setObjectName(\"actionSave\") self.actionQuit = QtWidgets.QAction(MainWindow) self.actionQuit.setObjectName(\"actionQuit\") self.menuFile.addAction(self.actionOpen)", "16777215)) self.contourOptionWidget.setObjectName(\"contourOptionWidget\") self.contourOptionLayout = QtWidgets.QFormLayout(self.contourOptionWidget) self.contourOptionLayout.setContentsMargins(5, 5, 5, 5) self.contourOptionLayout.setObjectName(\"contourOptionLayout\") self.thicknessLabel = QtWidgets.QLabel(self.contourOptionWidget)", "self.imageLayout.setObjectName(\"imageLayout\") self.imageLabel = DrawableQLabel(self.imageWidget) self.imageLabel.setObjectName(\"imageLabel\") self.imageLayout.addWidget(self.imageLabel, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter) self.mainLayout.addWidget(self.imageWidget) self.controlPanelWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.controlPanelWidget.setMaximumSize(QtCore.QSize(300,", "self.saveContourBtn.setMaximumSize(QtCore.QSize(200, 16777215)) self.saveContourBtn.setObjectName(\"saveContourBtn\") self.controlPanelLayout.addWidget(self.saveContourBtn) self.mainLayout.addWidget(self.controlPanelWidget) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 1331, 21))", "self.openFileBtn.setText(_translate(\"MainWindow\", \"Open New Image\")) self.choosePointBtn.setText(_translate(\"MainWindow\", \"Choose a Point\")) self.findContourBtn.setText(_translate(\"MainWindow\", \"Find Contour\")) self.thicknessLabel.setText(_translate(\"MainWindow\", \"Thickness:\"))", "appear here\")) 
self.openFileBtn.setText(_translate(\"MainWindow\", \"Open New Image\")) self.choosePointBtn.setText(_translate(\"MainWindow\", \"Choose a Point\")) self.findContourBtn.setText(_translate(\"MainWindow\", \"Find Contour\"))", "self.actionSave.setObjectName(\"actionSave\") self.actionQuit = QtWidgets.QAction(MainWindow) self.actionQuit.setObjectName(\"actionQuit\") self.menuFile.addAction(self.actionOpen) self.menuFile.addAction(self.actionOpen_2) self.menuFile.addAction(self.actionSave) self.menuFile.addAction(self.actionQuit) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuEdit.menuAction()) self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow)", "QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter) self.mainLayout.addWidget(self.imageWidget) self.controlPanelWidget = QtWidgets.QWidget(self.horizontalLayoutWidget) self.controlPanelWidget.setMaximumSize(QtCore.QSize(300, 16777215)) self.controlPanelWidget.setObjectName(\"controlPanelWidget\") self.controlPanelLayout = QtWidgets.QVBoxLayout(self.controlPanelWidget) self.controlPanelLayout.setContentsMargins(20, 20,", "made in this file will be lost! 
from PyQt5 import QtCore, QtGui, QtWidgets", "self.imageLabel.setText(_translate(\"MainWindow\", \"Image will appear here\")) self.openFileBtn.setText(_translate(\"MainWindow\", \"Open New Image\")) self.choosePointBtn.setText(_translate(\"MainWindow\", \"Choose a Point\"))", "self.menuFile.addAction(self.actionOpen) self.menuFile.addAction(self.actionOpen_2) self.menuFile.addAction(self.actionSave) self.menuFile.addAction(self.actionQuit) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuEdit.menuAction()) self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate", "21)) self.menubar.setObjectName(\"menubar\") self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName(\"menuFile\") self.menuEdit = QtWidgets.QMenu(self.menubar) self.menuEdit.setObjectName(\"menuEdit\") self.menuHelp = QtWidgets.QMenu(self.menubar)" ]
@webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST'])
@cross_origin()
@permission(Scope.write_webhooks)
def stackdriver():
    """Receive a Stackdriver incident notification and store it as an alert.

    Returns:
        A 201 JSON response with the created/updated alert on success.

    Raises:
        ApiError: 400 for an unparseable payload, 403 when the alert is
            rejected by policy, 500 for any other processing failure.
    """
    try:
        incoming_alert = parse_stackdriver(request.get_json(force=True))
    except ValueError as e:
        # bad/missing fields in the webhook payload are a client error
        raise ApiError(str(e), 400) from e

    incoming_alert.customer = assign_customer(wanted=incoming_alert.customer)
    add_remote_ip(request, incoming_alert)

    try:
        alert = process_alert(incoming_alert)
    except RejectException as e:
        raise ApiError(str(e), 403) from e
    except Exception as e:
        # boundary handler: surface any unexpected failure as a 500 ApiError
        raise ApiError(str(e), 500) from e

    text = 'stackdriver alert received via webhook'
    write_audit_trail.send(current_app._get_current_object(), event='webhook-received', message=text,
                           user=g.user, customers=g.customers, scopes=g.scopes, resource_id=alert.id,
                           type='alert', request=request)

    if alert:
        return jsonify(status='ok', id=alert.id, alert=alert.serialize), 201
    else:
        raise ApiError('insert or update of StackDriver notification failed', 500)
import webhooks LOG = logging.getLogger(__name__) JSON", "service=service, group=incident.get('group', 'Cloud'), text=incident['summary'], attributes={ 'incidentId': incident['incident_id'], 'resourceId': incident['resource_id'], 'moreInfo': '<a href=\"%s\" target=\"_blank\">Stackdriver", "from datetime import datetime from typing import Any, Dict from flask import current_app,", "cross_origin from alerta.auth.decorators import permission from alerta.exceptions import ApiError, RejectException from alerta.models.alert import", "alerta.auth.decorators import permission from alerta.exceptions import ApiError, RejectException from alerta.models.alert import Alert from", "Alert: incident = notification['incident'] state = incident['state'] # 'documentation' is an optional field", "permission from alerta.exceptions import ApiError, RejectException from alerta.models.alert import Alert from alerta.models.enums import", "= 'ok' create_time = datetime.utcfromtimestamp(incident['ended_at']) else: severity = 'indeterminate' return Alert( resource=incident['resource_name'], event=incident['condition_name'],", "request=request) if alert: return jsonify(status='ok', id=alert.id, alert=alert.serialize), 201 else: raise ApiError('insert or update", "= notification['incident'] state = incident['state'] # 'documentation' is an optional field that you", "import ApiError, RejectException from alerta.models.alert import Alert from alerta.models.enums import Scope from alerta.utils.api", "an optional field that you can use to customize # your alert sending", "datetime.utcfromtimestamp(incident['ended_at']) else: severity = 'indeterminate' return Alert( resource=incident['resource_name'], event=incident['condition_name'], environment=incident.get('environment', 'Production'), severity=severity, status=status,", "from alerta.utils.api import add_remote_ip, assign_customer, process_alert from alerta.utils.audit import write_audit_trail from . 
import", "customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert', request=request) if alert: return jsonify(status='ok', id=alert.id, alert=alert.serialize), 201 else:", "alert received via webhook' write_audit_trail.send(current_app._get_current_object(), event='webhook-received', message=text, user=g.user, customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert', request=request)", "Any, Dict from flask import current_app, g, jsonify, request from flask_cors import cross_origin", "ApiError(str(e), 500) text = 'stackdriver alert received via webhook' write_audit_trail.send(current_app._get_current_object(), event='webhook-received', message=text, user=g.user,", "'indeterminate' return Alert( resource=incident['resource_name'], event=incident['condition_name'], environment=incident.get('environment', 'Production'), severity=severity, status=status, service=service, group=incident.get('group', 'Cloud'), text=incident['summary'],", "incident.update(content) except Exception as e: LOG.warning(\"Invalid documentation content: '{}'\".format(incident['documentation'])) service = [] status", "alerta.exceptions import ApiError, RejectException from alerta.models.alert import Alert from alerta.models.enums import Scope from", "elif state == 'acknowledged': status = 'ack' elif state == 'closed': severity =", "incident['policy_name']: service.append(incident['policy_name']) if state == 'open': create_time = datetime.utcfromtimestamp(incident['started_at']) elif state == 'acknowledged':", "RejectException from alerta.models.alert import Alert from alerta.models.enums import Scope from alerta.utils.api import add_remote_ip,", "@permission(Scope.write_webhooks) def stackdriver(): try: incomingAlert = parse_stackdriver(request.get_json(force=True)) except ValueError as e: raise ApiError(str(e),", "except RejectException as e: raise ApiError(str(e), 403) except Exception as e: raise ApiError(str(e),", "state = incident['state'] # 
def parse_stackdriver(notification: JSON) -> Alert:
    """Convert a Stackdriver incident notification payload into an Alert.

    Args:
        notification: decoded webhook JSON; must contain an ``incident`` dict.

    Returns:
        An unsaved ``Alert`` built from the incident fields.

    Raises:
        KeyError: if a required incident field is missing from the payload.
    """
    incident = notification['incident']
    state = incident['state']

    # 'documentation' is an optional field that senders can use to customize
    # the alert by embedding a JSON object; its keys override incident fields.
    if 'documentation' in incident:
        try:
            content = json.loads(incident['documentation']['content'])
            incident.update(content)
        except (TypeError, KeyError, ValueError):
            # best-effort: malformed documentation must not reject the alert;
            # lazy %s args avoid formatting cost when the level is filtered
            LOG.warning("Invalid documentation content: '%s'", incident['documentation'])

    service = []
    status = None
    create_time = None  # type: ignore
    severity = incident.get('severity', 'critical')

    if incident['policy_name']:
        service.append(incident['policy_name'])

    if state == 'open':
        create_time = datetime.utcfromtimestamp(incident['started_at'])
    elif state == 'acknowledged':
        status = 'ack'
    elif state == 'closed':
        severity = 'ok'
        create_time = datetime.utcfromtimestamp(incident['ended_at'])
    else:
        # unknown state reported by Stackdriver; don't guess at a severity
        severity = 'indeterminate'

    return Alert(
        resource=incident['resource_name'],
        event=incident['condition_name'],
        environment=incident.get('environment', 'Production'),
        severity=severity,
        status=status,
        service=service,
        group=incident.get('group', 'Cloud'),
        text=incident['summary'],
        attributes={
            'incidentId': incident['incident_id'],
            'resourceId': incident['resource_id'],
            'moreInfo': '<a href="%s" target="_blank">Stackdriver Console</a>' % incident['url']
        },
        customer=incident.get('customer'),
        origin=incident.get('origin', 'Stackdriver'),
        event_type='stackdriverAlert',
        create_time=create_time,
        raw_data=notification
    )
@webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST']) @cross_origin() @permission(Scope.write_webhooks) def stackdriver(): try: incomingAlert = parse_stackdriver(request.get_json(force=True))", "group=incident.get('group', 'Cloud'), text=incident['summary'], attributes={ 'incidentId': incident['incident_id'], 'resourceId': incident['resource_id'], 'moreInfo': '<a href=\"%s\" target=\"_blank\">Stackdriver Console</a>'", "ApiError(str(e), 400) incomingAlert.customer = assign_customer(wanted=incomingAlert.customer) add_remote_ip(request, incomingAlert) try: alert = process_alert(incomingAlert) except RejectException", "@webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST']) @cross_origin() @permission(Scope.write_webhooks) def stackdriver(): try: incomingAlert = parse_stackdriver(request.get_json(force=True)) except ValueError", "parse_stackdriver(notification: JSON) -> Alert: incident = notification['incident'] state = incident['state'] # 'documentation' is", "documentation content: '{}'\".format(incident['documentation'])) service = [] status = None create_time = None #", "Exception as e: LOG.warning(\"Invalid documentation content: '{}'\".format(incident['documentation'])) service = [] status = None", "alerta.utils.audit import write_audit_trail from . import webhooks LOG = logging.getLogger(__name__) JSON = Dict[str,", "from alerta.utils.audit import write_audit_trail from . 
import webhooks LOG = logging.getLogger(__name__) JSON =", "webhooks LOG = logging.getLogger(__name__) JSON = Dict[str, Any] def parse_stackdriver(notification: JSON) -> Alert:", "process_alert(incomingAlert) except RejectException as e: raise ApiError(str(e), 403) except Exception as e: raise", "= datetime.utcfromtimestamp(incident['ended_at']) else: severity = 'indeterminate' return Alert( resource=incident['resource_name'], event=incident['condition_name'], environment=incident.get('environment', 'Production'), severity=severity,", "'stackdriver alert received via webhook' write_audit_trail.send(current_app._get_current_object(), event='webhook-received', message=text, user=g.user, customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert',", "you can use to customize # your alert sending a json if 'documentation'", "try: content = json.loads(incident['documentation']['content']) incident.update(content) except Exception as e: LOG.warning(\"Invalid documentation content: '{}'\".format(incident['documentation']))", "write_audit_trail.send(current_app._get_current_object(), event='webhook-received', message=text, user=g.user, customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert', request=request) if alert: return jsonify(status='ok',", "text=incident['summary'], attributes={ 'incidentId': incident['incident_id'], 'resourceId': incident['resource_id'], 'moreInfo': '<a href=\"%s\" target=\"_blank\">Stackdriver Console</a>' % incident['url']", "your alert sending a json if 'documentation' in incident: try: content = json.loads(incident['documentation']['content'])", "parse_stackdriver(request.get_json(force=True)) except ValueError as e: raise ApiError(str(e), 400) incomingAlert.customer = assign_customer(wanted=incomingAlert.customer) add_remote_ip(request, incomingAlert)", "if 'documentation' in incident: try: content = json.loads(incident['documentation']['content']) incident.update(content) except Exception as e:", "[] status 
= None create_time = None # type: ignore severity = incident.get('severity',", "create_time = datetime.utcfromtimestamp(incident['ended_at']) else: severity = 'indeterminate' return Alert( resource=incident['resource_name'], event=incident['condition_name'], environment=incident.get('environment', 'Production'),", "= 'indeterminate' return Alert( resource=incident['resource_name'], event=incident['condition_name'], environment=incident.get('environment', 'Production'), severity=severity, status=status, service=service, group=incident.get('group', 'Cloud'),", "as e: raise ApiError(str(e), 403) except Exception as e: raise ApiError(str(e), 500) text", "e: raise ApiError(str(e), 500) text = 'stackdriver alert received via webhook' write_audit_trail.send(current_app._get_current_object(), event='webhook-received',", "use to customize # your alert sending a json if 'documentation' in incident:", "attributes={ 'incidentId': incident['incident_id'], 'resourceId': incident['resource_id'], 'moreInfo': '<a href=\"%s\" target=\"_blank\">Stackdriver Console</a>' % incident['url'] },", "alert = process_alert(incomingAlert) except RejectException as e: raise ApiError(str(e), 403) except Exception as", "= incident['state'] # 'documentation' is an optional field that you can use to", "= logging.getLogger(__name__) JSON = Dict[str, Any] def parse_stackdriver(notification: JSON) -> Alert: incident =", "content: '{}'\".format(incident['documentation'])) service = [] status = None create_time = None # type:", "LOG = logging.getLogger(__name__) JSON = Dict[str, Any] def parse_stackdriver(notification: JSON) -> Alert: incident", "json if 'documentation' in incident: try: content = json.loads(incident['documentation']['content']) incident.update(content) except Exception as", "'closed': severity = 'ok' create_time = datetime.utcfromtimestamp(incident['ended_at']) else: severity = 'indeterminate' return Alert(", "scopes=g.scopes, resource_id=alert.id, type='alert', request=request) if 
alert: return jsonify(status='ok', id=alert.id, alert=alert.serialize), 201 else: raise", "import Any, Dict from flask import current_app, g, jsonify, request from flask_cors import", "'open': create_time = datetime.utcfromtimestamp(incident['started_at']) elif state == 'acknowledged': status = 'ack' elif state", "import webhooks LOG = logging.getLogger(__name__) JSON = Dict[str, Any] def parse_stackdriver(notification: JSON) ->", "= datetime.utcfromtimestamp(incident['started_at']) elif state == 'acknowledged': status = 'ack' elif state == 'closed':", "severity = 'indeterminate' return Alert( resource=incident['resource_name'], event=incident['condition_name'], environment=incident.get('environment', 'Production'), severity=severity, status=status, service=service, group=incident.get('group',", "incident['url'] }, customer=incident.get('customer'), origin=incident.get('origin', 'Stackdriver'), event_type='stackdriverAlert', create_time=create_time, raw_data=notification ) @webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST']) @cross_origin()", "from alerta.auth.decorators import permission from alerta.exceptions import ApiError, RejectException from alerta.models.alert import Alert", "target=\"_blank\">Stackdriver Console</a>' % incident['url'] }, customer=incident.get('customer'), origin=incident.get('origin', 'Stackdriver'), event_type='stackdriverAlert', create_time=create_time, raw_data=notification ) @webhooks.route('/webhooks/stackdriver',", "incident.get('severity', 'critical') if incident['policy_name']: service.append(incident['policy_name']) if state == 'open': create_time = datetime.utcfromtimestamp(incident['started_at']) elif", "message=text, user=g.user, customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert', request=request) if alert: return jsonify(status='ok', id=alert.id, alert=alert.serialize),", "if alert: return jsonify(status='ok', id=alert.id, alert=alert.serialize), 201 else: raise ApiError('insert or 
update of", "create_time = datetime.utcfromtimestamp(incident['started_at']) elif state == 'acknowledged': status = 'ack' elif state ==", "incomingAlert = parse_stackdriver(request.get_json(force=True)) except ValueError as e: raise ApiError(str(e), 400) incomingAlert.customer = assign_customer(wanted=incomingAlert.customer)", "LOG.warning(\"Invalid documentation content: '{}'\".format(incident['documentation'])) service = [] status = None create_time = None", "raise ApiError(str(e), 400) incomingAlert.customer = assign_customer(wanted=incomingAlert.customer) add_remote_ip(request, incomingAlert) try: alert = process_alert(incomingAlert) except", "from typing import Any, Dict from flask import current_app, g, jsonify, request from", "import add_remote_ip, assign_customer, process_alert from alerta.utils.audit import write_audit_trail from . import webhooks LOG", "import datetime from typing import Any, Dict from flask import current_app, g, jsonify,", "'<a href=\"%s\" target=\"_blank\">Stackdriver Console</a>' % incident['url'] }, customer=incident.get('customer'), origin=incident.get('origin', 'Stackdriver'), event_type='stackdriverAlert', create_time=create_time, raw_data=notification", "= incident.get('severity', 'critical') if incident['policy_name']: service.append(incident['policy_name']) if state == 'open': create_time = datetime.utcfromtimestamp(incident['started_at'])", "add_remote_ip, assign_customer, process_alert from alerta.utils.audit import write_audit_trail from . 
import webhooks LOG =", "status=status, service=service, group=incident.get('group', 'Cloud'), text=incident['summary'], attributes={ 'incidentId': incident['incident_id'], 'resourceId': incident['resource_id'], 'moreInfo': '<a href=\"%s\"", "ValueError as e: raise ApiError(str(e), 400) incomingAlert.customer = assign_customer(wanted=incomingAlert.customer) add_remote_ip(request, incomingAlert) try: alert", "Dict[str, Any] def parse_stackdriver(notification: JSON) -> Alert: incident = notification['incident'] state = incident['state']", "= None # type: ignore severity = incident.get('severity', 'critical') if incident['policy_name']: service.append(incident['policy_name']) if", "as e: raise ApiError(str(e), 500) text = 'stackdriver alert received via webhook' write_audit_trail.send(current_app._get_current_object(),", "import write_audit_trail from . import webhooks LOG = logging.getLogger(__name__) JSON = Dict[str, Any]", "create_time = None # type: ignore severity = incident.get('severity', 'critical') if incident['policy_name']: service.append(incident['policy_name'])", "= parse_stackdriver(request.get_json(force=True)) except ValueError as e: raise ApiError(str(e), 400) incomingAlert.customer = assign_customer(wanted=incomingAlert.customer) add_remote_ip(request,", "state == 'open': create_time = datetime.utcfromtimestamp(incident['started_at']) elif state == 'acknowledged': status = 'ack'", "datetime import datetime from typing import Any, Dict from flask import current_app, g,", "text = 'stackdriver alert received via webhook' write_audit_trail.send(current_app._get_current_object(), event='webhook-received', message=text, user=g.user, customers=g.customers, scopes=g.scopes,", "datetime from typing import Any, Dict from flask import current_app, g, jsonify, request", "500) text = 'stackdriver alert received via webhook' write_audit_trail.send(current_app._get_current_object(), event='webhook-received', message=text, user=g.user, customers=g.customers,", "-> 
Alert: incident = notification['incident'] state = incident['state'] # 'documentation' is an optional", "alerta.utils.api import add_remote_ip, assign_customer, process_alert from alerta.utils.audit import write_audit_trail from . import webhooks", "= assign_customer(wanted=incomingAlert.customer) add_remote_ip(request, incomingAlert) try: alert = process_alert(incomingAlert) except RejectException as e: raise", "from flask import current_app, g, jsonify, request from flask_cors import cross_origin from alerta.auth.decorators", "in incident: try: content = json.loads(incident['documentation']['content']) incident.update(content) except Exception as e: LOG.warning(\"Invalid documentation", ") @webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST']) @cross_origin() @permission(Scope.write_webhooks) def stackdriver(): try: incomingAlert = parse_stackdriver(request.get_json(force=True)) except", "event='webhook-received', message=text, user=g.user, customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert', request=request) if alert: return jsonify(status='ok', id=alert.id,", "assign_customer, process_alert from alerta.utils.audit import write_audit_trail from . 
import webhooks LOG = logging.getLogger(__name__)", "severity = 'ok' create_time = datetime.utcfromtimestamp(incident['ended_at']) else: severity = 'indeterminate' return Alert( resource=incident['resource_name'],", "= None create_time = None # type: ignore severity = incident.get('severity', 'critical') if", "incomingAlert.customer = assign_customer(wanted=incomingAlert.customer) add_remote_ip(request, incomingAlert) try: alert = process_alert(incomingAlert) except RejectException as e:", "return jsonify(status='ok', id=alert.id, alert=alert.serialize), 201 else: raise ApiError('insert or update of StackDriver notification", "a json if 'documentation' in incident: try: content = json.loads(incident['documentation']['content']) incident.update(content) except Exception", "elif state == 'closed': severity = 'ok' create_time = datetime.utcfromtimestamp(incident['ended_at']) else: severity =", "Alert( resource=incident['resource_name'], event=incident['condition_name'], environment=incident.get('environment', 'Production'), severity=severity, status=status, service=service, group=incident.get('group', 'Cloud'), text=incident['summary'], attributes={ 'incidentId':", "Console</a>' % incident['url'] }, customer=incident.get('customer'), origin=incident.get('origin', 'Stackdriver'), event_type='stackdriverAlert', create_time=create_time, raw_data=notification ) @webhooks.route('/webhooks/stackdriver', methods=['OPTIONS',", "origin=incident.get('origin', 'Stackdriver'), event_type='stackdriverAlert', create_time=create_time, raw_data=notification ) @webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST']) @cross_origin() @permission(Scope.write_webhooks) def stackdriver():", "None create_time = None # type: ignore severity = incident.get('severity', 'critical') if incident['policy_name']:", "'moreInfo': '<a href=\"%s\" target=\"_blank\">Stackdriver Console</a>' % incident['url'] }, customer=incident.get('customer'), origin=incident.get('origin', 'Stackdriver'), 
event_type='stackdriverAlert', create_time=create_time,", "try: incomingAlert = parse_stackdriver(request.get_json(force=True)) except ValueError as e: raise ApiError(str(e), 400) incomingAlert.customer =", "methods=['OPTIONS', 'POST']) @cross_origin() @permission(Scope.write_webhooks) def stackdriver(): try: incomingAlert = parse_stackdriver(request.get_json(force=True)) except ValueError as", "via webhook' write_audit_trail.send(current_app._get_current_object(), event='webhook-received', message=text, user=g.user, customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert', request=request) if alert:", "add_remote_ip(request, incomingAlert) try: alert = process_alert(incomingAlert) except RejectException as e: raise ApiError(str(e), 403)", "state == 'acknowledged': status = 'ack' elif state == 'closed': severity = 'ok'", "json.loads(incident['documentation']['content']) incident.update(content) except Exception as e: LOG.warning(\"Invalid documentation content: '{}'\".format(incident['documentation'])) service = []", "% incident['url'] }, customer=incident.get('customer'), origin=incident.get('origin', 'Stackdriver'), event_type='stackdriverAlert', create_time=create_time, raw_data=notification ) @webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST'])", "write_audit_trail from . 
import webhooks LOG = logging.getLogger(__name__) JSON = Dict[str, Any] def", "event_type='stackdriverAlert', create_time=create_time, raw_data=notification ) @webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST']) @cross_origin() @permission(Scope.write_webhooks) def stackdriver(): try: incomingAlert", "raise ApiError(str(e), 500) text = 'stackdriver alert received via webhook' write_audit_trail.send(current_app._get_current_object(), event='webhook-received', message=text,", "from alerta.models.alert import Alert from alerta.models.enums import Scope from alerta.utils.api import add_remote_ip, assign_customer,", "g, jsonify, request from flask_cors import cross_origin from alerta.auth.decorators import permission from alerta.exceptions", "field that you can use to customize # your alert sending a json", "href=\"%s\" target=\"_blank\">Stackdriver Console</a>' % incident['url'] }, customer=incident.get('customer'), origin=incident.get('origin', 'Stackdriver'), event_type='stackdriverAlert', create_time=create_time, raw_data=notification )", "from alerta.exceptions import ApiError, RejectException from alerta.models.alert import Alert from alerta.models.enums import Scope", "json import logging from datetime import datetime from typing import Any, Dict from", "# your alert sending a json if 'documentation' in incident: try: content =", "except Exception as e: raise ApiError(str(e), 500) text = 'stackdriver alert received via", "user=g.user, customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert', request=request) if alert: return jsonify(status='ok', id=alert.id, alert=alert.serialize), 201", "environment=incident.get('environment', 'Production'), severity=severity, status=status, service=service, group=incident.get('group', 'Cloud'), text=incident['summary'], attributes={ 'incidentId': incident['incident_id'], 'resourceId': incident['resource_id'],", "import Alert from alerta.models.enums import Scope from alerta.utils.api import 
add_remote_ip, assign_customer, process_alert from", "as e: LOG.warning(\"Invalid documentation content: '{}'\".format(incident['documentation'])) service = [] status = None create_time", "e: raise ApiError(str(e), 400) incomingAlert.customer = assign_customer(wanted=incomingAlert.customer) add_remote_ip(request, incomingAlert) try: alert = process_alert(incomingAlert)", "if state == 'open': create_time = datetime.utcfromtimestamp(incident['started_at']) elif state == 'acknowledged': status =", "403) except Exception as e: raise ApiError(str(e), 500) text = 'stackdriver alert received", "'Production'), severity=severity, status=status, service=service, group=incident.get('group', 'Cloud'), text=incident['summary'], attributes={ 'incidentId': incident['incident_id'], 'resourceId': incident['resource_id'], 'moreInfo':", "= 'stackdriver alert received via webhook' write_audit_trail.send(current_app._get_current_object(), event='webhook-received', message=text, user=g.user, customers=g.customers, scopes=g.scopes, resource_id=alert.id,", "'documentation' in incident: try: content = json.loads(incident['documentation']['content']) incident.update(content) except Exception as e: LOG.warning(\"Invalid", "service = [] status = None create_time = None # type: ignore severity", "incident: try: content = json.loads(incident['documentation']['content']) incident.update(content) except Exception as e: LOG.warning(\"Invalid documentation content:", "state == 'closed': severity = 'ok' create_time = datetime.utcfromtimestamp(incident['ended_at']) else: severity = 'indeterminate'", "incident['incident_id'], 'resourceId': incident['resource_id'], 'moreInfo': '<a href=\"%s\" target=\"_blank\">Stackdriver Console</a>' % incident['url'] }, customer=incident.get('customer'), origin=incident.get('origin',", "Dict from flask import current_app, g, jsonify, request from flask_cors import cross_origin from", "# type: ignore severity = incident.get('severity', 'critical') if 
incident['policy_name']: service.append(incident['policy_name']) if state ==", "import logging from datetime import datetime from typing import Any, Dict from flask", "ApiError, RejectException from alerta.models.alert import Alert from alerta.models.enums import Scope from alerta.utils.api import", "service.append(incident['policy_name']) if state == 'open': create_time = datetime.utcfromtimestamp(incident['started_at']) elif state == 'acknowledged': status", "customer=incident.get('customer'), origin=incident.get('origin', 'Stackdriver'), event_type='stackdriverAlert', create_time=create_time, raw_data=notification ) @webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST']) @cross_origin() @permission(Scope.write_webhooks) def", "incident['state'] # 'documentation' is an optional field that you can use to customize", "400) incomingAlert.customer = assign_customer(wanted=incomingAlert.customer) add_remote_ip(request, incomingAlert) try: alert = process_alert(incomingAlert) except RejectException as", "incident['resource_id'], 'moreInfo': '<a href=\"%s\" target=\"_blank\">Stackdriver Console</a>' % incident['url'] }, customer=incident.get('customer'), origin=incident.get('origin', 'Stackdriver'), event_type='stackdriverAlert',", "from flask_cors import cross_origin from alerta.auth.decorators import permission from alerta.exceptions import ApiError, RejectException", "incomingAlert) try: alert = process_alert(incomingAlert) except RejectException as e: raise ApiError(str(e), 403) except", "e: LOG.warning(\"Invalid documentation content: '{}'\".format(incident['documentation'])) service = [] status = None create_time =", "import permission from alerta.exceptions import ApiError, RejectException from alerta.models.alert import Alert from alerta.models.enums", "id=alert.id, alert=alert.serialize), 201 else: raise ApiError('insert or update of StackDriver notification failed', 500)", "raise ApiError(str(e), 403) except Exception as e: raise ApiError(str(e), 
500) text = 'stackdriver", "}, customer=incident.get('customer'), origin=incident.get('origin', 'Stackdriver'), event_type='stackdriverAlert', create_time=create_time, raw_data=notification ) @webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST']) @cross_origin() @permission(Scope.write_webhooks)", "try: alert = process_alert(incomingAlert) except RejectException as e: raise ApiError(str(e), 403) except Exception", "ignore severity = incident.get('severity', 'critical') if incident['policy_name']: service.append(incident['policy_name']) if state == 'open': create_time", "'Stackdriver'), event_type='stackdriverAlert', create_time=create_time, raw_data=notification ) @webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST']) @cross_origin() @permission(Scope.write_webhooks) def stackdriver(): try:", "'incidentId': incident['incident_id'], 'resourceId': incident['resource_id'], 'moreInfo': '<a href=\"%s\" target=\"_blank\">Stackdriver Console</a>' % incident['url'] }, customer=incident.get('customer'),", "create_time=create_time, raw_data=notification ) @webhooks.route('/webhooks/stackdriver', methods=['OPTIONS', 'POST']) @cross_origin() @permission(Scope.write_webhooks) def stackdriver(): try: incomingAlert =", "alerta.models.alert import Alert from alerta.models.enums import Scope from alerta.utils.api import add_remote_ip, assign_customer, process_alert", "resource=incident['resource_name'], event=incident['condition_name'], environment=incident.get('environment', 'Production'), severity=severity, status=status, service=service, group=incident.get('group', 'Cloud'), text=incident['summary'], attributes={ 'incidentId': incident['incident_id'],", "flask_cors import cross_origin from alerta.auth.decorators import permission from alerta.exceptions import ApiError, RejectException from", "stackdriver(): try: incomingAlert = parse_stackdriver(request.get_json(force=True)) except ValueError as e: raise ApiError(str(e), 400) 
incomingAlert.customer", "current_app, g, jsonify, request from flask_cors import cross_origin from alerta.auth.decorators import permission from", "severity = incident.get('severity', 'critical') if incident['policy_name']: service.append(incident['policy_name']) if state == 'open': create_time =", "content = json.loads(incident['documentation']['content']) incident.update(content) except Exception as e: LOG.warning(\"Invalid documentation content: '{}'\".format(incident['documentation'])) service", "status = None create_time = None # type: ignore severity = incident.get('severity', 'critical')", "event=incident['condition_name'], environment=incident.get('environment', 'Production'), severity=severity, status=status, service=service, group=incident.get('group', 'Cloud'), text=incident['summary'], attributes={ 'incidentId': incident['incident_id'], 'resourceId':", "sending a json if 'documentation' in incident: try: content = json.loads(incident['documentation']['content']) incident.update(content) except", "typing import Any, Dict from flask import current_app, g, jsonify, request from flask_cors", "alert: return jsonify(status='ok', id=alert.id, alert=alert.serialize), 201 else: raise ApiError('insert or update of StackDriver", "Any] def parse_stackdriver(notification: JSON) -> Alert: incident = notification['incident'] state = incident['state'] #", "is an optional field that you can use to customize # your alert", "import json import logging from datetime import datetime from typing import Any, Dict", "'{}'\".format(incident['documentation'])) service = [] status = None create_time = None # type: ignore", "received via webhook' write_audit_trail.send(current_app._get_current_object(), event='webhook-received', message=text, user=g.user, customers=g.customers, scopes=g.scopes, resource_id=alert.id, type='alert', request=request) if" ]
[ "upload_to=get_image_path) thumbnail_path = models.CharField(max_length=500, editable=False) project_cover = models.BooleanField() def __unicode__(self): return self.project +", "import EmbeddedModelField, ListField SOURCES = (('du', 'Direct Upload'), ('tu', 'Tumblr Post'), ('tw', 'Tweet'),", "import datetime import random import string import bcrypt import os from django.db import", "max_length=1000) class Meta: model = Project class ProjectAdmin(admin.ModelAdmin): form = ProjectForm class Photo(models.Model):", "models.CharField(blank=False, max_length=300) project = models.CharField(blank=False, choices=((p.name, p.name) for p in Project.objects.all()), max_length=100) file", "p.name) for p in Project.objects.all()), max_length=100) file = models.ImageField(blank=False, upload_to=get_image_path) thumbnail_path = models.CharField(max_length=500,", "True, 'types': False} ] } def to_json(self): d = {'title': self.title, 'slug': self.slug,", "self.width, 'height': self.height } \"\"\" class Post(models.Model): slug = models.CharField(blank=False, max_length=200) title =", "models.CharField(blank=False, max_length=200) project = models.CharField(blank=False, max_length=200) timestamp = models.DateTimeField(blank=False, default=datetime.datetime.now) photos = EmbeddedModelField('Photo',", "forms.CharField(required=False, widget=forms.Textarea, max_length=1000) class Meta: model = Project class ProjectAdmin(admin.ModelAdmin): form = ProjectForm", "name = models.CharField(blank=False, max_length=100) description = models.CharField(blank=True, max_length=500) def __unicode__(self): return self.name class", "= models.CharField(blank=False, max_length=200) title = models.CharField(blank=False, max_length=200) project = models.CharField(blank=False, max_length=200) timestamp =", "photo = models.ImageField(blank=False, max_length=50) personal_info = models.CharField(blank=False, max_length=1000) def __unicode__(self): return self.name class", 
"models.CharField(max_length=500, editable=False) project_cover = models.BooleanField() def __unicode__(self): return self.project + '/' + self.image_name", "'width': self.width, 'height': self.height } \"\"\" class Post(models.Model): slug = models.CharField(blank=False, max_length=200) title", "<gh_stars>0 import datetime import random import string import bcrypt import os from django.db", "project = models.CharField(blank=False, choices=((p.name, p.name) for p in Project.objects.all()), max_length=100) file = models.ImageField(blank=False,", "thumbnail_path = models.CharField(max_length=500, editable=False) project_cover = models.BooleanField() def __unicode__(self): return self.project + '/'", "class Profile(models.Model): name = models.CharField(blank=False, max_length=100) photo = models.ImageField(blank=False, max_length=50) personal_info = models.CharField(blank=False,", "max_length=200) project = models.CharField(blank=False, max_length=200) timestamp = models.DateTimeField(blank=False, default=datetime.datetime.now) photos = EmbeddedModelField('Photo', blank=False)", "from django.conf import settings from django.contrib import admin from django import forms from", "self.project.replace(' ', '_').lower() + '/' + os.path.basename(self.image_name) class Profile(models.Model): name = models.CharField(blank=False, max_length=100)", "for p in Project.objects.all()), max_length=100) file = models.ImageField(blank=False, upload_to=get_image_path) thumbnail_path = models.CharField(max_length=500, editable=False)", "+ self.image_name def to_json(self): return { 'image_name': self.image_name, 'image_path': self.image_path, 'thumbnail_path': self.thumbnail_path, 'width':", "= models.CharField(blank=False, max_length=200) timestamp = models.DateTimeField(blank=False, default=datetime.datetime.now) photos = EmbeddedModelField('Photo', blank=False) tags =", "EmbeddedModelField, ListField SOURCES = (('du', 'Direct Upload'), ('tu', 'Tumblr Post'), ('tw', 'Tweet'), 
('ig',", "import forms from djangotoolbox.fields import EmbeddedModelField, ListField SOURCES = (('du', 'Direct Upload'), ('tu',", "max_length=100) photo = models.ImageField(blank=False, max_length=50) personal_info = models.CharField(blank=False, max_length=1000) def __unicode__(self): return self.name", "[ {'fields': ['slug'], 'unique': True, 'types': False} ] } def to_json(self): d =", "max_length=1000) def __unicode__(self): return self.name class ProfileForm(forms.ModelForm): personal_info = forms.CharField(required=True, widget=forms.Textarea, max_length=1000) class", "max_length=200) timestamp = models.DateTimeField(blank=False, default=datetime.datetime.now) photos = EmbeddedModelField('Photo', blank=False) tags = ListField(models.CharField()) source", "Project.objects.all()), max_length=100) file = models.ImageField(blank=False, upload_to=get_image_path) thumbnail_path = models.CharField(max_length=500, editable=False) project_cover = models.BooleanField()", "self.title, 'slug': self.slug, 'date': self.date.strftime('%Y-%m-%d'), 'source': self.source, 'photos': [p.to_json() for p in self.photos]}", "bcrypt import os from django.db import models from django.conf import settings from django.contrib", "import string import bcrypt import os from django.db import models from django.conf import", "class ProfileAdmin(admin.ModelAdmin): form = ProfileForm class Project(models.Model): name = models.CharField(blank=False, max_length=100) description =", "ProjectForm class Photo(models.Model): image_name = models.CharField(blank=False, max_length=300) project = models.CharField(blank=False, choices=((p.name, p.name) for", "= forms.CharField(required=False, widget=forms.Textarea, max_length=1000) class Meta: model = Project class ProjectAdmin(admin.ModelAdmin): form =", "project = models.CharField(blank=False, max_length=200) timestamp = models.DateTimeField(blank=False, default=datetime.datetime.now) photos = EmbeddedModelField('Photo', blank=False) tags", 
"EmbeddedModelField('Photo', blank=False) tags = ListField(models.CharField()) source = models.CharField(blank=False, choices=SOURCES, max_length=50) meta = {", "'unique': True, 'types': False} ] } def to_json(self): d = {'title': self.title, 'slug':", "SOURCES = (('du', 'Direct Upload'), ('tu', 'Tumblr Post'), ('tw', 'Tweet'), ('ig', 'Instagram'), ('fb',", "= Profile class ProfileAdmin(admin.ModelAdmin): form = ProfileForm class Project(models.Model): name = models.CharField(blank=False, max_length=100)", "= models.CharField(blank=False, max_length=200) project = models.CharField(blank=False, max_length=200) timestamp = models.DateTimeField(blank=False, default=datetime.datetime.now) photos =", "Photo(models.Model): image_name = models.CharField(blank=False, max_length=300) project = models.CharField(blank=False, choices=((p.name, p.name) for p in", "= Project class ProjectAdmin(admin.ModelAdmin): form = ProjectForm class Photo(models.Model): image_name = models.CharField(blank=False, max_length=300)", "', '_').lower() + '/' + os.path.basename(self.image_name) class Profile(models.Model): name = models.CharField(blank=False, max_length=100) photo", "'Instagram'), ('fb', 'Facebook')) def get_image_path(self, other_field): return 'uploads/' + self.project.replace(' ', '_').lower() +", "widget=forms.Textarea, max_length=1000) class Meta: model = Project class ProjectAdmin(admin.ModelAdmin): form = ProjectForm class", "models.CharField(blank=False, max_length=100) photo = models.ImageField(blank=False, max_length=50) personal_info = models.CharField(blank=False, max_length=1000) def __unicode__(self): return", "max_length=100) description = models.CharField(blank=True, max_length=500) def __unicode__(self): return self.name class ProjectForm(forms.ModelForm): description =", "class ProjectAdmin(admin.ModelAdmin): form = ProjectForm class Photo(models.Model): image_name = models.CharField(blank=False, max_length=300) project =", "= ProjectForm class 
Photo(models.Model): image_name = models.CharField(blank=False, max_length=300) project = models.CharField(blank=False, choices=((p.name, p.name)", "models.DateTimeField(blank=False, default=datetime.datetime.now) photos = EmbeddedModelField('Photo', blank=False) tags = ListField(models.CharField()) source = models.CharField(blank=False, choices=SOURCES,", "'indexes': [ {'fields': ['slug'], 'unique': True, 'types': False} ] } def to_json(self): d", "+ '/' + self.image_name def to_json(self): return { 'image_name': self.image_name, 'image_path': self.image_path, 'thumbnail_path':", "+ self.project.replace(' ', '_').lower() + '/' + os.path.basename(self.image_name) class Profile(models.Model): name = models.CharField(blank=False,", "get_image_path(self, other_field): return 'uploads/' + self.project.replace(' ', '_').lower() + '/' + os.path.basename(self.image_name) class", "('fb', 'Facebook')) def get_image_path(self, other_field): return 'uploads/' + self.project.replace(' ', '_').lower() + '/'", "return self.name class ProjectForm(forms.ModelForm): description = forms.CharField(required=False, widget=forms.Textarea, max_length=1000) class Meta: model =", "tags = ListField(models.CharField()) source = models.CharField(blank=False, choices=SOURCES, max_length=50) meta = { 'indexes': [", "source = models.CharField(blank=False, choices=SOURCES, max_length=50) meta = { 'indexes': [ {'fields': ['slug'], 'unique':", "class Meta: model = Project class ProjectAdmin(admin.ModelAdmin): form = ProjectForm class Photo(models.Model): image_name", "} def to_json(self): d = {'title': self.title, 'slug': self.slug, 'date': self.date.strftime('%Y-%m-%d'), 'source': self.source,", "'uploads/' + self.project.replace(' ', '_').lower() + '/' + os.path.basename(self.image_name) class Profile(models.Model): name =", "('ig', 'Instagram'), ('fb', 'Facebook')) def get_image_path(self, other_field): return 'uploads/' + self.project.replace(' ', '_').lower()", "def get_image_path(self, 
other_field): return 'uploads/' + self.project.replace(' ', '_').lower() + '/' + os.path.basename(self.image_name)", "from djangotoolbox.fields import EmbeddedModelField, ListField SOURCES = (('du', 'Direct Upload'), ('tu', 'Tumblr Post'),", "__unicode__(self): return self.project + '/' + self.image_name def to_json(self): return { 'image_name': self.image_name,", "import random import string import bcrypt import os from django.db import models from", "project_cover = models.BooleanField() def __unicode__(self): return self.project + '/' + self.image_name def to_json(self):", "from django import forms from djangotoolbox.fields import EmbeddedModelField, ListField SOURCES = (('du', 'Direct", "max_length=100) file = models.ImageField(blank=False, upload_to=get_image_path) thumbnail_path = models.CharField(max_length=500, editable=False) project_cover = models.BooleanField() def", "meta = { 'indexes': [ {'fields': ['slug'], 'unique': True, 'types': False} ] }", "for p in self.photos]} if hasattr(self, 'tags'): d['tags'] = self.tags return d \"\"\"", "d = {'title': self.title, 'slug': self.slug, 'date': self.date.strftime('%Y-%m-%d'), 'source': self.source, 'photos': [p.to_json() for", "= models.DateTimeField(blank=False, default=datetime.datetime.now) photos = EmbeddedModelField('Photo', blank=False) tags = ListField(models.CharField()) source = models.CharField(blank=False,", "hasattr(self, 'tags'): d['tags'] = self.tags return d \"\"\" admin.site.register(Profile, ProfileAdmin) admin.site.register(Project, ProjectAdmin) admin.site.register(Photo)", "ProfileForm(forms.ModelForm): personal_info = forms.CharField(required=True, widget=forms.Textarea, max_length=1000) class Meta: model = Profile class ProfileAdmin(admin.ModelAdmin):", "'slug': self.slug, 'date': self.date.strftime('%Y-%m-%d'), 'source': self.source, 'photos': [p.to_json() for p in self.photos]} if", "return self.name class ProfileForm(forms.ModelForm): personal_info = forms.CharField(required=True, 
widget=forms.Textarea, max_length=1000) class Meta: model =", "self.slug, 'date': self.date.strftime('%Y-%m-%d'), 'source': self.source, 'photos': [p.to_json() for p in self.photos]} if hasattr(self,", "= { 'indexes': [ {'fields': ['slug'], 'unique': True, 'types': False} ] } def", "django.contrib import admin from django import forms from djangotoolbox.fields import EmbeddedModelField, ListField SOURCES", "slug = models.CharField(blank=False, max_length=200) title = models.CharField(blank=False, max_length=200) project = models.CharField(blank=False, max_length=200) timestamp", "= models.BooleanField() def __unicode__(self): return self.project + '/' + self.image_name def to_json(self): return", "photos = EmbeddedModelField('Photo', blank=False) tags = ListField(models.CharField()) source = models.CharField(blank=False, choices=SOURCES, max_length=50) meta", "Project(models.Model): name = models.CharField(blank=False, max_length=100) description = models.CharField(blank=True, max_length=500) def __unicode__(self): return self.name", "django.db import models from django.conf import settings from django.contrib import admin from django", "False} ] } def to_json(self): d = {'title': self.title, 'slug': self.slug, 'date': self.date.strftime('%Y-%m-%d'),", "max_length=50) personal_info = models.CharField(blank=False, max_length=1000) def __unicode__(self): return self.name class ProfileForm(forms.ModelForm): personal_info =", "form = ProfileForm class Project(models.Model): name = models.CharField(blank=False, max_length=100) description = models.CharField(blank=True, max_length=500)", "datetime import random import string import bcrypt import os from django.db import models", "def to_json(self): d = {'title': self.title, 'slug': self.slug, 'date': self.date.strftime('%Y-%m-%d'), 'source': self.source, 'photos':", "def __unicode__(self): return self.name class ProjectForm(forms.ModelForm): description = forms.CharField(required=False, widget=forms.Textarea, 
max_length=1000) class Meta:", "models.CharField(blank=False, max_length=200) timestamp = models.DateTimeField(blank=False, default=datetime.datetime.now) photos = EmbeddedModelField('Photo', blank=False) tags = ListField(models.CharField())", "model = Profile class ProfileAdmin(admin.ModelAdmin): form = ProfileForm class Project(models.Model): name = models.CharField(blank=False,", "models.ImageField(blank=False, max_length=50) personal_info = models.CharField(blank=False, max_length=1000) def __unicode__(self): return self.name class ProfileForm(forms.ModelForm): personal_info", "{'fields': ['slug'], 'unique': True, 'types': False} ] } def to_json(self): d = {'title':", "= models.CharField(blank=True, max_length=500) def __unicode__(self): return self.name class ProjectForm(forms.ModelForm): description = forms.CharField(required=False, widget=forms.Textarea,", "max_length=500) def __unicode__(self): return self.name class ProjectForm(forms.ModelForm): description = forms.CharField(required=False, widget=forms.Textarea, max_length=1000) class", "Profile class ProfileAdmin(admin.ModelAdmin): form = ProfileForm class Project(models.Model): name = models.CharField(blank=False, max_length=100) description", "return self.project + '/' + self.image_name def to_json(self): return { 'image_name': self.image_name, 'image_path':", "= models.CharField(max_length=500, editable=False) project_cover = models.BooleanField() def __unicode__(self): return self.project + '/' +", "= (('du', 'Direct Upload'), ('tu', 'Tumblr Post'), ('tw', 'Tweet'), ('ig', 'Instagram'), ('fb', 'Facebook'))", "class Post(models.Model): slug = models.CharField(blank=False, max_length=200) title = models.CharField(blank=False, max_length=200) project = models.CharField(blank=False,", "Profile(models.Model): name = models.CharField(blank=False, max_length=100) photo = models.ImageField(blank=False, max_length=50) personal_info = models.CharField(blank=False, max_length=1000)", 
"forms.CharField(required=True, widget=forms.Textarea, max_length=1000) class Meta: model = Profile class ProfileAdmin(admin.ModelAdmin): form = ProfileForm", "= models.CharField(blank=False, choices=((p.name, p.name) for p in Project.objects.all()), max_length=100) file = models.ImageField(blank=False, upload_to=get_image_path)", "import bcrypt import os from django.db import models from django.conf import settings from", "personal_info = forms.CharField(required=True, widget=forms.Textarea, max_length=1000) class Meta: model = Profile class ProfileAdmin(admin.ModelAdmin): form", "class Meta: model = Profile class ProfileAdmin(admin.ModelAdmin): form = ProfileForm class Project(models.Model): name", "editable=False) project_cover = models.BooleanField() def __unicode__(self): return self.project + '/' + self.image_name def", "\"\"\" class Post(models.Model): slug = models.CharField(blank=False, max_length=200) title = models.CharField(blank=False, max_length=200) project =", "self.name class ProfileForm(forms.ModelForm): personal_info = forms.CharField(required=True, widget=forms.Textarea, max_length=1000) class Meta: model = Profile", "'image_path': self.image_path, 'thumbnail_path': self.thumbnail_path, 'width': self.width, 'height': self.height } \"\"\" class Post(models.Model): slug", "Meta: model = Project class ProjectAdmin(admin.ModelAdmin): form = ProjectForm class Photo(models.Model): image_name =", "models.CharField(blank=False, max_length=1000) def __unicode__(self): return self.name class ProfileForm(forms.ModelForm): personal_info = forms.CharField(required=True, widget=forms.Textarea, max_length=1000)", "= ProfileForm class Project(models.Model): name = models.CharField(blank=False, max_length=100) description = models.CharField(blank=True, max_length=500) def", "class Project(models.Model): name = models.CharField(blank=False, max_length=100) description = models.CharField(blank=True, max_length=500) def __unicode__(self): return", "model = Project 
class ProjectAdmin(admin.ModelAdmin): form = ProjectForm class Photo(models.Model): image_name = models.CharField(blank=False,", "self.image_path, 'thumbnail_path': self.thumbnail_path, 'width': self.width, 'height': self.height } \"\"\" class Post(models.Model): slug =", "djangotoolbox.fields import EmbeddedModelField, ListField SOURCES = (('du', 'Direct Upload'), ('tu', 'Tumblr Post'), ('tw',", "models.CharField(blank=False, max_length=100) description = models.CharField(blank=True, max_length=500) def __unicode__(self): return self.name class ProjectForm(forms.ModelForm): description", "def __unicode__(self): return self.name class ProfileForm(forms.ModelForm): personal_info = forms.CharField(required=True, widget=forms.Textarea, max_length=1000) class Meta:", "other_field): return 'uploads/' + self.project.replace(' ', '_').lower() + '/' + os.path.basename(self.image_name) class Profile(models.Model):", "models.CharField(blank=False, choices=SOURCES, max_length=50) meta = { 'indexes': [ {'fields': ['slug'], 'unique': True, 'types':", "'photos': [p.to_json() for p in self.photos]} if hasattr(self, 'tags'): d['tags'] = self.tags return", "p in self.photos]} if hasattr(self, 'tags'): d['tags'] = self.tags return d \"\"\" admin.site.register(Profile,", "models from django.conf import settings from django.contrib import admin from django import forms", "personal_info = models.CharField(blank=False, max_length=1000) def __unicode__(self): return self.name class ProfileForm(forms.ModelForm): personal_info = forms.CharField(required=True,", "= models.CharField(blank=False, max_length=100) photo = models.ImageField(blank=False, max_length=50) personal_info = models.CharField(blank=False, max_length=1000) def __unicode__(self):", "} \"\"\" class Post(models.Model): slug = models.CharField(blank=False, max_length=200) title = models.CharField(blank=False, max_length=200) project", "+ os.path.basename(self.image_name) class Profile(models.Model): name = 
models.CharField(blank=False, max_length=100) photo = models.ImageField(blank=False, max_length=50) personal_info", "self.thumbnail_path, 'width': self.width, 'height': self.height } \"\"\" class Post(models.Model): slug = models.CharField(blank=False, max_length=200)", "class ProfileForm(forms.ModelForm): personal_info = forms.CharField(required=True, widget=forms.Textarea, max_length=1000) class Meta: model = Profile class", "self.date.strftime('%Y-%m-%d'), 'source': self.source, 'photos': [p.to_json() for p in self.photos]} if hasattr(self, 'tags'): d['tags']", "'Tweet'), ('ig', 'Instagram'), ('fb', 'Facebook')) def get_image_path(self, other_field): return 'uploads/' + self.project.replace(' ',", "'/' + os.path.basename(self.image_name) class Profile(models.Model): name = models.CharField(blank=False, max_length=100) photo = models.ImageField(blank=False, max_length=50)", "import os from django.db import models from django.conf import settings from django.contrib import", "import models from django.conf import settings from django.contrib import admin from django import", "django.conf import settings from django.contrib import admin from django import forms from djangotoolbox.fields", "= models.ImageField(blank=False, max_length=50) personal_info = models.CharField(blank=False, max_length=1000) def __unicode__(self): return self.name class ProfileForm(forms.ModelForm):", "self.height } \"\"\" class Post(models.Model): slug = models.CharField(blank=False, max_length=200) title = models.CharField(blank=False, max_length=200)", "'Direct Upload'), ('tu', 'Tumblr Post'), ('tw', 'Tweet'), ('ig', 'Instagram'), ('fb', 'Facebook')) def get_image_path(self,", "('tu', 'Tumblr Post'), ('tw', 'Tweet'), ('ig', 'Instagram'), ('fb', 'Facebook')) def get_image_path(self, other_field): return", "admin from django import forms from djangotoolbox.fields import EmbeddedModelField, ListField SOURCES = (('du',", "'date': self.date.strftime('%Y-%m-%d'), 'source': self.source, 
'photos': [p.to_json() for p in self.photos]} if hasattr(self, 'tags'):", "{ 'indexes': [ {'fields': ['slug'], 'unique': True, 'types': False} ] } def to_json(self):", "= {'title': self.title, 'slug': self.slug, 'date': self.date.strftime('%Y-%m-%d'), 'source': self.source, 'photos': [p.to_json() for p", "self.source, 'photos': [p.to_json() for p in self.photos]} if hasattr(self, 'tags'): d['tags'] = self.tags", "__unicode__(self): return self.name class ProjectForm(forms.ModelForm): description = forms.CharField(required=False, widget=forms.Textarea, max_length=1000) class Meta: model", "self.name class ProjectForm(forms.ModelForm): description = forms.CharField(required=False, widget=forms.Textarea, max_length=1000) class Meta: model = Project", "class Photo(models.Model): image_name = models.CharField(blank=False, max_length=300) project = models.CharField(blank=False, choices=((p.name, p.name) for p", "] } def to_json(self): d = {'title': self.title, 'slug': self.slug, 'date': self.date.strftime('%Y-%m-%d'), 'source':", "Post(models.Model): slug = models.CharField(blank=False, max_length=200) title = models.CharField(blank=False, max_length=200) project = models.CharField(blank=False, max_length=200)", "= EmbeddedModelField('Photo', blank=False) tags = ListField(models.CharField()) source = models.CharField(blank=False, choices=SOURCES, max_length=50) meta =", "from django.db import models from django.conf import settings from django.contrib import admin from", "choices=((p.name, p.name) for p in Project.objects.all()), max_length=100) file = models.ImageField(blank=False, upload_to=get_image_path) thumbnail_path =", "ListField SOURCES = (('du', 'Direct Upload'), ('tu', 'Tumblr Post'), ('tw', 'Tweet'), ('ig', 'Instagram'),", "max_length=1000) class Meta: model = Profile class ProfileAdmin(admin.ModelAdmin): form = ProfileForm class Project(models.Model):", "widget=forms.Textarea, max_length=1000) class Meta: model = Profile class ProfileAdmin(admin.ModelAdmin): 
form = ProfileForm class", "models.BooleanField() def __unicode__(self): return self.project + '/' + self.image_name def to_json(self): return {", "= models.CharField(blank=False, choices=SOURCES, max_length=50) meta = { 'indexes': [ {'fields': ['slug'], 'unique': True,", "timestamp = models.DateTimeField(blank=False, default=datetime.datetime.now) photos = EmbeddedModelField('Photo', blank=False) tags = ListField(models.CharField()) source =", "= ListField(models.CharField()) source = models.CharField(blank=False, choices=SOURCES, max_length=50) meta = { 'indexes': [ {'fields':", "'Tumblr Post'), ('tw', 'Tweet'), ('ig', 'Instagram'), ('fb', 'Facebook')) def get_image_path(self, other_field): return 'uploads/'", "ListField(models.CharField()) source = models.CharField(blank=False, choices=SOURCES, max_length=50) meta = { 'indexes': [ {'fields': ['slug'],", "to_json(self): return { 'image_name': self.image_name, 'image_path': self.image_path, 'thumbnail_path': self.thumbnail_path, 'width': self.width, 'height': self.height", "import admin from django import forms from djangotoolbox.fields import EmbeddedModelField, ListField SOURCES =", "models.CharField(blank=False, choices=((p.name, p.name) for p in Project.objects.all()), max_length=100) file = models.ImageField(blank=False, upload_to=get_image_path) thumbnail_path", "name = models.CharField(blank=False, max_length=100) photo = models.ImageField(blank=False, max_length=50) personal_info = models.CharField(blank=False, max_length=1000) def", "def to_json(self): return { 'image_name': self.image_name, 'image_path': self.image_path, 'thumbnail_path': self.thumbnail_path, 'width': self.width, 'height':", "form = ProjectForm class Photo(models.Model): image_name = models.CharField(blank=False, max_length=300) project = models.CharField(blank=False, choices=((p.name,", "max_length=300) project = models.CharField(blank=False, choices=((p.name, p.name) for p in Project.objects.all()), max_length=100) file =", "settings 
from django.contrib import admin from django import forms from djangotoolbox.fields import EmbeddedModelField,", "image_name = models.CharField(blank=False, max_length=300) project = models.CharField(blank=False, choices=((p.name, p.name) for p in Project.objects.all()),", "= forms.CharField(required=True, widget=forms.Textarea, max_length=1000) class Meta: model = Profile class ProfileAdmin(admin.ModelAdmin): form =", "'types': False} ] } def to_json(self): d = {'title': self.title, 'slug': self.slug, 'date':", "Post'), ('tw', 'Tweet'), ('ig', 'Instagram'), ('fb', 'Facebook')) def get_image_path(self, other_field): return 'uploads/' +", "= models.ImageField(blank=False, upload_to=get_image_path) thumbnail_path = models.CharField(max_length=500, editable=False) project_cover = models.BooleanField() def __unicode__(self): return", "{'title': self.title, 'slug': self.slug, 'date': self.date.strftime('%Y-%m-%d'), 'source': self.source, 'photos': [p.to_json() for p in", "forms from djangotoolbox.fields import EmbeddedModelField, ListField SOURCES = (('du', 'Direct Upload'), ('tu', 'Tumblr", "ProfileAdmin(admin.ModelAdmin): form = ProfileForm class Project(models.Model): name = models.CharField(blank=False, max_length=100) description = models.CharField(blank=True,", "Upload'), ('tu', 'Tumblr Post'), ('tw', 'Tweet'), ('ig', 'Instagram'), ('fb', 'Facebook')) def get_image_path(self, other_field):", "os.path.basename(self.image_name) class Profile(models.Model): name = models.CharField(blank=False, max_length=100) photo = models.ImageField(blank=False, max_length=50) personal_info =", "ProjectAdmin(admin.ModelAdmin): form = ProjectForm class Photo(models.Model): image_name = models.CharField(blank=False, max_length=300) project = models.CharField(blank=False,", "'_').lower() + '/' + os.path.basename(self.image_name) class Profile(models.Model): name = models.CharField(blank=False, max_length=100) photo =", "ProjectForm(forms.ModelForm): description = 
forms.CharField(required=False, widget=forms.Textarea, max_length=1000) class Meta: model = Project class ProjectAdmin(admin.ModelAdmin):", "default=datetime.datetime.now) photos = EmbeddedModelField('Photo', blank=False) tags = ListField(models.CharField()) source = models.CharField(blank=False, choices=SOURCES, max_length=50)", "if hasattr(self, 'tags'): d['tags'] = self.tags return d \"\"\" admin.site.register(Profile, ProfileAdmin) admin.site.register(Project, ProjectAdmin)", "in Project.objects.all()), max_length=100) file = models.ImageField(blank=False, upload_to=get_image_path) thumbnail_path = models.CharField(max_length=500, editable=False) project_cover =", "from django.contrib import admin from django import forms from djangotoolbox.fields import EmbeddedModelField, ListField", "'height': self.height } \"\"\" class Post(models.Model): slug = models.CharField(blank=False, max_length=200) title = models.CharField(blank=False,", "ProfileForm class Project(models.Model): name = models.CharField(blank=False, max_length=100) description = models.CharField(blank=True, max_length=500) def __unicode__(self):", "max_length=200) title = models.CharField(blank=False, max_length=200) project = models.CharField(blank=False, max_length=200) timestamp = models.DateTimeField(blank=False, default=datetime.datetime.now)", "return 'uploads/' + self.project.replace(' ', '_').lower() + '/' + os.path.basename(self.image_name) class Profile(models.Model): name", "+ '/' + os.path.basename(self.image_name) class Profile(models.Model): name = models.CharField(blank=False, max_length=100) photo = models.ImageField(blank=False,", "self.project + '/' + self.image_name def to_json(self): return { 'image_name': self.image_name, 'image_path': self.image_path,", "'image_name': self.image_name, 'image_path': self.image_path, 'thumbnail_path': self.thumbnail_path, 'width': self.width, 'height': self.height } \"\"\" class", "__unicode__(self): return self.name class 
ProfileForm(forms.ModelForm): personal_info = forms.CharField(required=True, widget=forms.Textarea, max_length=1000) class Meta: model", "class ProjectForm(forms.ModelForm): description = forms.CharField(required=False, widget=forms.Textarea, max_length=1000) class Meta: model = Project class", "['slug'], 'unique': True, 'types': False} ] } def to_json(self): d = {'title': self.title,", "{ 'image_name': self.image_name, 'image_path': self.image_path, 'thumbnail_path': self.thumbnail_path, 'width': self.width, 'height': self.height } \"\"\"", "max_length=50) meta = { 'indexes': [ {'fields': ['slug'], 'unique': True, 'types': False} ]", "random import string import bcrypt import os from django.db import models from django.conf", "in self.photos]} if hasattr(self, 'tags'): d['tags'] = self.tags return d \"\"\" admin.site.register(Profile, ProfileAdmin)", "'thumbnail_path': self.thumbnail_path, 'width': self.width, 'height': self.height } \"\"\" class Post(models.Model): slug = models.CharField(blank=False,", "p in Project.objects.all()), max_length=100) file = models.ImageField(blank=False, upload_to=get_image_path) thumbnail_path = models.CharField(max_length=500, editable=False) project_cover", "[p.to_json() for p in self.photos]} if hasattr(self, 'tags'): d['tags'] = self.tags return d", "django import forms from djangotoolbox.fields import EmbeddedModelField, ListField SOURCES = (('du', 'Direct Upload'),", "= models.CharField(blank=False, max_length=300) project = models.CharField(blank=False, choices=((p.name, p.name) for p in Project.objects.all()), max_length=100)", "self.image_name def to_json(self): return { 'image_name': self.image_name, 'image_path': self.image_path, 'thumbnail_path': self.thumbnail_path, 'width': self.width,", "os from django.db import models from django.conf import settings from django.contrib import admin", "string import bcrypt import os from django.db import models from django.conf import settings", "models.ImageField(blank=False, 
upload_to=get_image_path) thumbnail_path = models.CharField(max_length=500, editable=False) project_cover = models.BooleanField() def __unicode__(self): return self.project", "= models.CharField(blank=False, max_length=100) description = models.CharField(blank=True, max_length=500) def __unicode__(self): return self.name class ProjectForm(forms.ModelForm):", "('tw', 'Tweet'), ('ig', 'Instagram'), ('fb', 'Facebook')) def get_image_path(self, other_field): return 'uploads/' + self.project.replace('", "description = models.CharField(blank=True, max_length=500) def __unicode__(self): return self.name class ProjectForm(forms.ModelForm): description = forms.CharField(required=False,", "def __unicode__(self): return self.project + '/' + self.image_name def to_json(self): return { 'image_name':", "Meta: model = Profile class ProfileAdmin(admin.ModelAdmin): form = ProfileForm class Project(models.Model): name =", "title = models.CharField(blank=False, max_length=200) project = models.CharField(blank=False, max_length=200) timestamp = models.DateTimeField(blank=False, default=datetime.datetime.now) photos", "'/' + self.image_name def to_json(self): return { 'image_name': self.image_name, 'image_path': self.image_path, 'thumbnail_path': self.thumbnail_path,", "file = models.ImageField(blank=False, upload_to=get_image_path) thumbnail_path = models.CharField(max_length=500, editable=False) project_cover = models.BooleanField() def __unicode__(self):", "'Facebook')) def get_image_path(self, other_field): return 'uploads/' + self.project.replace(' ', '_').lower() + '/' +", "self.image_name, 'image_path': self.image_path, 'thumbnail_path': self.thumbnail_path, 'width': self.width, 'height': self.height } \"\"\" class Post(models.Model):", "'source': self.source, 'photos': [p.to_json() for p in self.photos]} if hasattr(self, 'tags'): d['tags'] =", "= models.CharField(blank=False, max_length=1000) def __unicode__(self): return self.name class ProfileForm(forms.ModelForm): 
personal_info = forms.CharField(required=True, widget=forms.Textarea,", "self.photos]} if hasattr(self, 'tags'): d['tags'] = self.tags return d \"\"\" admin.site.register(Profile, ProfileAdmin) admin.site.register(Project,", "Project class ProjectAdmin(admin.ModelAdmin): form = ProjectForm class Photo(models.Model): image_name = models.CharField(blank=False, max_length=300) project", "import settings from django.contrib import admin from django import forms from djangotoolbox.fields import", "description = forms.CharField(required=False, widget=forms.Textarea, max_length=1000) class Meta: model = Project class ProjectAdmin(admin.ModelAdmin): form", "models.CharField(blank=False, max_length=200) title = models.CharField(blank=False, max_length=200) project = models.CharField(blank=False, max_length=200) timestamp = models.DateTimeField(blank=False,", "blank=False) tags = ListField(models.CharField()) source = models.CharField(blank=False, choices=SOURCES, max_length=50) meta = { 'indexes':", "return { 'image_name': self.image_name, 'image_path': self.image_path, 'thumbnail_path': self.thumbnail_path, 'width': self.width, 'height': self.height }", "to_json(self): d = {'title': self.title, 'slug': self.slug, 'date': self.date.strftime('%Y-%m-%d'), 'source': self.source, 'photos': [p.to_json()", "choices=SOURCES, max_length=50) meta = { 'indexes': [ {'fields': ['slug'], 'unique': True, 'types': False}", "(('du', 'Direct Upload'), ('tu', 'Tumblr Post'), ('tw', 'Tweet'), ('ig', 'Instagram'), ('fb', 'Facebook')) def", "models.CharField(blank=True, max_length=500) def __unicode__(self): return self.name class ProjectForm(forms.ModelForm): description = forms.CharField(required=False, widget=forms.Textarea, max_length=1000)" ]
[]
[ "user_passes_test logout_required = user_passes_test( lambda x: not x.is_authenticated(), settings.LOGIN_REDIRECT_URL, ) superuser_required = user_passes_test(lambda", "lambda x: not x.is_authenticated(), settings.LOGIN_REDIRECT_URL, ) superuser_required = user_passes_test(lambda u: (u.is_authenticated() and u.is_superuser))", "import user_passes_test logout_required = user_passes_test( lambda x: not x.is_authenticated(), settings.LOGIN_REDIRECT_URL, ) superuser_required =", "from django.conf import settings from django.contrib.auth.decorators import user_passes_test logout_required = user_passes_test( lambda x:", "import settings from django.contrib.auth.decorators import user_passes_test logout_required = user_passes_test( lambda x: not x.is_authenticated(),", "django.contrib.auth.decorators import user_passes_test logout_required = user_passes_test( lambda x: not x.is_authenticated(), settings.LOGIN_REDIRECT_URL, ) superuser_required", "= user_passes_test( lambda x: not x.is_authenticated(), settings.LOGIN_REDIRECT_URL, ) superuser_required = user_passes_test(lambda u: (u.is_authenticated()", "settings from django.contrib.auth.decorators import user_passes_test logout_required = user_passes_test( lambda x: not x.is_authenticated(), settings.LOGIN_REDIRECT_URL,", "logout_required = user_passes_test( lambda x: not x.is_authenticated(), settings.LOGIN_REDIRECT_URL, ) superuser_required = user_passes_test(lambda u:", "user_passes_test( lambda x: not x.is_authenticated(), settings.LOGIN_REDIRECT_URL, ) superuser_required = user_passes_test(lambda u: (u.is_authenticated() and", "from django.contrib.auth.decorators import user_passes_test logout_required = user_passes_test( lambda x: not x.is_authenticated(), settings.LOGIN_REDIRECT_URL, )", "django.conf import settings from django.contrib.auth.decorators import user_passes_test logout_required = user_passes_test( lambda x: not" ]
[ "'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'datetime.timedelta'), ('cmath.sqrt(100)', 'cmath.sqrt'), # Functions", "# Functions in modules ('datetime.timedelta(days=1)', 'timedelta'), ('cmath.sqrt(100)', 'sqrt'), # Functions in (made up)", "('obj.funct()', 'funct'), ]) def test_given_function_called_with_split( parse_ast_tree, function_call: str, function_name: str, ) -> None:", "('datetime.timedelta(days=1)', 'datetime.timedelta'), ('cmath.sqrt(100)', 'cmath.sqrt'), # Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'dt.strftime'), ('obj.funct()',", "\"r\")', 'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'timedelta'), ('cmath.sqrt(100)', 'sqrt'), #", "splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called(node, [function_name])", "\"\"\"Test given_function_called splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function =", "str, function_name: str, ) -> None: \"\"\"Test given_function_called splitting the modules.\"\"\" tree =", "\"r\")', 'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'datetime.timedelta'), ('cmath.sqrt(100)', 'cmath.sqrt'), #", "assert called_function == function_name @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print(\"Hello world!\")',", "node = tree.body[0].value called_function = functions.given_function_called(node, [function_name]) assert called_function == function_name @pytest.mark.parametrize(('function_call', 'function_name'),", "tree = parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called(node, [function_name]) assert called_function ==", "parse_ast_tree, function_call: str, function_name: str, ) -> None: \"\"\"Test given_function_called without splitting the", ") -> None: 
\"\"\"Test given_function_called without splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node", "('bool(1)', 'bool'), ('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'datetime.timedelta'),", "function_call: str, function_name: str, ) -> None: \"\"\"Test given_function_called splitting the modules.\"\"\" tree", "('print(\"Hello world!\")', 'print'), ('int(\"10\")', 'int'), ('bool(1)', 'bool'), ('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)', 'str'), #", "called_function == function_name @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print(\"Hello world!\")', 'print'),", "(made up) objects ('dt.strftime(\"%H:%M\")', 'dt.strftime'), ('obj.funct()', 'obj.funct'), ]) def test_given_function_called_no_split( parse_ast_tree, function_call: str,", "= tree.body[0].value called_function = functions.given_function_called(node, [function_name]) assert called_function == function_name @pytest.mark.parametrize(('function_call', 'function_name'), [", "('bool(1)', 'bool'), ('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'timedelta'),", "== function_name @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print(\"Hello world!\")', 'print'), ('int(\"10\")',", "-> None: \"\"\"Test given_function_called splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value", "world!\")', 'print'), ('int(\"10\")', 'int'), ('bool(1)', 'bool'), ('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)', 'str'), # Functions", "# Simple builtin functions ('print(\"Hello world!\")', 'print'), ('int(\"10\")', 'int'), ('bool(1)', 'bool'), ('open(\"/tmp/file.txt\", \"r\")',", "'sqrt'), # Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'strftime'), ('obj.funct()', 'funct'), ]) def", "in (made up) objects 
('dt.strftime(\"%H:%M\")', 'strftime'), ('obj.funct()', 'funct'), ]) def test_given_function_called_with_split( parse_ast_tree, function_call:", "# Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'strftime'), ('obj.funct()', 'funct'), ]) def test_given_function_called_with_split(", "splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called( node,", "# Functions in modules ('datetime.timedelta(days=1)', 'datetime.timedelta'), ('cmath.sqrt(100)', 'cmath.sqrt'), # Functions in (made up)", "modules ('datetime.timedelta(days=1)', 'datetime.timedelta'), ('cmath.sqrt(100)', 'cmath.sqrt'), # Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'dt.strftime'),", "# Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'dt.strftime'), ('obj.funct()', 'obj.funct'), ]) def test_given_function_called_no_split(", "'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'timedelta'), ('cmath.sqrt(100)', 'sqrt'), # Functions", "('datetime.timedelta(days=1)', 'timedelta'), ('cmath.sqrt(100)', 'sqrt'), # Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'strftime'), ('obj.funct()',", "up) objects ('dt.strftime(\"%H:%M\")', 'strftime'), ('obj.funct()', 'funct'), ]) def test_given_function_called_with_split( parse_ast_tree, function_call: str, function_name:", "= parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called( node, [function_name], split_modules=True, ) assert", "builtin functions ('print(\"Hello world!\")', 'print'), ('int(\"10\")', 'int'), ('bool(1)', 'bool'), ('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)',", "'dt.strftime'), ('obj.funct()', 'obj.funct'), ]) def test_given_function_called_no_split( parse_ast_tree, function_call: str, function_name: str, ) ->", "'str'), # Functions in modules ('datetime.timedelta(days=1)', 'timedelta'), ('cmath.sqrt(100)', 'sqrt'), # Functions in 
(made", "parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called( node, [function_name], split_modules=True, ) assert called_function", "'obj.funct'), ]) def test_given_function_called_no_split( parse_ast_tree, function_call: str, function_name: str, ) -> None: \"\"\"Test", "import pytest from wemake_python_styleguide.logic.tree import functions @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions", "-> None: \"\"\"Test given_function_called without splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node =", "('cmath.sqrt(100)', 'sqrt'), # Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'strftime'), ('obj.funct()', 'funct'), ])", "the modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called(node, [function_name]) assert", "str, ) -> None: \"\"\"Test given_function_called without splitting the modules.\"\"\" tree = parse_ast_tree(function_call)", "the modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called( node, [function_name],", "[ # Simple builtin functions ('print(\"Hello world!\")', 'print'), ('int(\"10\")', 'int'), ('bool(1)', 'bool'), ('open(\"/tmp/file.txt\",", "'datetime.timedelta'), ('cmath.sqrt(100)', 'cmath.sqrt'), # Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'dt.strftime'), ('obj.funct()', 'obj.funct'),", "('cmath.sqrt(100)', 'cmath.sqrt'), # Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'dt.strftime'), ('obj.funct()', 'obj.funct'), ])", "('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'datetime.timedelta'), ('cmath.sqrt(100)', 'cmath.sqrt'), # Functions in", "functions.given_function_called(node, [function_name]) assert called_function == function_name @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions", 
"pytest from wemake_python_styleguide.logic.tree import functions @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print(\"Hello", "tree.body[0].value called_function = functions.given_function_called(node, [function_name]) assert called_function == function_name @pytest.mark.parametrize(('function_call', 'function_name'), [ #", "test_given_function_called_no_split( parse_ast_tree, function_call: str, function_name: str, ) -> None: \"\"\"Test given_function_called without splitting", "def test_given_function_called_with_split( parse_ast_tree, function_call: str, function_name: str, ) -> None: \"\"\"Test given_function_called splitting", "function_name: str, ) -> None: \"\"\"Test given_function_called splitting the modules.\"\"\" tree = parse_ast_tree(function_call)", "in modules ('datetime.timedelta(days=1)', 'datetime.timedelta'), ('cmath.sqrt(100)', 'cmath.sqrt'), # Functions in (made up) objects ('dt.strftime(\"%H:%M\")',", "function_name @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print(\"Hello world!\")', 'print'), ('int(\"10\")', 'int'),", "'bool'), ('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'timedelta'), ('cmath.sqrt(100)',", "= tree.body[0].value called_function = functions.given_function_called( node, [function_name], split_modules=True, ) assert called_function == function_name", "function_call: str, function_name: str, ) -> None: \"\"\"Test given_function_called without splitting the modules.\"\"\"", "('dt.strftime(\"%H:%M\")', 'dt.strftime'), ('obj.funct()', 'obj.funct'), ]) def test_given_function_called_no_split( parse_ast_tree, function_call: str, function_name: str, )", "'print'), ('int(\"10\")', 'int'), ('bool(1)', 'bool'), ('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)', 'str'), # Functions in", "= functions.given_function_called(node, [function_name]) assert called_function == 
function_name @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin", "parse_ast_tree, function_call: str, function_name: str, ) -> None: \"\"\"Test given_function_called splitting the modules.\"\"\"", "in modules ('datetime.timedelta(days=1)', 'timedelta'), ('cmath.sqrt(100)', 'sqrt'), # Functions in (made up) objects ('dt.strftime(\"%H:%M\")',", "]) def test_given_function_called_with_split( parse_ast_tree, function_call: str, function_name: str, ) -> None: \"\"\"Test given_function_called", "parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called(node, [function_name]) assert called_function == function_name @pytest.mark.parametrize(('function_call',", "\"\"\"Test given_function_called without splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function", "up) objects ('dt.strftime(\"%H:%M\")', 'dt.strftime'), ('obj.funct()', 'obj.funct'), ]) def test_given_function_called_no_split( parse_ast_tree, function_call: str, function_name:", "'int'), ('bool(1)', 'bool'), ('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)',", "from wemake_python_styleguide.logic.tree import functions @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print(\"Hello world!\")',", "'bool'), ('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'datetime.timedelta'), ('cmath.sqrt(100)',", "Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'strftime'), ('obj.funct()', 'funct'), ]) def test_given_function_called_with_split( parse_ast_tree,", "'function_name'), [ # Simple builtin functions ('print(\"Hello world!\")', 'print'), ('int(\"10\")', 'int'), ('bool(1)', 'bool'),", "without splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function = 
functions.given_function_called(node,", "node = tree.body[0].value called_function = functions.given_function_called( node, [function_name], split_modules=True, ) assert called_function ==", "in (made up) objects ('dt.strftime(\"%H:%M\")', 'dt.strftime'), ('obj.funct()', 'obj.funct'), ]) def test_given_function_called_no_split( parse_ast_tree, function_call:", "str, function_name: str, ) -> None: \"\"\"Test given_function_called without splitting the modules.\"\"\" tree", "def test_given_function_called_no_split( parse_ast_tree, function_call: str, function_name: str, ) -> None: \"\"\"Test given_function_called without", ") -> None: \"\"\"Test given_function_called splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node =", "tree = parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called( node, [function_name], split_modules=True, )", "Functions in modules ('datetime.timedelta(days=1)', 'datetime.timedelta'), ('cmath.sqrt(100)', 'cmath.sqrt'), # Functions in (made up) objects", "(made up) objects ('dt.strftime(\"%H:%M\")', 'strftime'), ('obj.funct()', 'funct'), ]) def test_given_function_called_with_split( parse_ast_tree, function_call: str,", "('obj.funct()', 'obj.funct'), ]) def test_given_function_called_no_split( parse_ast_tree, function_call: str, function_name: str, ) -> None:", "functions @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print(\"Hello world!\")', 'print'), ('int(\"10\")', 'int'),", "@pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print(\"Hello world!\")', 'print'), ('int(\"10\")', 'int'), ('bool(1)',", "wemake_python_styleguide.logic.tree import functions @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print(\"Hello world!\")', 'print'),", "('int(\"10\")', 'int'), ('bool(1)', 'bool'), ('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)', 'str'), # 
Functions in modules", "modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called(node, [function_name]) assert called_function", "= parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called(node, [function_name]) assert called_function == function_name", "'timedelta'), ('cmath.sqrt(100)', 'sqrt'), # Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'strftime'), ('obj.funct()', 'funct'),", "]) def test_given_function_called_no_split( parse_ast_tree, function_call: str, function_name: str, ) -> None: \"\"\"Test given_function_called", "Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'dt.strftime'), ('obj.funct()', 'obj.funct'), ]) def test_given_function_called_no_split( parse_ast_tree,", "'cmath.sqrt'), # Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'dt.strftime'), ('obj.funct()', 'obj.funct'), ]) def", "[function_name]) assert called_function == function_name @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print(\"Hello", "'str'), # Functions in modules ('datetime.timedelta(days=1)', 'datetime.timedelta'), ('cmath.sqrt(100)', 'cmath.sqrt'), # Functions in (made", "modules ('datetime.timedelta(days=1)', 'timedelta'), ('cmath.sqrt(100)', 'sqrt'), # Functions in (made up) objects ('dt.strftime(\"%H:%M\")', 'strftime'),", "('dt.strftime(\"%H:%M\")', 'strftime'), ('obj.funct()', 'funct'), ]) def test_given_function_called_with_split( parse_ast_tree, function_call: str, function_name: str, )", "str, ) -> None: \"\"\"Test given_function_called splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node", "functions ('print(\"Hello world!\")', 'print'), ('int(\"10\")', 'int'), ('bool(1)', 'bool'), ('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)', 'str'),", "Functions in modules ('datetime.timedelta(days=1)', 'timedelta'), ('cmath.sqrt(100)', 'sqrt'), # Functions in 
(made up) objects", "None: \"\"\"Test given_function_called without splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value", "('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'timedelta'), ('cmath.sqrt(100)', 'sqrt'),", "'funct'), ]) def test_given_function_called_with_split( parse_ast_tree, function_call: str, function_name: str, ) -> None: \"\"\"Test", "modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called( node, [function_name], split_modules=True,", "('open(\"/tmp/file.txt\", \"r\")', 'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'datetime.timedelta'), ('cmath.sqrt(100)', 'cmath.sqrt'),", "test_given_function_called_with_split( parse_ast_tree, function_call: str, function_name: str, ) -> None: \"\"\"Test given_function_called splitting the", "None: \"\"\"Test given_function_called splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function", "given_function_called splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called(", "function_name: str, ) -> None: \"\"\"Test given_function_called without splitting the modules.\"\"\" tree =", "('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'timedelta'), ('cmath.sqrt(100)', 'sqrt'), # Functions in", "given_function_called without splitting the modules.\"\"\" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function =", "objects ('dt.strftime(\"%H:%M\")', 'dt.strftime'), ('obj.funct()', 'obj.funct'), ]) def test_given_function_called_no_split( parse_ast_tree, function_call: str, function_name: str,", "Simple builtin functions ('print(\"Hello world!\")', 'print'), ('int(\"10\")', 'int'), ('bool(1)', 'bool'), ('open(\"/tmp/file.txt\", 
\"r\")', 'open'),", "called_function = functions.given_function_called(node, [function_name]) assert called_function == function_name @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple", "objects ('dt.strftime(\"%H:%M\")', 'strftime'), ('obj.funct()', 'funct'), ]) def test_given_function_called_with_split( parse_ast_tree, function_call: str, function_name: str,", "import functions @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print(\"Hello world!\")', 'print'), ('int(\"10\")',", "'strftime'), ('obj.funct()', 'funct'), ]) def test_given_function_called_with_split( parse_ast_tree, function_call: str, function_name: str, ) ->" ]
[]
[ "method that returns 7 characters from a file, implement readN(n) which reads n", "a read7() method that returns 7 characters from a file, implement readN(n) which", "given a file with the content “Hello world”, three read7() returns “Hello w”,", "Microsoft. Using a read7() method that returns 7 characters from a file, implement", "Using a read7() method that returns 7 characters from a file, implement readN(n)", "implement readN(n) which reads n characters. For example, given a file with the", "which reads n characters. For example, given a file with the content “Hello", "the content “Hello world”, three read7() returns “Hello w”, “orld” and then “”.", "characters. For example, given a file with the content “Hello world”, three read7()", "a file with the content “Hello world”, three read7() returns “Hello w”, “orld”", "returns 7 characters from a file, implement readN(n) which reads n characters. For", "file with the content “Hello world”, three read7() returns “Hello w”, “orld” and", "file, implement readN(n) which reads n characters. For example, given a file with", "\"\"\"This problem was asked Microsoft. Using a read7() method that returns 7 characters", "content “Hello world”, three read7() returns “Hello w”, “orld” and then “”. \"\"\"", "For example, given a file with the content “Hello world”, three read7() returns", "with the content “Hello world”, three read7() returns “Hello w”, “orld” and then", "n characters. For example, given a file with the content “Hello world”, three", "7 characters from a file, implement readN(n) which reads n characters. For example,", "was asked Microsoft. Using a read7() method that returns 7 characters from a", "example, given a file with the content “Hello world”, three read7() returns “Hello", "from a file, implement readN(n) which reads n characters. 
For example, given a", "read7() method that returns 7 characters from a file, implement readN(n) which reads", "that returns 7 characters from a file, implement readN(n) which reads n characters.", "characters from a file, implement readN(n) which reads n characters. For example, given", "asked Microsoft. Using a read7() method that returns 7 characters from a file,", "readN(n) which reads n characters. For example, given a file with the content", "a file, implement readN(n) which reads n characters. For example, given a file", "reads n characters. For example, given a file with the content “Hello world”,", "problem was asked Microsoft. Using a read7() method that returns 7 characters from" ]
[ "defaultdict def generate_graph(word_list): graph = defaultdict(set) for word in word_list: for i in", "word[i + 1:] graph[new_word].add(word) graph[word].add(new_word) return graph def get_neighbours(graph, word): neighbours = set()", "<reponame>vighnesh153/ds-algo from collections import defaultdict def generate_graph(word_list): graph = defaultdict(set) for word in", "end_word, word_list): graph = generate_graph([begin_word, *word_list]) if end_word not in graph: return []", "== end_word: result.append(path + [end_word]) continue next_level.append((neighbour, path + [neighbour])) if len(result) >", "in range(len(word)): new_word = word[:i] + '*' + word[i + 1:] graph[new_word].add(word) graph[word].add(new_word)", "set() for star_word in graph[word]: for neighbour in graph[star_word]: neighbours.add(neighbour) neighbours.remove(word) return neighbours", "[(begin_word, [begin_word])] visited = set() while len(queue) > 0: next_level = [] for", "range(len(word)): new_word = word[:i] + '*' + word[i + 1:] graph[new_word].add(word) graph[word].add(new_word) return", "graph[star_word]: neighbours.add(neighbour) neighbours.remove(word) return neighbours def solve(begin_word, end_word, word_list): graph = generate_graph([begin_word, *word_list])", "i in range(len(word)): new_word = word[:i] + '*' + word[i + 1:] graph[new_word].add(word)", "= [] for word, path in queue: visited.add(word) for neighbour in get_neighbours(graph, word):", "0: next_level = [] for word, path in queue: visited.add(word) for neighbour in", "in graph[word]: for neighbour in graph[star_word]: neighbours.add(neighbour) neighbours.remove(word) return neighbours def solve(begin_word, end_word,", "= word[:i] + '*' + word[i + 1:] graph[new_word].add(word) graph[word].add(new_word) return graph def", "from collections import defaultdict def generate_graph(word_list): graph = defaultdict(set) for word in word_list:", "+ [end_word]) continue next_level.append((neighbour, path + [neighbour])) if 
len(result) > 0: break queue", "word): neighbours = set() for star_word in graph[word]: for neighbour in graph[star_word]: neighbours.add(neighbour)", "[begin_word])] visited = set() while len(queue) > 0: next_level = [] for word,", "word in word_list: for i in range(len(word)): new_word = word[:i] + '*' +", "for neighbour in get_neighbours(graph, word): if neighbour in visited: continue if neighbour ==", "generate_graph(word_list): graph = defaultdict(set) for word in word_list: for i in range(len(word)): new_word", "get_neighbours(graph, word): neighbours = set() for star_word in graph[word]: for neighbour in graph[star_word]:", "not in graph: return [] result = [] queue = [(begin_word, [begin_word])] visited", "graph def get_neighbours(graph, word): neighbours = set() for star_word in graph[word]: for neighbour", "end_word not in graph: return [] result = [] queue = [(begin_word, [begin_word])]", "= [(begin_word, [begin_word])] visited = set() while len(queue) > 0: next_level = []", "for i in range(len(word)): new_word = word[:i] + '*' + word[i + 1:]", "for star_word in graph[word]: for neighbour in graph[star_word]: neighbours.add(neighbour) neighbours.remove(word) return neighbours def", "while len(queue) > 0: next_level = [] for word, path in queue: visited.add(word)", "= set() for star_word in graph[word]: for neighbour in graph[star_word]: neighbours.add(neighbour) neighbours.remove(word) return", "= [] queue = [(begin_word, [begin_word])] visited = set() while len(queue) > 0:", "in get_neighbours(graph, word): if neighbour in visited: continue if neighbour == end_word: result.append(path", "return neighbours def solve(begin_word, end_word, word_list): graph = generate_graph([begin_word, *word_list]) if end_word not", "neighbours = set() for star_word in graph[word]: for neighbour in graph[star_word]: neighbours.add(neighbour) neighbours.remove(word)", "defaultdict(set) for word in word_list: for i in range(len(word)): new_word = word[:i] +", "'*' + word[i 
+ 1:] graph[new_word].add(word) graph[word].add(new_word) return graph def get_neighbours(graph, word): neighbours", "graph[word].add(new_word) return graph def get_neighbours(graph, word): neighbours = set() for star_word in graph[word]:", "in graph[star_word]: neighbours.add(neighbour) neighbours.remove(word) return neighbours def solve(begin_word, end_word, word_list): graph = generate_graph([begin_word,", "*word_list]) if end_word not in graph: return [] result = [] queue =", "word_list: for i in range(len(word)): new_word = word[:i] + '*' + word[i +", "in graph: return [] result = [] queue = [(begin_word, [begin_word])] visited =", "star_word in graph[word]: for neighbour in graph[star_word]: neighbours.add(neighbour) neighbours.remove(word) return neighbours def solve(begin_word,", "new_word = word[:i] + '*' + word[i + 1:] graph[new_word].add(word) graph[word].add(new_word) return graph", "word): if neighbour in visited: continue if neighbour == end_word: result.append(path + [end_word])", "if neighbour == end_word: result.append(path + [end_word]) continue next_level.append((neighbour, path + [neighbour])) if", "in word_list: for i in range(len(word)): new_word = word[:i] + '*' + word[i", "neighbours.add(neighbour) neighbours.remove(word) return neighbours def solve(begin_word, end_word, word_list): graph = generate_graph([begin_word, *word_list]) if", "path + [neighbour])) if len(result) > 0: break queue = next_level return result", "[] queue = [(begin_word, [begin_word])] visited = set() while len(queue) > 0: next_level", "[] for word, path in queue: visited.add(word) for neighbour in get_neighbours(graph, word): if", "in queue: visited.add(word) for neighbour in get_neighbours(graph, word): if neighbour in visited: continue", "+ word[i + 1:] graph[new_word].add(word) graph[word].add(new_word) return graph def get_neighbours(graph, word): neighbours =", "end_word: result.append(path + [end_word]) continue next_level.append((neighbour, path + [neighbour])) 
if len(result) > 0:", "graph[new_word].add(word) graph[word].add(new_word) return graph def get_neighbours(graph, word): neighbours = set() for star_word in", "= generate_graph([begin_word, *word_list]) if end_word not in graph: return [] result = []", "queue: visited.add(word) for neighbour in get_neighbours(graph, word): if neighbour in visited: continue if", "return [] result = [] queue = [(begin_word, [begin_word])] visited = set() while", "import defaultdict def generate_graph(word_list): graph = defaultdict(set) for word in word_list: for i", "solve(begin_word, end_word, word_list): graph = generate_graph([begin_word, *word_list]) if end_word not in graph: return", "= defaultdict(set) for word in word_list: for i in range(len(word)): new_word = word[:i]", "graph = generate_graph([begin_word, *word_list]) if end_word not in graph: return [] result =", "continue if neighbour == end_word: result.append(path + [end_word]) continue next_level.append((neighbour, path + [neighbour]))", "visited = set() while len(queue) > 0: next_level = [] for word, path", "graph = defaultdict(set) for word in word_list: for i in range(len(word)): new_word =", "next_level = [] for word, path in queue: visited.add(word) for neighbour in get_neighbours(graph,", "neighbour in get_neighbours(graph, word): if neighbour in visited: continue if neighbour == end_word:", "def solve(begin_word, end_word, word_list): graph = generate_graph([begin_word, *word_list]) if end_word not in graph:", "def generate_graph(word_list): graph = defaultdict(set) for word in word_list: for i in range(len(word)):", "word_list): graph = generate_graph([begin_word, *word_list]) if end_word not in graph: return [] result", "graph[word]: for neighbour in graph[star_word]: neighbours.add(neighbour) neighbours.remove(word) return neighbours def solve(begin_word, end_word, word_list):", "for word in word_list: for i in range(len(word)): new_word = word[:i] + '*'", "[] result = [] queue = [(begin_word, [begin_word])] 
visited = set() while len(queue)", "if end_word not in graph: return [] result = [] queue = [(begin_word,", "= set() while len(queue) > 0: next_level = [] for word, path in", "[end_word]) continue next_level.append((neighbour, path + [neighbour])) if len(result) > 0: break queue =", "get_neighbours(graph, word): if neighbour in visited: continue if neighbour == end_word: result.append(path +", "word, path in queue: visited.add(word) for neighbour in get_neighbours(graph, word): if neighbour in", "next_level.append((neighbour, path + [neighbour])) if len(result) > 0: break queue = next_level return", "set() while len(queue) > 0: next_level = [] for word, path in queue:", "visited.add(word) for neighbour in get_neighbours(graph, word): if neighbour in visited: continue if neighbour", "> 0: next_level = [] for word, path in queue: visited.add(word) for neighbour", "for neighbour in graph[star_word]: neighbours.add(neighbour) neighbours.remove(word) return neighbours def solve(begin_word, end_word, word_list): graph", "+ 1:] graph[new_word].add(word) graph[word].add(new_word) return graph def get_neighbours(graph, word): neighbours = set() for", "1:] graph[new_word].add(word) graph[word].add(new_word) return graph def get_neighbours(graph, word): neighbours = set() for star_word", "word[:i] + '*' + word[i + 1:] graph[new_word].add(word) graph[word].add(new_word) return graph def get_neighbours(graph,", "return graph def get_neighbours(graph, word): neighbours = set() for star_word in graph[word]: for", "in visited: continue if neighbour == end_word: result.append(path + [end_word]) continue next_level.append((neighbour, path", "neighbours def solve(begin_word, end_word, word_list): graph = generate_graph([begin_word, *word_list]) if end_word not in", "path in queue: visited.add(word) for neighbour in get_neighbours(graph, word): if neighbour in visited:", "visited: continue if neighbour == end_word: result.append(path + [end_word]) continue 
next_level.append((neighbour, path +", "def get_neighbours(graph, word): neighbours = set() for star_word in graph[word]: for neighbour in", "generate_graph([begin_word, *word_list]) if end_word not in graph: return [] result = [] queue", "len(queue) > 0: next_level = [] for word, path in queue: visited.add(word) for", "continue next_level.append((neighbour, path + [neighbour])) if len(result) > 0: break queue = next_level", "graph: return [] result = [] queue = [(begin_word, [begin_word])] visited = set()", "neighbour == end_word: result.append(path + [end_word]) continue next_level.append((neighbour, path + [neighbour])) if len(result)", "for word, path in queue: visited.add(word) for neighbour in get_neighbours(graph, word): if neighbour", "collections import defaultdict def generate_graph(word_list): graph = defaultdict(set) for word in word_list: for", "result = [] queue = [(begin_word, [begin_word])] visited = set() while len(queue) >", "+ '*' + word[i + 1:] graph[new_word].add(word) graph[word].add(new_word) return graph def get_neighbours(graph, word):", "if neighbour in visited: continue if neighbour == end_word: result.append(path + [end_word]) continue", "neighbour in visited: continue if neighbour == end_word: result.append(path + [end_word]) continue next_level.append((neighbour,", "result.append(path + [end_word]) continue next_level.append((neighbour, path + [neighbour])) if len(result) > 0: break", "neighbours.remove(word) return neighbours def solve(begin_word, end_word, word_list): graph = generate_graph([begin_word, *word_list]) if end_word", "neighbour in graph[star_word]: neighbours.add(neighbour) neighbours.remove(word) return neighbours def solve(begin_word, end_word, word_list): graph =", "queue = [(begin_word, [begin_word])] visited = set() while len(queue) > 0: next_level =" ]
[ "import ABC class Index(ABC): \"\"\" Base class for an index in Grizzly. \"\"\"", "<gh_stars>1000+ from abc import ABC class Index(ABC): \"\"\" Base class for an index", "ABC class Index(ABC): \"\"\" Base class for an index in Grizzly. \"\"\" pass", "from abc import ABC class Index(ABC): \"\"\" Base class for an index in", "abc import ABC class Index(ABC): \"\"\" Base class for an index in Grizzly." ]
[ "[target.py or $CONFIG] [$OPTION=...] $CONFIG will load values from VACCA.$CONFIG property in the", "i>begin) fo = open(filename,'w') fo.writelines(lines[:begin]+lines[end:]) fo.close() folder = os.getenv('HOME')+'/.config/VACCA/' options = [a for", "dirname = os.getenv('VACCA_DIR') or '' config = os.getenv('VACCA_CONFIG') or '' if files[0] in", "############################################################################### # Delayed imports import taurus from taurus.core.util import argparse from taurus.qt.qtgui.application import", "properties in Tango DB. Reset of QSettings files: > vaccagui --reset #The last", "args or '-h' in args or '-?' in args: print(__doc__) sys.exit(0) elif '--list'", "in args: print('Removing last vacca configs (%s/*.ini)'%folder) os.remove(folder+'*.ini') if len(args)==1: sys.exit(0) elif '--reset'", "enumerate(lines) if re.match('[\\[][a-zA-Z]*[\\]]',l)) sections['[End]'] = len(lines) begin = sections['[General]'] end = min(i for", "properties in this order. 
If empty, a DEFAULT profile is created pointing to", "= tangoFormatter except: pass ###################################################################### import vacca.utils as vu import vacca configs =", "files or [os.getenv('VACCA_CONFIG')] ############################################################################### print('-'*80) print(\"In vacca.main(%s) ...\"%args) ############################################################################### # Delayed imports import", "if os.path.isfile(config): config = os.path.abspath(config) elif config: try: import imp print('Loading %s as", "Launching vacca, loading configuration from a target.py: > vaccagui [target.py or $CONFIG] [$OPTION=...]", "properties or target.py Environment variables (optional, will be initialized in vacca/main.py): VACCA_CONFIG :", "by imp.find_module()) If not set, default values are those set as VACCA properties", "min(i for i in sections.values() if i>begin) fo = open(filename,'w') fo.writelines(lines[:begin]+lines[end:]) fo.close() folder", "print(__doc__) sys.exit(0) def remove_last_config(filename): print('vacca.remove_last_config(%s)'%filename) lines = open(filename).readlines() sections = dict((l.strip(),i) for i,l", "in args: files = [a for a in args if a not in", "in the database $OPTION=... can be used to override values declared properties or", "vacca Config file (or Property name) is obtained from shell args, then env,", "in this order. If empty, a DEFAULT profile is created pointing to default.py", "[remove_last_config(folder+filename) for filename in inits] if len(args)==1: sys.exit(0) elif '--help' in args or", "import TaurusGui ###################################################################### # Set tangoFormatter as default formatter try: from taurus.core.tango.util import", "directory to resources needed by target.py (target.py folder by default) VACCA_PATH : path", "in Tango DB. 
Reset of QSettings files: > vaccagui --reset #The last saved", "vu.get_vacca_property('VACCA_DIR',extract=1) or '' vu.VACCA_DIR = os.environ['VACCA_DIR'] = dirname vu.VACCA_CONFIG = os.environ['VACCA_CONFIG'] = config", "os.environ['VACCA_DIR'] = dirname vu.VACCA_CONFIG = os.environ['VACCA_CONFIG'] = config print('Vacca Environment variables (vacca.main):') print('\\n'.join(map(str,(t", "Config file (or Property name) is obtained from shell args, then env, then", "len(args)==1: sys.exit(0) elif '--reset' in args: inits = [a for a in os.walk(f).next()[2]", "configs = vu.get_config_properties() print('\\nVaccaConfigs:') print('\\n\\t'+'\\n\\t'.join(configs)+'\\n') sys.exit(0) if '--panel' not in args: files =", "__doc__ = \"\"\" The file vacca/main.py is the vaccagui launcher It creates the", "be removed > vaccagui --clean #All the .ini files will be removed Other", "args: print(__doc__) sys.exit(0) elif '--list' in args: import vacca.utils as vu configs =", "vu.VACCA_CONFIG = os.environ['VACCA_CONFIG'] = config print('Vacca Environment variables (vacca.main):') print('\\n'.join(map(str,(t for t in", "#Prints this text > vaccagui --list #Prints available configurations \"\"\" if args and", "MAIN CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE \"\"\" import sys,os,re,time,imp,traceback args", "os.environ['VACCA_CONFIG'] = config print('Vacca Environment variables (vacca.main):') print('\\n'.join(map(str,(t for t in os.environ.items() if", "= config or files[0] if os.path.isfile(config): config = os.path.abspath(config) elif config: try: import", "vacca.config SUBMODULE print '-'*80 if '--panel' in options: import vacca.panel ret = vacca.panel.main(args[:1]+args[-1:])", "Property name) is obtained from shell args, then env, then properties in this", "fo.close() folder = os.getenv('HOME')+'/.config/VACCA/' options = [a for a in args if a.startswith('-')]", "or vu.create_config_properties() if not files or not files[0]: files = [configs.keys()[0]] dirname =", 
"GENERATION IS IN vacca.config SUBMODULE print '-'*80 if '--panel' in options: import vacca.panel", "import taurus from taurus.core.util import argparse from taurus.qt.qtgui.application import TaurusApplication from taurus.qt.qtgui.taurusgui import", "data.get('VACCA_CONFIG',files[0]) dirname = dirname or data.get('VACCA_DIR',dirname) else: config = config or files[0] if", "\\ vu.get_vacca_property('VACCA_DIR',extract=1) or '' vu.VACCA_DIR = os.environ['VACCA_DIR'] = dirname vu.VACCA_CONFIG = os.environ['VACCA_CONFIG'] =", "vacca.main(%s) ...\"%args) ############################################################################### # Delayed imports import taurus from taurus.core.util import argparse from", "elif config: try: import imp print('Loading %s as python module'%config) config = imp.find_module(config.replace('.','/'))[1]", "if a not in options+values] files = files or [os.getenv('VACCA_CONFIG')] ############################################################################### print('-'*80) print(\"In", "files or not files[0]: files = [configs.keys()[0]] dirname = os.getenv('VACCA_DIR') or '' config", "for a in args if '=' in a] if '--clean' in args: print('Removing", "load values from VACCA.$CONFIG property in the database $OPTION=... can be used to", "set, equivalent to passing target.py as argument VACCA_DIR : directory to resources needed", "vaccagui usage -------------- Launching vacca, loading configuration from a target.py: > vaccagui [target.py", "'--list' in args: import vacca.utils as vu configs = vu.get_config_properties() print('\\nVaccaConfigs:') print('\\n\\t'+'\\n\\t'.join(configs)+'\\n') sys.exit(0)", "vacca configs (%s/*.ini)'%folder) os.remove(folder+'*.ini') if len(args)==1: sys.exit(0) elif '--reset' in args: inits =", "or '-?' 
in args: print(__doc__) sys.exit(0) elif '--list' in args: import vacca.utils as", "python module'%config) config = imp.find_module(config.replace('.','/'))[1] dirname = os.path.dirname(config) except: pass dirname = dirname", "> vaccagui --list #Prints available configurations \"\"\" if args and args[0].strip('- ') ==", "then env, then properties in this order. If empty, a DEFAULT profile is", "this call: >taurusgui vacca Config file (or Property name) is obtained from shell", "removed Other options: > vaccagui --helps #Prints this text > vaccagui --list #Prints", "a] if '--clean' in args: print('Removing last vacca configs (%s/*.ini)'%folder) os.remove(folder+'*.ini') if len(args)==1:", "= vu.get_config_properties(files[0]) config = config or data.get('VACCA_CONFIG',files[0]) dirname = dirname or data.get('VACCA_DIR',dirname) else:", "sys.argv[1:] files = [] __doc__ = \"\"\" The file vacca/main.py is the vaccagui", "for t in os.environ.items() if 'VACCA' in t[0])))) ### MAIN CODE FOR PANELS", "config = config or files[0] if os.path.isfile(config): config = os.path.abspath(config) elif config: try:", "target.py (target.py folder by default) VACCA_PATH : path to vacca module (initialized by", "target.py as argument VACCA_DIR : directory to resources needed by target.py (target.py folder", "#The last saved perspective will be removed > vaccagui --clean #All the .ini", "elif '--help' in args or '-h' in args or '-?' 
in args: print(__doc__)", "config print('Vacca Environment variables (vacca.main):') print('\\n'.join(map(str,(t for t in os.environ.items() if 'VACCA' in", "from taurus.core.tango.util import tangoFormatter from taurus.qt.qtgui.base import TaurusBaseComponent TaurusBaseComponent.FORMAT = tangoFormatter except: pass", "############################################################################### print('-'*80) print(\"In vacca.main(%s) ...\"%args) ############################################################################### # Delayed imports import taurus from taurus.core.util", "if '--clean' in args: print('Removing last vacca configs (%s/*.ini)'%folder) os.remove(folder+'*.ini') if len(args)==1: sys.exit(0)", "print(\"In vacca.main(%s) ...\"%args) ############################################################################### # Delayed imports import taurus from taurus.core.util import argparse", "for a in os.walk(f).next()[2] if a.endswith('.ini')] print('Removing last vacca configs (%s)'%inits) [remove_last_config(folder+filename) for", "and args[0].strip('- ') == 'help': print(__doc__) sys.exit(0) def remove_last_config(filename): print('vacca.remove_last_config(%s)'%filename) lines = open(filename).readlines()", "vu.get_config_properties() print('\\nVaccaConfigs:') print('\\n\\t'+'\\n\\t'.join(configs)+'\\n') sys.exit(0) if '--panel' not in args: files = [a for", "Set tangoFormatter as default formatter try: from taurus.core.tango.util import tangoFormatter from taurus.qt.qtgui.base import", "if set, equivalent to passing target.py as argument VACCA_DIR : directory to resources", "IN vacca.config SUBMODULE print '-'*80 if '--panel' in options: import vacca.panel ret =", "$CONFIG will load values from VACCA.$CONFIG property in the database $OPTION=... can be", "a in args if a.startswith('-')] values = [a for a in args if", "'-'*80 if '--panel' in options: import vacca.panel ret = vacca.panel.main(args[:1]+args[-1:]) else: confname =", "[$OPTION=...] 
$CONFIG will load values from VACCA.$CONFIG property in the database $OPTION=... can", "vu.get_config_properties(files[0]) config = config or data.get('VACCA_CONFIG',files[0]) dirname = dirname or data.get('VACCA_DIR',dirname) else: config", "in a] if '--clean' in args: print('Removing last vacca configs (%s/*.ini)'%folder) os.remove(folder+'*.ini') if", "pass dirname = dirname or os.path.dirname(config) or \\ vu.get_vacca_property('VACCA_DIR',extract=1) or '' vu.VACCA_DIR =", "args if a not in options+values] files = files or [os.getenv('VACCA_CONFIG')] ############################################################################### print('-'*80)", "if 'VACCA' in t[0])))) ### MAIN CODE FOR PANELS GENERATION IS IN vacca.config", "args: inits = [a for a in os.walk(f).next()[2] if a.endswith('.ini')] print('Removing last vacca", "target.py: > vaccagui [target.py or $CONFIG] [$OPTION=...] $CONFIG will load values from VACCA.$CONFIG", "created pointing to default.py MAIN CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE", "VACCA_PATH : path to vacca module (initialized by imp.find_module()) If not set, default", "removed > vaccagui --clean #All the .ini files will be removed Other options:", "module (initialized by imp.find_module()) If not set, default values are those set as", "values from VACCA.$CONFIG property in the database $OPTION=... 
can be used to override", "empty, a DEFAULT profile is created pointing to default.py MAIN CODE FOR PANELS", "can be used to override values declared properties or target.py Environment variables (optional,", "this file emulates this call: >taurusgui vacca Config file (or Property name) is", "except: pass dirname = dirname or os.path.dirname(config) or \\ vu.get_vacca_property('VACCA_DIR',extract=1) or '' vu.VACCA_DIR", "dirname or data.get('VACCA_DIR',dirname) else: config = config or files[0] if os.path.isfile(config): config =", "TaurusBaseComponent TaurusBaseComponent.FORMAT = tangoFormatter except: pass ###################################################################### import vacca.utils as vu import vacca", "configuration from a target.py: > vaccagui [target.py or $CONFIG] [$OPTION=...] $CONFIG will load", "%s as python module'%config) config = imp.find_module(config.replace('.','/'))[1] dirname = os.path.dirname(config) except: pass dirname", "for a in args if a not in options+values] files = files or", "== 'help': print(__doc__) sys.exit(0) def remove_last_config(filename): print('vacca.remove_last_config(%s)'%filename) lines = open(filename).readlines() sections = dict((l.strip(),i)", "or '' config = os.getenv('VACCA_CONFIG') or '' if files[0] in configs: print('Loading %s'%files[0])", "print('Vacca Environment variables (vacca.main):') print('\\n'.join(map(str,(t for t in os.environ.items() if 'VACCA' in t[0]))))", "t in os.environ.items() if 'VACCA' in t[0])))) ### MAIN CODE FOR PANELS GENERATION", "lines = open(filename).readlines() sections = dict((l.strip(),i) for i,l in enumerate(lines) if re.match('[\\[][a-zA-Z]*[\\]]',l)) sections['[End]']", "name) is obtained from shell args, then env, then properties in this order.", "sections.values() if i>begin) fo = open(filename,'w') fo.writelines(lines[:begin]+lines[end:]) fo.close() folder = os.getenv('HOME')+'/.config/VACCA/' options =", "VACCA.$CONFIG property in the database $OPTION=... 
can be used to override values declared", "vacca.panel.main(args[:1]+args[-1:]) else: confname = 'vaccagui' app = TaurusApplication() gui = TaurusGui(None, confname=confname) gui.show()", "taurus.core.tango.util import tangoFormatter from taurus.qt.qtgui.base import TaurusBaseComponent TaurusBaseComponent.FORMAT = tangoFormatter except: pass ######################################################################", "formatter try: from taurus.core.tango.util import tangoFormatter from taurus.qt.qtgui.base import TaurusBaseComponent TaurusBaseComponent.FORMAT = tangoFormatter", "is the vaccagui launcher It creates the taurusgui environment and sets all environment", "folder by default) VACCA_PATH : path to vacca module (initialized by imp.find_module()) If", "all environment variables. vaccagui usage -------------- Launching vacca, loading configuration from a target.py:", "options: import vacca.panel ret = vacca.panel.main(args[:1]+args[-1:]) else: confname = 'vaccagui' app = TaurusApplication()", "default.py MAIN CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE \"\"\" import sys,os,re,time,imp,traceback", "options: > vaccagui --helps #Prints this text > vaccagui --list #Prints available configurations", "those set as VACCA properties in Tango DB. Reset of QSettings files: >", "SUBMODULE print '-'*80 if '--panel' in options: import vacca.panel ret = vacca.panel.main(args[:1]+args[-1:]) else:", "print('\\n'.join(map(str,(t for t in os.environ.items() if 'VACCA' in t[0])))) ### MAIN CODE FOR", "last vacca configs (%s)'%inits) [remove_last_config(folder+filename) for filename in inits] if len(args)==1: sys.exit(0) elif", "for i,l in enumerate(lines) if re.match('[\\[][a-zA-Z]*[\\]]',l)) sections['[End]'] = len(lines) begin = sections['[General]'] end", "as vu import vacca configs = vu.get_config_properties() or vu.create_config_properties() if not files or", "in args or '-h' in args or '-?' 
in args: print(__doc__) sys.exit(0) elif", "is created pointing to default.py MAIN CODE FOR PANELS GENERATION IS IN vacca.config", "gui = TaurusGui(None, confname=confname) gui.show() ret = app.exec_() taurus.info('Finished execution of TaurusGui') sys.exit(ret)", "sys.exit(0) if '--panel' not in args: files = [a for a in args", "sections['[General]'] end = min(i for i in sections.values() if i>begin) fo = open(filename,'w')", "os.getenv('VACCA_DIR') or '' config = os.getenv('VACCA_CONFIG') or '' if files[0] in configs: print('Loading", "PANELS GENERATION IS IN vacca.config SUBMODULE \"\"\" import sys,os,re,time,imp,traceback args = sys.argv[1:] files", "vaccagui --reset #The last saved perspective will be removed > vaccagui --clean #All", "...\"%args) ############################################################################### # Delayed imports import taurus from taurus.core.util import argparse from taurus.qt.qtgui.application", "'' config = os.getenv('VACCA_CONFIG') or '' if files[0] in configs: print('Loading %s'%files[0]) data", "Other options: > vaccagui --helps #Prints this text > vaccagui --list #Prints available", "If not set, default values are those set as VACCA properties in Tango", "in vacca/main.py): VACCA_CONFIG : if set, equivalent to passing target.py as argument VACCA_DIR", "if len(args)==1: sys.exit(0) elif '--reset' in args: inits = [a for a in", "else: config = config or files[0] if os.path.isfile(config): config = os.path.abspath(config) elif config:", "usage -------------- Launching vacca, loading configuration from a target.py: > vaccagui [target.py or", "= config print('Vacca Environment variables (vacca.main):') print('\\n'.join(map(str,(t for t in os.environ.items() if 'VACCA'", "TaurusGui ###################################################################### # Set tangoFormatter as default formatter try: from taurus.core.tango.util import tangoFormatter", "GENERATION IS IN vacca.config SUBMODULE \"\"\" import 
sys,os,re,time,imp,traceback args = sys.argv[1:] files =", "remove_last_config(filename): print('vacca.remove_last_config(%s)'%filename) lines = open(filename).readlines() sections = dict((l.strip(),i) for i,l in enumerate(lines) if", "= config or data.get('VACCA_CONFIG',files[0]) dirname = dirname or data.get('VACCA_DIR',dirname) else: config = config", "\"\"\" Vacca runner; this file emulates this call: >taurusgui vacca Config file (or", "are those set as VACCA properties in Tango DB. Reset of QSettings files:", "is obtained from shell args, then env, then properties in this order. If", "configs = vu.get_config_properties() or vu.create_config_properties() if not files or not files[0]: files =", "len(args)==1: sys.exit(0) elif '--help' in args or '-h' in args or '-?' in", "\"\"\" import sys,os,re,time,imp,traceback args = sys.argv[1:] files = [] __doc__ = \"\"\" The", "in args: print(__doc__) sys.exit(0) elif '--list' in args: import vacca.utils as vu configs", "'-h' in args or '-?' in args: print(__doc__) sys.exit(0) elif '--list' in args:", "'help': print(__doc__) sys.exit(0) def remove_last_config(filename): print('vacca.remove_last_config(%s)'%filename) lines = open(filename).readlines() sections = dict((l.strip(),i) for", "config: try: import imp print('Loading %s as python module'%config) config = imp.find_module(config.replace('.','/'))[1] dirname", "= vacca.panel.main(args[:1]+args[-1:]) else: confname = 'vaccagui' app = TaurusApplication() gui = TaurusGui(None, confname=confname)", "###################################################################### import vacca.utils as vu import vacca configs = vu.get_config_properties() or vu.create_config_properties() if", "vu.get_config_properties() or vu.create_config_properties() if not files or not files[0]: files = [configs.keys()[0]] dirname", "file vacca/main.py is the vaccagui launcher It creates the taurusgui environment and sets", "= sections['[General]'] end = min(i for i in sections.values() if 
i>begin) fo =", "a not in options+values] files = files or [os.getenv('VACCA_CONFIG')] ############################################################################### print('-'*80) print(\"In vacca.main(%s)", "(initialized by imp.find_module()) If not set, default values are those set as VACCA", "config or files[0] if os.path.isfile(config): config = os.path.abspath(config) elif config: try: import imp", "= os.environ['VACCA_DIR'] = dirname vu.VACCA_CONFIG = os.environ['VACCA_CONFIG'] = config print('Vacca Environment variables (vacca.main):')", "from VACCA.$CONFIG property in the database $OPTION=... can be used to override values", "FOR PANELS GENERATION IS IN vacca.config SUBMODULE \"\"\" import sys,os,re,time,imp,traceback args = sys.argv[1:]", "the vaccagui launcher It creates the taurusgui environment and sets all environment variables.", "CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE \"\"\" import sys,os,re,time,imp,traceback args =", "or \\ vu.get_vacca_property('VACCA_DIR',extract=1) or '' vu.VACCA_DIR = os.environ['VACCA_DIR'] = dirname vu.VACCA_CONFIG = os.environ['VACCA_CONFIG']", "files will be removed Other options: > vaccagui --helps #Prints this text >", "override values declared properties or target.py Environment variables (optional, will be initialized in", "a DEFAULT profile is created pointing to default.py MAIN CODE FOR PANELS GENERATION", "> vaccagui --clean #All the .ini files will be removed Other options: >", "values declared properties or target.py Environment variables (optional, will be initialized in vacca/main.py):", "args and args[0].strip('- ') == 'help': print(__doc__) sys.exit(0) def remove_last_config(filename): print('vacca.remove_last_config(%s)'%filename) lines =", "vu.VACCA_DIR = os.environ['VACCA_DIR'] = dirname vu.VACCA_CONFIG = os.environ['VACCA_CONFIG'] = config print('Vacca Environment variables", "import vacca.utils as vu configs = vu.get_config_properties() print('\\nVaccaConfigs:') 
print('\\n\\t'+'\\n\\t'.join(configs)+'\\n') sys.exit(0) if '--panel' not", "Vacca runner; this file emulates this call: >taurusgui vacca Config file (or Property", "order. If empty, a DEFAULT profile is created pointing to default.py MAIN CODE", "in t[0])))) ### MAIN CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE print", "set as VACCA properties in Tango DB. Reset of QSettings files: > vaccagui", "'vaccagui' app = TaurusApplication() gui = TaurusGui(None, confname=confname) gui.show() ret = app.exec_() taurus.info('Finished", "target.py Environment variables (optional, will be initialized in vacca/main.py): VACCA_CONFIG : if set,", "DEFAULT profile is created pointing to default.py MAIN CODE FOR PANELS GENERATION IS", "imp.find_module()) If not set, default values are those set as VACCA properties in", "vaccagui --list #Prints available configurations \"\"\" if args and args[0].strip('- ') == 'help':", "CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE print '-'*80 if '--panel' in", "config = config or data.get('VACCA_CONFIG',files[0]) dirname = dirname or data.get('VACCA_DIR',dirname) else: config =", "= os.path.dirname(config) except: pass dirname = dirname or os.path.dirname(config) or \\ vu.get_vacca_property('VACCA_DIR',extract=1) or", "a in os.walk(f).next()[2] if a.endswith('.ini')] print('Removing last vacca configs (%s)'%inits) [remove_last_config(folder+filename) for filename", "-------------- Launching vacca, loading configuration from a target.py: > vaccagui [target.py or $CONFIG]", "if i>begin) fo = open(filename,'w') fo.writelines(lines[:begin]+lines[end:]) fo.close() folder = os.getenv('HOME')+'/.config/VACCA/' options = [a", "args: print('Removing last vacca configs (%s/*.ini)'%folder) os.remove(folder+'*.ini') if len(args)==1: sys.exit(0) elif '--reset' in", "files[0] if os.path.isfile(config): config = os.path.abspath(config) elif config: try: import imp print('Loading %s", "TaurusApplication() gui = TaurusGui(None, 
confname=confname) gui.show() ret = app.exec_() taurus.info('Finished execution of TaurusGui')", "fo = open(filename,'w') fo.writelines(lines[:begin]+lines[end:]) fo.close() folder = os.getenv('HOME')+'/.config/VACCA/' options = [a for a", "VACCA_DIR : directory to resources needed by target.py (target.py folder by default) VACCA_PATH", "by default) VACCA_PATH : path to vacca module (initialized by imp.find_module()) If not", "ret = vacca.panel.main(args[:1]+args[-1:]) else: confname = 'vaccagui' app = TaurusApplication() gui = TaurusGui(None,", "'--clean' in args: print('Removing last vacca configs (%s/*.ini)'%folder) os.remove(folder+'*.ini') if len(args)==1: sys.exit(0) elif", "print('-'*80) print(\"In vacca.main(%s) ...\"%args) ############################################################################### # Delayed imports import taurus from taurus.core.util import", "if a.startswith('-')] values = [a for a in args if '=' in a]", "os.getenv('VACCA_CONFIG') or '' if files[0] in configs: print('Loading %s'%files[0]) data = vu.get_config_properties(files[0]) config", "= dirname or os.path.dirname(config) or \\ vu.get_vacca_property('VACCA_DIR',extract=1) or '' vu.VACCA_DIR = os.environ['VACCA_DIR'] =", "(or Property name) is obtained from shell args, then env, then properties in", "variables (optional, will be initialized in vacca/main.py): VACCA_CONFIG : if set, equivalent to", "confname = 'vaccagui' app = TaurusApplication() gui = TaurusGui(None, confname=confname) gui.show() ret =", "args if '=' in a] if '--clean' in args: print('Removing last vacca configs", "or os.path.dirname(config) or \\ vu.get_vacca_property('VACCA_DIR',extract=1) or '' vu.VACCA_DIR = os.environ['VACCA_DIR'] = dirname vu.VACCA_CONFIG", "path to vacca module (initialized by imp.find_module()) If not set, default values are", "print('Loading %s'%files[0]) data = vu.get_config_properties(files[0]) config = config or data.get('VACCA_CONFIG',files[0]) dirname = dirname", "sets all environment 
variables. vaccagui usage -------------- Launching vacca, loading configuration from a", "loading configuration from a target.py: > vaccagui [target.py or $CONFIG] [$OPTION=...] $CONFIG will", "or '' if files[0] in configs: print('Loading %s'%files[0]) data = vu.get_config_properties(files[0]) config =", "os.remove(folder+'*.ini') if len(args)==1: sys.exit(0) elif '--reset' in args: inits = [a for a", "= [] __doc__ = \"\"\" The file vacca/main.py is the vaccagui launcher It", "print('Removing last vacca configs (%s/*.ini)'%folder) os.remove(folder+'*.ini') if len(args)==1: sys.exit(0) elif '--reset' in args:", "vacca, loading configuration from a target.py: > vaccagui [target.py or $CONFIG] [$OPTION=...] $CONFIG", "taurus.core.util import argparse from taurus.qt.qtgui.application import TaurusApplication from taurus.qt.qtgui.taurusgui import TaurusGui ###################################################################### #", "set, default values are those set as VACCA properties in Tango DB. Reset", "configurations \"\"\" if args and args[0].strip('- ') == 'help': print(__doc__) sys.exit(0) def remove_last_config(filename):", "will be initialized in vacca/main.py): VACCA_CONFIG : if set, equivalent to passing target.py", "import argparse from taurus.qt.qtgui.application import TaurusApplication from taurus.qt.qtgui.taurusgui import TaurusGui ###################################################################### # Set", "VACCA properties in Tango DB. 
Reset of QSettings files: > vaccagui --reset #The", "files[0]: files = [configs.keys()[0]] dirname = os.getenv('VACCA_DIR') or '' config = os.getenv('VACCA_CONFIG') or", "not files[0]: files = [configs.keys()[0]] dirname = os.getenv('VACCA_DIR') or '' config = os.getenv('VACCA_CONFIG')", "a.startswith('-')] values = [a for a in args if '=' in a] if", "os.getenv('HOME')+'/.config/VACCA/' options = [a for a in args if a.startswith('-')] values = [a", "pointing to default.py MAIN CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE \"\"\"", "import vacca.utils as vu import vacca configs = vu.get_config_properties() or vu.create_config_properties() if not", "or '-h' in args or '-?' in args: print(__doc__) sys.exit(0) elif '--list' in", "if '--panel' in options: import vacca.panel ret = vacca.panel.main(args[:1]+args[-1:]) else: confname = 'vaccagui'", "vacca.config SUBMODULE \"\"\" import sys,os,re,time,imp,traceback args = sys.argv[1:] files = [] __doc__ =", "(%s)'%inits) [remove_last_config(folder+filename) for filename in inits] if len(args)==1: sys.exit(0) elif '--help' in args", "IS IN vacca.config SUBMODULE \"\"\" import sys,os,re,time,imp,traceback args = sys.argv[1:] files = []", "i,l in enumerate(lines) if re.match('[\\[][a-zA-Z]*[\\]]',l)) sections['[End]'] = len(lines) begin = sections['[General]'] end =", "in os.walk(f).next()[2] if a.endswith('.ini')] print('Removing last vacca configs (%s)'%inits) [remove_last_config(folder+filename) for filename in", "'-?' 
in args: print(__doc__) sys.exit(0) elif '--list' in args: import vacca.utils as vu", "files = [] __doc__ = \"\"\" The file vacca/main.py is the vaccagui launcher", "declared properties or target.py Environment variables (optional, will be initialized in vacca/main.py): VACCA_CONFIG", "'--panel' not in args: files = [a for a in args if a", "be removed Other options: > vaccagui --helps #Prints this text > vaccagui --list", "= os.getenv('VACCA_DIR') or '' config = os.getenv('VACCA_CONFIG') or '' if files[0] in configs:", "os.path.dirname(config) except: pass dirname = dirname or os.path.dirname(config) or \\ vu.get_vacca_property('VACCA_DIR',extract=1) or ''", "imports import taurus from taurus.core.util import argparse from taurus.qt.qtgui.application import TaurusApplication from taurus.qt.qtgui.taurusgui", "if len(args)==1: sys.exit(0) elif '--help' in args or '-h' in args or '-?'", "if files[0] in configs: print('Loading %s'%files[0]) data = vu.get_config_properties(files[0]) config = config or", "by target.py (target.py folder by default) VACCA_PATH : path to vacca module (initialized", "# Delayed imports import taurus from taurus.core.util import argparse from taurus.qt.qtgui.application import TaurusApplication", "taurusgui environment and sets all environment variables. 
vaccagui usage -------------- Launching vacca, loading", "[a for a in args if '=' in a] if '--clean' in args:", "argparse from taurus.qt.qtgui.application import TaurusApplication from taurus.qt.qtgui.taurusgui import TaurusGui ###################################################################### # Set tangoFormatter", "import tangoFormatter from taurus.qt.qtgui.base import TaurusBaseComponent TaurusBaseComponent.FORMAT = tangoFormatter except: pass ###################################################################### import", "###################################################################### # Set tangoFormatter as default formatter try: from taurus.core.tango.util import tangoFormatter from", "def remove_last_config(filename): print('vacca.remove_last_config(%s)'%filename) lines = open(filename).readlines() sections = dict((l.strip(),i) for i,l in enumerate(lines)", "= dirname vu.VACCA_CONFIG = os.environ['VACCA_CONFIG'] = config print('Vacca Environment variables (vacca.main):') print('\\n'.join(map(str,(t for", "os.path.dirname(config) or \\ vu.get_vacca_property('VACCA_DIR',extract=1) or '' vu.VACCA_DIR = os.environ['VACCA_DIR'] = dirname vu.VACCA_CONFIG =", "saved perspective will be removed > vaccagui --clean #All the .ini files will", "# Set tangoFormatter as default formatter try: from taurus.core.tango.util import tangoFormatter from taurus.qt.qtgui.base", "configs: print('Loading %s'%files[0]) data = vu.get_config_properties(files[0]) config = config or data.get('VACCA_CONFIG',files[0]) dirname =", "from shell args, then env, then properties in this order. 
If empty, a", "IS IN vacca.config SUBMODULE print '-'*80 if '--panel' in options: import vacca.panel ret", "or [os.getenv('VACCA_CONFIG')] ############################################################################### print('-'*80) print(\"In vacca.main(%s) ...\"%args) ############################################################################### # Delayed imports import taurus", "Delayed imports import taurus from taurus.core.util import argparse from taurus.qt.qtgui.application import TaurusApplication from", "--reset #The last saved perspective will be removed > vaccagui --clean #All the", "inits = [a for a in os.walk(f).next()[2] if a.endswith('.ini')] print('Removing last vacca configs", "= sys.argv[1:] files = [] __doc__ = \"\"\" The file vacca/main.py is the", "print '-'*80 if '--panel' in options: import vacca.panel ret = vacca.panel.main(args[:1]+args[-1:]) else: confname", "configs (%s/*.ini)'%folder) os.remove(folder+'*.ini') if len(args)==1: sys.exit(0) elif '--reset' in args: inits = [a", "or data.get('VACCA_CONFIG',files[0]) dirname = dirname or data.get('VACCA_DIR',dirname) else: config = config or files[0]", "= [a for a in args if a not in options+values] files =", "[a for a in os.walk(f).next()[2] if a.endswith('.ini')] print('Removing last vacca configs (%s)'%inits) [remove_last_config(folder+filename)", "= os.getenv('HOME')+'/.config/VACCA/' options = [a for a in args if a.startswith('-')] values =", "then properties in this order. 
If empty, a DEFAULT profile is created pointing", "= os.path.abspath(config) elif config: try: import imp print('Loading %s as python module'%config) config", "passing target.py as argument VACCA_DIR : directory to resources needed by target.py (target.py", "config = os.getenv('VACCA_CONFIG') or '' if files[0] in configs: print('Loading %s'%files[0]) data =", "--clean #All the .ini files will be removed Other options: > vaccagui --helps", "#All the .ini files will be removed Other options: > vaccagui --helps #Prints", "in options+values] files = files or [os.getenv('VACCA_CONFIG')] ############################################################################### print('-'*80) print(\"In vacca.main(%s) ...\"%args) ###############################################################################", "= vu.get_config_properties() or vu.create_config_properties() if not files or not files[0]: files = [configs.keys()[0]]", "re.match('[\\[][a-zA-Z]*[\\]]',l)) sections['[End]'] = len(lines) begin = sections['[General]'] end = min(i for i in", "= dirname or data.get('VACCA_DIR',dirname) else: config = config or files[0] if os.path.isfile(config): config", "Tango DB. Reset of QSettings files: > vaccagui --reset #The last saved perspective", "if re.match('[\\[][a-zA-Z]*[\\]]',l)) sections['[End]'] = len(lines) begin = sections['[General]'] end = min(i for i", "[a for a in args if a not in options+values] files = files", "in configs: print('Loading %s'%files[0]) data = vu.get_config_properties(files[0]) config = config or data.get('VACCA_CONFIG',files[0]) dirname", "launcher It creates the taurusgui environment and sets all environment variables. 
vaccagui usage", "sys.exit(0) elif '--reset' in args: inits = [a for a in os.walk(f).next()[2] if", "or data.get('VACCA_DIR',dirname) else: config = config or files[0] if os.path.isfile(config): config = os.path.abspath(config)", "config = imp.find_module(config.replace('.','/'))[1] dirname = os.path.dirname(config) except: pass dirname = dirname or os.path.dirname(config)", "= [a for a in args if a.startswith('-')] values = [a for a", "the taurusgui environment and sets all environment variables. vaccagui usage -------------- Launching vacca,", "a.endswith('.ini')] print('Removing last vacca configs (%s)'%inits) [remove_last_config(folder+filename) for filename in inits] if len(args)==1:", "vu configs = vu.get_config_properties() print('\\nVaccaConfigs:') print('\\n\\t'+'\\n\\t'.join(configs)+'\\n') sys.exit(0) if '--panel' not in args: files", "folder = os.getenv('HOME')+'/.config/VACCA/' options = [a for a in args if a.startswith('-')] values", "os.path.isfile(config): config = os.path.abspath(config) elif config: try: import imp print('Loading %s as python", "= os.environ['VACCA_CONFIG'] = config print('Vacca Environment variables (vacca.main):') print('\\n'.join(map(str,(t for t in os.environ.items()", "be used to override values declared properties or target.py Environment variables (optional, will", "Environment variables (optional, will be initialized in vacca/main.py): VACCA_CONFIG : if set, equivalent", "default formatter try: from taurus.core.tango.util import tangoFormatter from taurus.qt.qtgui.base import TaurusBaseComponent TaurusBaseComponent.FORMAT =", "taurus.qt.qtgui.base import TaurusBaseComponent TaurusBaseComponent.FORMAT = tangoFormatter except: pass ###################################################################### import vacca.utils as vu", "not in options+values] files = files or [os.getenv('VACCA_CONFIG')] ############################################################################### print('-'*80) print(\"In vacca.main(%s) 
...\"%args)", "shell args, then env, then properties in this order. If empty, a DEFAULT", "a in args if '=' in a] if '--clean' in args: print('Removing last", "args if a.startswith('-')] values = [a for a in args if '=' in", "### MAIN CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE print '-'*80 if", "$OPTION=... can be used to override values declared properties or target.py Environment variables", "data.get('VACCA_DIR',dirname) else: config = config or files[0] if os.path.isfile(config): config = os.path.abspath(config) elif", "this text > vaccagui --list #Prints available configurations \"\"\" if args and args[0].strip('-", "imp.find_module(config.replace('.','/'))[1] dirname = os.path.dirname(config) except: pass dirname = dirname or os.path.dirname(config) or \\", "a target.py: > vaccagui [target.py or $CONFIG] [$OPTION=...] $CONFIG will load values from", "python \"\"\" Vacca runner; this file emulates this call: >taurusgui vacca Config file", "len(lines) begin = sections['[General]'] end = min(i for i in sections.values() if i>begin)", "fo.writelines(lines[:begin]+lines[end:]) fo.close() folder = os.getenv('HOME')+'/.config/VACCA/' options = [a for a in args if", "args, then env, then properties in this order. If empty, a DEFAULT profile", "= dict((l.strip(),i) for i,l in enumerate(lines) if re.match('[\\[][a-zA-Z]*[\\]]',l)) sections['[End]'] = len(lines) begin =", "or $CONFIG] [$OPTION=...] 
$CONFIG will load values from VACCA.$CONFIG property in the database", "os.environ.items() if 'VACCA' in t[0])))) ### MAIN CODE FOR PANELS GENERATION IS IN", "sections['[End]'] = len(lines) begin = sections['[General]'] end = min(i for i in sections.values()", "available configurations \"\"\" if args and args[0].strip('- ') == 'help': print(__doc__) sys.exit(0) def", "= open(filename).readlines() sections = dict((l.strip(),i) for i,l in enumerate(lines) if re.match('[\\[][a-zA-Z]*[\\]]',l)) sections['[End]'] =", "initialized in vacca/main.py): VACCA_CONFIG : if set, equivalent to passing target.py as argument", "vaccagui launcher It creates the taurusgui environment and sets all environment variables. vaccagui", "if '=' in a] if '--clean' in args: print('Removing last vacca configs (%s/*.ini)'%folder)", "config or data.get('VACCA_CONFIG',files[0]) dirname = dirname or data.get('VACCA_DIR',dirname) else: config = config or", "as default formatter try: from taurus.core.tango.util import tangoFormatter from taurus.qt.qtgui.base import TaurusBaseComponent TaurusBaseComponent.FORMAT", "The file vacca/main.py is the vaccagui launcher It creates the taurusgui environment and", "import imp print('Loading %s as python module'%config) config = imp.find_module(config.replace('.','/'))[1] dirname = os.path.dirname(config)", "in args if a not in options+values] files = files or [os.getenv('VACCA_CONFIG')] ###############################################################################", "call: >taurusgui vacca Config file (or Property name) is obtained from shell args,", "\"\"\" The file vacca/main.py is the vaccagui launcher It creates the taurusgui environment", "files[0] in configs: print('Loading %s'%files[0]) data = vu.get_config_properties(files[0]) config = config or data.get('VACCA_CONFIG',files[0])", "elif '--reset' in args: inits = [a for a in os.walk(f).next()[2] if a.endswith('.ini')]", "SUBMODULE \"\"\" import sys,os,re,time,imp,traceback args = sys.argv[1:] files = 
[] __doc__ = \"\"\"", "import TaurusBaseComponent TaurusBaseComponent.FORMAT = tangoFormatter except: pass ###################################################################### import vacca.utils as vu import", "= files or [os.getenv('VACCA_CONFIG')] ############################################################################### print('-'*80) print(\"In vacca.main(%s) ...\"%args) ############################################################################### # Delayed imports", "> vaccagui --helps #Prints this text > vaccagui --list #Prints available configurations \"\"\"", "sections = dict((l.strip(),i) for i,l in enumerate(lines) if re.match('[\\[][a-zA-Z]*[\\]]',l)) sections['[End]'] = len(lines) begin", "tangoFormatter from taurus.qt.qtgui.base import TaurusBaseComponent TaurusBaseComponent.FORMAT = tangoFormatter except: pass ###################################################################### import vacca.utils", "'--help' in args or '-h' in args or '-?' in args: print(__doc__) sys.exit(0)", "taurus.qt.qtgui.taurusgui import TaurusGui ###################################################################### # Set tangoFormatter as default formatter try: from taurus.core.tango.util", "'=' in a] if '--clean' in args: print('Removing last vacca configs (%s/*.ini)'%folder) os.remove(folder+'*.ini')", "'--panel' in options: import vacca.panel ret = vacca.panel.main(args[:1]+args[-1:]) else: confname = 'vaccagui' app", "open(filename).readlines() sections = dict((l.strip(),i) for i,l in enumerate(lines) if re.match('[\\[][a-zA-Z]*[\\]]',l)) sections['[End]'] = len(lines)", "profile is created pointing to default.py MAIN CODE FOR PANELS GENERATION IS IN", ": path to vacca module (initialized by imp.find_module()) If not set, default values", "end = min(i for i in sections.values() if i>begin) fo = open(filename,'w') fo.writelines(lines[:begin]+lines[end:])", "vu import vacca configs = vu.get_config_properties() or vu.create_config_properties() if not files or not", 
"vaccagui --clean #All the .ini files will be removed Other options: > vaccagui", "will be removed > vaccagui --clean #All the .ini files will be removed", "or target.py Environment variables (optional, will be initialized in vacca/main.py): VACCA_CONFIG : if", "= min(i for i in sections.values() if i>begin) fo = open(filename,'w') fo.writelines(lines[:begin]+lines[end:]) fo.close()", "taurus from taurus.core.util import argparse from taurus.qt.qtgui.application import TaurusApplication from taurus.qt.qtgui.taurusgui import TaurusGui", "as python module'%config) config = imp.find_module(config.replace('.','/'))[1] dirname = os.path.dirname(config) except: pass dirname =", "vaccagui [target.py or $CONFIG] [$OPTION=...] $CONFIG will load values from VACCA.$CONFIG property in", "VACCA_CONFIG : if set, equivalent to passing target.py as argument VACCA_DIR : directory", "as argument VACCA_DIR : directory to resources needed by target.py (target.py folder by", "i in sections.values() if i>begin) fo = open(filename,'w') fo.writelines(lines[:begin]+lines[end:]) fo.close() folder = os.getenv('HOME')+'/.config/VACCA/'", "or '' vu.VACCA_DIR = os.environ['VACCA_DIR'] = dirname vu.VACCA_CONFIG = os.environ['VACCA_CONFIG'] = config print('Vacca", "config = os.path.abspath(config) elif config: try: import imp print('Loading %s as python module'%config)", "= [a for a in args if '=' in a] if '--clean' in", "tangoFormatter as default formatter try: from taurus.core.tango.util import tangoFormatter from taurus.qt.qtgui.base import TaurusBaseComponent", "os.walk(f).next()[2] if a.endswith('.ini')] print('Removing last vacca configs (%s)'%inits) [remove_last_config(folder+filename) for filename in inits]", "vacca module (initialized by imp.find_module()) If not set, default values are those set", "values = [a for a in args if '=' in a] if '--clean'", "sys.exit(0) elif '--list' in args: import vacca.utils as vu configs = vu.get_config_properties() print('\\nVaccaConfigs:')", "import 
vacca configs = vu.get_config_properties() or vu.create_config_properties() if not files or not files[0]:", "#!/usr/bin/env python \"\"\" Vacca runner; this file emulates this call: >taurusgui vacca Config", "import vacca.panel ret = vacca.panel.main(args[:1]+args[-1:]) else: confname = 'vaccagui' app = TaurusApplication() gui", "obtained from shell args, then env, then properties in this order. If empty,", "vacca configs = vu.get_config_properties() or vu.create_config_properties() if not files or not files[0]: files", "else: confname = 'vaccagui' app = TaurusApplication() gui = TaurusGui(None, confname=confname) gui.show() ret", "') == 'help': print(__doc__) sys.exit(0) def remove_last_config(filename): print('vacca.remove_last_config(%s)'%filename) lines = open(filename).readlines() sections =", "= imp.find_module(config.replace('.','/'))[1] dirname = os.path.dirname(config) except: pass dirname = dirname or os.path.dirname(config) or", "= 'vaccagui' app = TaurusApplication() gui = TaurusGui(None, confname=confname) gui.show() ret = app.exec_()", "if a.endswith('.ini')] print('Removing last vacca configs (%s)'%inits) [remove_last_config(folder+filename) for filename in inits] if", "from a target.py: > vaccagui [target.py or $CONFIG] [$OPTION=...] $CONFIG will load values", "to vacca module (initialized by imp.find_module()) If not set, default values are those", "in inits] if len(args)==1: sys.exit(0) elif '--help' in args or '-h' in args", "this order. 
If empty, a DEFAULT profile is created pointing to default.py MAIN", "will be removed Other options: > vaccagui --helps #Prints this text > vaccagui", "app = TaurusApplication() gui = TaurusGui(None, confname=confname) gui.show() ret = app.exec_() taurus.info('Finished execution", "begin = sections['[General]'] end = min(i for i in sections.values() if i>begin) fo", "os.path.abspath(config) elif config: try: import imp print('Loading %s as python module'%config) config =", "--helps #Prints this text > vaccagui --list #Prints available configurations \"\"\" if args", "in args or '-?' in args: print(__doc__) sys.exit(0) elif '--list' in args: import", "FOR PANELS GENERATION IS IN vacca.config SUBMODULE print '-'*80 if '--panel' in options:", "print(__doc__) sys.exit(0) elif '--list' in args: import vacca.utils as vu configs = vu.get_config_properties()", "= os.getenv('VACCA_CONFIG') or '' if files[0] in configs: print('Loading %s'%files[0]) data = vu.get_config_properties(files[0])", "t[0])))) ### MAIN CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE print '-'*80", "configs (%s)'%inits) [remove_last_config(folder+filename) for filename in inits] if len(args)==1: sys.exit(0) elif '--help' in", "MAIN CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE print '-'*80 if '--panel'", "vacca configs (%s)'%inits) [remove_last_config(folder+filename) for filename in inits] if len(args)==1: sys.exit(0) elif '--help'", "QSettings files: > vaccagui --reset #The last saved perspective will be removed >", "dirname = os.path.dirname(config) except: pass dirname = dirname or os.path.dirname(config) or \\ vu.get_vacca_property('VACCA_DIR',extract=1)", "(optional, will be initialized in vacca/main.py): VACCA_CONFIG : if set, equivalent to passing", "IN vacca.config SUBMODULE \"\"\" import sys,os,re,time,imp,traceback args = sys.argv[1:] files = [] __doc__", "sys,os,re,time,imp,traceback args = sys.argv[1:] files = [] __doc__ = \"\"\" The file vacca/main.py", "file (or 
Property name) is obtained from shell args, then env, then properties", "inits] if len(args)==1: sys.exit(0) elif '--help' in args or '-h' in args or", "needed by target.py (target.py folder by default) VACCA_PATH : path to vacca module", "last saved perspective will be removed > vaccagui --clean #All the .ini files", "[] __doc__ = \"\"\" The file vacca/main.py is the vaccagui launcher It creates", "sys.exit(0) def remove_last_config(filename): print('vacca.remove_last_config(%s)'%filename) lines = open(filename).readlines() sections = dict((l.strip(),i) for i,l in", "for filename in inits] if len(args)==1: sys.exit(0) elif '--help' in args or '-h'", "creates the taurusgui environment and sets all environment variables. vaccagui usage -------------- Launching", "vacca.utils as vu import vacca configs = vu.get_config_properties() or vu.create_config_properties() if not files", "args: files = [a for a in args if a not in options+values]", "in args: import vacca.utils as vu configs = vu.get_config_properties() print('\\nVaccaConfigs:') print('\\n\\t'+'\\n\\t'.join(configs)+'\\n') sys.exit(0) if", "\"\"\" if args and args[0].strip('- ') == 'help': print(__doc__) sys.exit(0) def remove_last_config(filename): print('vacca.remove_last_config(%s)'%filename)", "tangoFormatter except: pass ###################################################################### import vacca.utils as vu import vacca configs = vu.get_config_properties()", "taurus.qt.qtgui.application import TaurusApplication from taurus.qt.qtgui.taurusgui import TaurusGui ###################################################################### # Set tangoFormatter as default", ".ini files will be removed Other options: > vaccagui --helps #Prints this text", "emulates this call: >taurusgui vacca Config file (or Property name) is obtained from", "files = [a for a in args if a not in options+values] files", "print('\\nVaccaConfigs:') print('\\n\\t'+'\\n\\t'.join(configs)+'\\n') sys.exit(0) if '--panel' not in args: 
files = [a for a", ": directory to resources needed by target.py (target.py folder by default) VACCA_PATH :", "Reset of QSettings files: > vaccagui --reset #The last saved perspective will be", "from taurus.qt.qtgui.base import TaurusBaseComponent TaurusBaseComponent.FORMAT = tangoFormatter except: pass ###################################################################### import vacca.utils as", "'' vu.VACCA_DIR = os.environ['VACCA_DIR'] = dirname vu.VACCA_CONFIG = os.environ['VACCA_CONFIG'] = config print('Vacca Environment", "default values are those set as VACCA properties in Tango DB. Reset of", "print('Loading %s as python module'%config) config = imp.find_module(config.replace('.','/'))[1] dirname = os.path.dirname(config) except: pass", "> vaccagui --reset #The last saved perspective will be removed > vaccagui --clean", "files = [configs.keys()[0]] dirname = os.getenv('VACCA_DIR') or '' config = os.getenv('VACCA_CONFIG') or ''", "in options: import vacca.panel ret = vacca.panel.main(args[:1]+args[-1:]) else: confname = 'vaccagui' app =", "TaurusApplication from taurus.qt.qtgui.taurusgui import TaurusGui ###################################################################### # Set tangoFormatter as default formatter try:", "Environment variables (vacca.main):') print('\\n'.join(map(str,(t for t in os.environ.items() if 'VACCA' in t[0])))) ###", "PANELS GENERATION IS IN vacca.config SUBMODULE print '-'*80 if '--panel' in options: import", "default) VACCA_PATH : path to vacca module (initialized by imp.find_module()) If not set,", "open(filename,'w') fo.writelines(lines[:begin]+lines[end:]) fo.close() folder = os.getenv('HOME')+'/.config/VACCA/' options = [a for a in args", "dirname = dirname or os.path.dirname(config) or \\ vu.get_vacca_property('VACCA_DIR',extract=1) or '' vu.VACCA_DIR = os.environ['VACCA_DIR']", "= \"\"\" The file vacca/main.py is the vaccagui launcher It creates the taurusgui", "print('vacca.remove_last_config(%s)'%filename) lines = 
open(filename).readlines() sections = dict((l.strip(),i) for i,l in enumerate(lines) if re.match('[\\[][a-zA-Z]*[\\]]',l))", "filename in inits] if len(args)==1: sys.exit(0) elif '--help' in args or '-h' in", "or files[0] if os.path.isfile(config): config = os.path.abspath(config) elif config: try: import imp print('Loading", "<gh_stars>1-10 #!/usr/bin/env python \"\"\" Vacca runner; this file emulates this call: >taurusgui vacca", "if args and args[0].strip('- ') == 'help': print(__doc__) sys.exit(0) def remove_last_config(filename): print('vacca.remove_last_config(%s)'%filename) lines", "not files or not files[0]: files = [configs.keys()[0]] dirname = os.getenv('VACCA_DIR') or ''", "(target.py folder by default) VACCA_PATH : path to vacca module (initialized by imp.find_module())", "module'%config) config = imp.find_module(config.replace('.','/'))[1] dirname = os.path.dirname(config) except: pass dirname = dirname or", "options = [a for a in args if a.startswith('-')] values = [a for", "> vaccagui [target.py or $CONFIG] [$OPTION=...] $CONFIG will load values from VACCA.$CONFIG property", "sys.exit(0) elif '--help' in args or '-h' in args or '-?' in args:", "variables. 
vaccagui usage -------------- Launching vacca, loading configuration from a target.py: > vaccagui", "files = files or [os.getenv('VACCA_CONFIG')] ############################################################################### print('-'*80) print(\"In vacca.main(%s) ...\"%args) ############################################################################### # Delayed", "text > vaccagui --list #Prints available configurations \"\"\" if args and args[0].strip('- ')", "= [configs.keys()[0]] dirname = os.getenv('VACCA_DIR') or '' config = os.getenv('VACCA_CONFIG') or '' if", "in os.environ.items() if 'VACCA' in t[0])))) ### MAIN CODE FOR PANELS GENERATION IS", "if '--panel' not in args: files = [a for a in args if", "a in args if a not in options+values] files = files or [os.getenv('VACCA_CONFIG')]", "values are those set as VACCA properties in Tango DB. Reset of QSettings", "runner; this file emulates this call: >taurusgui vacca Config file (or Property name)", "import sys,os,re,time,imp,traceback args = sys.argv[1:] files = [] __doc__ = \"\"\" The file", "args[0].strip('- ') == 'help': print(__doc__) sys.exit(0) def remove_last_config(filename): print('vacca.remove_last_config(%s)'%filename) lines = open(filename).readlines() sections", "to override values declared properties or target.py Environment variables (optional, will be initialized", "vacca.panel ret = vacca.panel.main(args[:1]+args[-1:]) else: confname = 'vaccagui' app = TaurusApplication() gui =", "to resources needed by target.py (target.py folder by default) VACCA_PATH : path to", "not set, default values are those set as VACCA properties in Tango DB.", "dirname = dirname or data.get('VACCA_DIR',dirname) else: config = config or files[0] if os.path.isfile(config):", "resources needed by target.py (target.py folder by default) VACCA_PATH : path to vacca", "equivalent to passing target.py as argument VACCA_DIR : directory to resources needed by", ">taurusgui vacca Config file (or Property name) is obtained 
from shell args, then", "in args if '=' in a] if '--clean' in args: print('Removing last vacca", ": if set, equivalent to passing target.py as argument VACCA_DIR : directory to", "will load values from VACCA.$CONFIG property in the database $OPTION=... can be used", "vacca/main.py): VACCA_CONFIG : if set, equivalent to passing target.py as argument VACCA_DIR :", "DB. Reset of QSettings files: > vaccagui --reset #The last saved perspective will", "as vu configs = vu.get_config_properties() print('\\nVaccaConfigs:') print('\\n\\t'+'\\n\\t'.join(configs)+'\\n') sys.exit(0) if '--panel' not in args:", "If empty, a DEFAULT profile is created pointing to default.py MAIN CODE FOR", "'VACCA' in t[0])))) ### MAIN CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE", "argument VACCA_DIR : directory to resources needed by target.py (target.py folder by default)", "= len(lines) begin = sections['[General]'] end = min(i for i in sections.values() if", "database $OPTION=... can be used to override values declared properties or target.py Environment", "'--reset' in args: inits = [a for a in os.walk(f).next()[2] if a.endswith('.ini')] print('Removing", "property in the database $OPTION=... 
can be used to override values declared properties", "try: import imp print('Loading %s as python module'%config) config = imp.find_module(config.replace('.','/'))[1] dirname =", "data = vu.get_config_properties(files[0]) config = config or data.get('VACCA_CONFIG',files[0]) dirname = dirname or data.get('VACCA_DIR',dirname)", "[a for a in args if a.startswith('-')] values = [a for a in", "vacca/main.py is the vaccagui launcher It creates the taurusgui environment and sets all", "(vacca.main):') print('\\n'.join(map(str,(t for t in os.environ.items() if 'VACCA' in t[0])))) ### MAIN CODE", "= [a for a in os.walk(f).next()[2] if a.endswith('.ini')] print('Removing last vacca configs (%s)'%inits)", "from taurus.core.util import argparse from taurus.qt.qtgui.application import TaurusApplication from taurus.qt.qtgui.taurusgui import TaurusGui ######################################################################", "try: from taurus.core.tango.util import tangoFormatter from taurus.qt.qtgui.base import TaurusBaseComponent TaurusBaseComponent.FORMAT = tangoFormatter except:", "args = sys.argv[1:] files = [] __doc__ = \"\"\" The file vacca/main.py is", "#Prints available configurations \"\"\" if args and args[0].strip('- ') == 'help': print(__doc__) sys.exit(0)", "or not files[0]: files = [configs.keys()[0]] dirname = os.getenv('VACCA_DIR') or '' config =", "$CONFIG] [$OPTION=...] 
$CONFIG will load values from VACCA.$CONFIG property in the database $OPTION=...", "args: import vacca.utils as vu configs = vu.get_config_properties() print('\\nVaccaConfigs:') print('\\n\\t'+'\\n\\t'.join(configs)+'\\n') sys.exit(0) if '--panel'", "vacca.utils as vu configs = vu.get_config_properties() print('\\nVaccaConfigs:') print('\\n\\t'+'\\n\\t'.join(configs)+'\\n') sys.exit(0) if '--panel' not in", "= TaurusApplication() gui = TaurusGui(None, confname=confname) gui.show() ret = app.exec_() taurus.info('Finished execution of", "print('Removing last vacca configs (%s)'%inits) [remove_last_config(folder+filename) for filename in inits] if len(args)==1: sys.exit(0)", "'' if files[0] in configs: print('Loading %s'%files[0]) data = vu.get_config_properties(files[0]) config = config", "TaurusBaseComponent.FORMAT = tangoFormatter except: pass ###################################################################### import vacca.utils as vu import vacca configs", "options+values] files = files or [os.getenv('VACCA_CONFIG')] ############################################################################### print('-'*80) print(\"In vacca.main(%s) ...\"%args) ############################################################################### #", "dirname vu.VACCA_CONFIG = os.environ['VACCA_CONFIG'] = config print('Vacca Environment variables (vacca.main):') print('\\n'.join(map(str,(t for t", "= vu.get_config_properties() print('\\nVaccaConfigs:') print('\\n\\t'+'\\n\\t'.join(configs)+'\\n') sys.exit(0) if '--panel' not in args: files = [a", "and sets all environment variables. 
vaccagui usage -------------- Launching vacca, loading configuration from", "vaccagui --helps #Prints this text > vaccagui --list #Prints available configurations \"\"\" if", "in enumerate(lines) if re.match('[\\[][a-zA-Z]*[\\]]',l)) sections['[End]'] = len(lines) begin = sections['[General]'] end = min(i", "imp print('Loading %s as python module'%config) config = imp.find_module(config.replace('.','/'))[1] dirname = os.path.dirname(config) except:", "dict((l.strip(),i) for i,l in enumerate(lines) if re.match('[\\[][a-zA-Z]*[\\]]',l)) sections['[End]'] = len(lines) begin = sections['[General]']", "as VACCA properties in Tango DB. Reset of QSettings files: > vaccagui --reset", "environment and sets all environment variables. vaccagui usage -------------- Launching vacca, loading configuration", "dirname or os.path.dirname(config) or \\ vu.get_vacca_property('VACCA_DIR',extract=1) or '' vu.VACCA_DIR = os.environ['VACCA_DIR'] = dirname", "except: pass ###################################################################### import vacca.utils as vu import vacca configs = vu.get_config_properties() or", "for i in sections.values() if i>begin) fo = open(filename,'w') fo.writelines(lines[:begin]+lines[end:]) fo.close() folder =", "in sections.values() if i>begin) fo = open(filename,'w') fo.writelines(lines[:begin]+lines[end:]) fo.close() folder = os.getenv('HOME')+'/.config/VACCA/' options", "in args if a.startswith('-')] values = [a for a in args if '='", "[os.getenv('VACCA_CONFIG')] ############################################################################### print('-'*80) print(\"In vacca.main(%s) ...\"%args) ############################################################################### # Delayed imports import taurus from", "if not files or not files[0]: files = [configs.keys()[0]] dirname = os.getenv('VACCA_DIR') or", "import TaurusApplication from taurus.qt.qtgui.taurusgui import TaurusGui ###################################################################### # Set 
tangoFormatter as default formatter", "the .ini files will be removed Other options: > vaccagui --helps #Prints this", "used to override values declared properties or target.py Environment variables (optional, will be", "environment variables. vaccagui usage -------------- Launching vacca, loading configuration from a target.py: >", "print('\\n\\t'+'\\n\\t'.join(configs)+'\\n') sys.exit(0) if '--panel' not in args: files = [a for a in", "be initialized in vacca/main.py): VACCA_CONFIG : if set, equivalent to passing target.py as", "env, then properties in this order. If empty, a DEFAULT profile is created", "%s'%files[0]) data = vu.get_config_properties(files[0]) config = config or data.get('VACCA_CONFIG',files[0]) dirname = dirname or", "files: > vaccagui --reset #The last saved perspective will be removed > vaccagui", "variables (vacca.main):') print('\\n'.join(map(str,(t for t in os.environ.items() if 'VACCA' in t[0])))) ### MAIN", "of QSettings files: > vaccagui --reset #The last saved perspective will be removed", "--list #Prints available configurations \"\"\" if args and args[0].strip('- ') == 'help': print(__doc__)", "to default.py MAIN CODE FOR PANELS GENERATION IS IN vacca.config SUBMODULE \"\"\" import", "the database $OPTION=... 
can be used to override values declared properties or target.py", "= open(filename,'w') fo.writelines(lines[:begin]+lines[end:]) fo.close() folder = os.getenv('HOME')+'/.config/VACCA/' options = [a for a in", "from taurus.qt.qtgui.taurusgui import TaurusGui ###################################################################### # Set tangoFormatter as default formatter try: from", "[configs.keys()[0]] dirname = os.getenv('VACCA_DIR') or '' config = os.getenv('VACCA_CONFIG') or '' if files[0]", "elif '--list' in args: import vacca.utils as vu configs = vu.get_config_properties() print('\\nVaccaConfigs:') print('\\n\\t'+'\\n\\t'.join(configs)+'\\n')", "not in args: files = [a for a in args if a not", "(%s/*.ini)'%folder) os.remove(folder+'*.ini') if len(args)==1: sys.exit(0) elif '--reset' in args: inits = [a for", "It creates the taurusgui environment and sets all environment variables. vaccagui usage --------------", "last vacca configs (%s/*.ini)'%folder) os.remove(folder+'*.ini') if len(args)==1: sys.exit(0) elif '--reset' in args: inits", "args or '-?' 
in args: print(__doc__) sys.exit(0) elif '--list' in args: import vacca.utils", "from taurus.qt.qtgui.application import TaurusApplication from taurus.qt.qtgui.taurusgui import TaurusGui ###################################################################### # Set tangoFormatter as", "in args: inits = [a for a in os.walk(f).next()[2] if a.endswith('.ini')] print('Removing last", "to passing target.py as argument VACCA_DIR : directory to resources needed by target.py", "for a in args if a.startswith('-')] values = [a for a in args", "vu.create_config_properties() if not files or not files[0]: files = [configs.keys()[0]] dirname = os.getenv('VACCA_DIR')", "pass ###################################################################### import vacca.utils as vu import vacca configs = vu.get_config_properties() or vu.create_config_properties()", "perspective will be removed > vaccagui --clean #All the .ini files will be", "file emulates this call: >taurusgui vacca Config file (or Property name) is obtained" ]
def extract_utterances_from_a_call(call_file):
    """
    Load a call recording and cut it into voiced utterances.
    The utterances are of a minimal length, split by a 20 dB amplitude limit.
    Input: audio file path
    Output: list of numpy arrays, each holding the samples of one speech utterance
            list of numpy arrays, each holding the [start, end] timestamps of the
            corresponding utterance within the call
    """
    # load the raw audio of the call
    call_audio, _ = librosa.core.load(call_file, config.sr)
    # voiced / non-voiced segmentation according to amplitude
    voiced_intervals = librosa.effects.split(call_audio, top_db=20)
    # minimum utterance length (in samples); shorter segments are discarded
    min_samples = (config.tisv_frame * config.hop + config.window) * config.sr
    kept = [iv for iv in voiced_intervals if (iv[1] - iv[0]) > min_samples]
    utterances = [call_audio[start:end] for start, end in kept]
    timestamps = [
        np.array([librosa.core.samples_to_time(start, sr=config.sr),
                  librosa.core.samples_to_time(end, sr=config.sr)])
        for start, end in kept
    ]
    return utterances, timestamps
def identify_call_type(call_file):
    """
    Identify who speaks first in the call according to phone tone.
    If a call starts with a dial tone the customer speaks first, else the representative.
    Input: path to a call (audio file)
    Output: FIRST_SPEAKER_CUSTOMER if the customer speaks first,
            FIRST_SPEAKER_REPRESENTATIVE if the representative speaks first
    """
    # from the first frame of sound measure 1.5 sec and look for a 400 Hz dial tone
    nof_frames = librosa.core.time_to_samples(1.5, sr=config.sr)
    call_audio, _ = librosa.core.load(call_file, config.sr)
    intervals = librosa.effects.split(call_audio, top_db=20)
    # FIX: scipy.fft is a module (not callable) in SciPy >= 1.4, so the original
    # scipy.fft(...) call raises TypeError there; np.fft.fft is numerically identical.
    tone_fft = np.fft.fft(call_audio[intervals[0][0]:intervals[0][0] + nof_frames])
    tone_fft_mag = np.absolute(tone_fft)        # spectral magnitude
    f = np.linspace(0, config.sr, nof_frames)   # frequency axis [Hz]
    if round(f[np.argmax(tone_fft_mag)]) == 400 and max(tone_fft_mag) > config.dialing_tone_thresh:
        # dialing tone detected! the representative is calling the customer,
        # so the customer speaks first
        return FIRST_SPEAKER_CUSTOMER
    else:
        # the customer is calling the call center, so the representative speaks first
        return FIRST_SPEAKER_REPRESENTATIVE
\"\"\" import numpy as np from configuration import get_config import", "config.sr, nof_frames) # frequency variable if (round(f[np.argmax(tone_fft_mag)]) == 400 and max(tone_fft_mag)>config.dialing_tone_thresh): # dialing", "customer speaks first return FIRST_SPEAKER_CUSTOMER else: # this means customer is calling to", "utterances_list, utterances_timestamps def extract_spectrograms_from_utterances(utterances_list): \"\"\" Get a list of utterances and extract spectrograms", "extract spectrograms binned in mel-binning for each frame Input: list of numpy arrays,", "a numpy array with frames exreacted from the call of voice. The frames", "0 FIRST_SPEAKER_REPRESENTATIVE = 1 def print_call_type(call_type): \"\"\" Used to print call type Input:", "\"\"\" Identify who speaks first in the call according to phone tone. If", "binned in mel-binning for each frame Input: list of numpy arrays, each of", "single speech utterance \"\"\" spectrograms_list = [] # iterate on all utterances, extract", "contains the code for signal processing module of application. \"\"\" import numpy as", "hop_length=int(config.hop * config.sr)) spect = np.abs(spect) ** 2 mel_basis = librosa.filters.mel(sr=config.sr, n_fft=config.nfft, n_mels=config.mel_nof)", "librosa.effects.split(call_audio, top_db = 20) # lower bound of utterance length - below that", "audio segment, split it into frames according to size config.tisv_frame Input: seg -", "import scipy import librosa config = get_config() # get arguments from parser #", "represntative speaks first return FIRST_SPEAKER_REPRESENTATIVE def extract_utterances_from_a_call(call_file): \"\"\" Get a file, output a", "call of voice. The frames are utterances of minimal length, split by 20DB", "list of numpy arrays, each of them representing a spectrogram of a single", "array with frames exreacted from the call of voice. 
def extract_spectrograms_from_utterances(utterances_list):
    """
    Compute a log-mel spectrogram for every utterance.
    Input: list of numpy arrays, each of them representing a single speech utterance
    Output: list of numpy arrays, each of them representing the log-mel spectrogram
            (frames x mel bins) of a single speech utterance
    """
    # FIX: the mel filterbank depends only on config, so build it once
    # instead of recomputing it for every utterance in the loop
    mel_basis = librosa.filters.mel(sr=config.sr, n_fft=config.nfft, n_mels=config.mel_nof)
    spectrograms_list = []
    # iterate over all utterances, extract a spectrogram from each
    for utterance in utterances_list:
        spect = librosa.core.stft(y=utterance, n_fft=config.nfft,
                                  win_length=int(config.window * config.sr),
                                  hop_length=int(config.hop * config.sr))
        spect = np.abs(spect) ** 2  # power spectrogram
        # log mel spectrogram of the utterance; 1e-6 avoids log10(0)
        spect_bins = np.log10(np.dot(mel_basis, spect) + 1e-6)
        spectrograms_list.append(np.transpose(spect_bins))
    return spectrograms_list
\"\"\"", "\"\"\" import numpy as np from configuration import get_config import scipy import librosa", "Output: list of numpy arrays, each of them representing a single speech utterance", "and no-voice according to amplitude intervals = librosa.effects.split(call_audio, top_db = 20) # lower", "lower bound of utterance length - below that discard utter_min_len = (config.tisv_frame *", "list of utterances and extract spectrograms binned in mel-binning for each frame Input:", "+ 1e-6) spectrograms_list.append(np.transpose(spect_bins)) return spectrograms_list def split_segment_to_frames(seg): \"\"\" Given an audio segment, split", "magnitude f = np.linspace(0, config.sr, nof_frames) # frequency variable if (round(f[np.argmax(tone_fft_mag)]) == 400", "scipy import librosa config = get_config() # get arguments from parser # constants", "Input: path to a call Output: FIRST_SPEAKER_CUSTOMER if customer speaks first, FIRST_SPEAKER_REPRESENTATIVE if", "Input: audio file path Output: list of numpy arrays, each of them representing", "if representetor speaks first \"\"\" # from the first frame of sound measure", "who speaks first in the call according to phone tone. If a call", "<gh_stars>0 \"\"\" This file contains the code for signal processing module of application.", "2 mel_basis = librosa.filters.mel(sr=config.sr, n_fft=config.nfft, n_mels=config.mel_nof) # log mel spectrogram of utterances spect_bins", "config.sr) # split the audio to voice and no-voice according to amplitude intervals", "the code for signal processing module of application. 
\"\"\" import numpy as np", "tone_fft = scipy.fft(call_audio[intervals[0][0]:intervals[0][0]+nof_frames]) tone_fft_mag = np.absolute(tone_fft) # spectral magnitude f = np.linspace(0, config.sr,", "length, split by 20DB limit Input: audio file path Output: list of numpy", "an audio segment, split it into frames according to size config.tisv_frame Input: seg", "a file, output a numpy array with frames exreacted from the call of", "= librosa.effects.split(call_audio, top_db = 20) # lower bound of utterance length - below", "call type Input: call type int \"\"\" if call_type == FIRST_SPEAKER_CUSTOMER: print(\"First speaker", "Identify who speaks first in the call according to phone tone. If a", "a single speech utterance list of numpy array representing the timestamp start and", "in the call \"\"\" # extract audio call_audio, _ = librosa.core.load(call_file, config.sr) #", "utterances_timestamps def extract_spectrograms_from_utterances(utterances_list): \"\"\" Get a list of utterances and extract spectrograms binned", "50% overlap STFT_frames = [] for j in range(0, spect.shape[1], int(.12/config.hop)): if j", "= librosa.core.load(call_file, config.sr) # split the audio to voice and no-voice according to", "file contains the code for signal processing module of application. 
\"\"\" import numpy", "bound of utterance length - below that discard utter_min_len = (config.tisv_frame * config.hop", "in intervals: # Check that utterance length is sufficient if (interval[1]-interval[0]) > utter_min_len:", "\"\"\" spectrograms_list = [] # iterate on all utterances, extract spectrogram from each", "[] utterances_timestamps = [] for interval in intervals: # Check that utterance length", "intervals: # Check that utterance length is sufficient if (interval[1]-interval[0]) > utter_min_len: utterances_list.append(call_audio[interval[0]:interval[1]])", "n_fft = config.nfft, win_length=int(config.window * config.sr), hop_length=int(config.hop * config.sr)) spect = np.abs(spect) **", "the call of voice. The frames are utterances of minimal length, split by", "nof_frames) # frequency variable if (round(f[np.argmax(tone_fft_mag)]) == 400 and max(tone_fft_mag)>config.dialing_tone_thresh): # dialing tone", "spectrograms_list = [] # iterate on all utterances, extract spectrogram from each for", "librosa.core.time_to_samples(1.5, sr=config.sr) call_audio, _ = librosa.core.load(call_file, config.sr) intervals = librosa.effects.split(call_audio, top_db = 20)", "def print_call_type(call_type): \"\"\" Used to print call type Input: call type int \"\"\"", "of frames \"\"\" # Extrct spectrogram spect = np.transpose(extract_spectrograms_from_utterances([seg])[0]) #Get config.tisv_frame STFT windows", "librosa.core.load(call_file, config.sr) # split the audio to voice and no-voice according to amplitude", "1.5 sec and look for 400Hz tone nof_frames = librosa.core.time_to_samples(1.5, sr=config.sr) call_audio, _", "* config.sr), hop_length=int(config.hop * config.sr)) spect = np.abs(spect) ** 2 mel_basis = librosa.filters.mel(sr=config.sr,", "identify_call_type FIRST_SPEAKER_CUSTOMER = 0 FIRST_SPEAKER_REPRESENTATIVE = 1 def print_call_type(call_type): \"\"\" Used to print", "# Check that utterance length is sufficient if (interval[1]-interval[0]) > utter_min_len: 
def split_segment_to_frames(seg):
    """
    Split an audio segment into fixed-size spectrogram frames of
    config.tisv_frame STFT windows each.
    Input: seg - audio segment (numpy array of samples)
    Output: list of numpy arrays, one per frame
    """
    # spectrogram of the whole segment, as (mel bins, time)
    spect = np.transpose(extract_spectrograms_from_utterances([seg])[0])
    # slide a config.tisv_frame-wide window along the time axis in steps of
    # 0.12 s worth of hops (presumably ~50% overlap — TODO confirm vs config)
    step = int(.12 / config.hop)
    frames = []
    start = 0
    # only full windows are kept; the trailing partial window is dropped
    while start + config.tisv_frame < spect.shape[1]:
        frames.append(np.transpose(spect[:, start:start + config.tisv_frame]))
        start += step
    return frames
def print_call_type(call_type):
    """
    Print a human-readable description of a call type.
    Input: call type int (FIRST_SPEAKER_CUSTOMER or FIRST_SPEAKER_REPRESENTATIVE)
    """
    speaker = "customer" if call_type == FIRST_SPEAKER_CUSTOMER else "representative"
    print("First speaker is " + speaker)
The frames are", "them representing a spectrogram of a single speech utterance \"\"\" spectrograms_list = []", "Output: list of frames \"\"\" # Extrct spectrogram spect = np.transpose(extract_spectrograms_from_utterances([seg])[0]) #Get config.tisv_frame", "utterance Output: list of numpy arrays, each of them representing a spectrogram of", "Input: seg - audio segment Output: list of frames \"\"\" # Extrct spectrogram", "and end of each utterance in the call \"\"\" # extract audio call_audio,", "get_config import scipy import librosa config = get_config() # get arguments from parser", "print call type Input: call type int \"\"\" if call_type == FIRST_SPEAKER_CUSTOMER: print(\"First", "audio to voice and no-voice according to amplitude intervals = librosa.effects.split(call_audio, top_db =", "a call starts with a dial tone customer speaks first, else representative. Input:", "# Extrct spectrogram spect = np.transpose(extract_spectrograms_from_utterances([seg])[0]) #Get config.tisv_frame STFT windows with 50% overlap", "sound measure 1.5 sec and look for 400Hz tone nof_frames = librosa.core.time_to_samples(1.5, sr=config.sr)", "by 20DB limit Input: audio file path Output: list of numpy arrays, each", "# constants for output of identify_call_type FIRST_SPEAKER_CUSTOMER = 0 FIRST_SPEAKER_REPRESENTATIVE = 1 def", "win_length=int(config.window * config.sr), hop_length=int(config.hop * config.sr)) spect = np.abs(spect) ** 2 mel_basis =", "of utterances spect_bins = np.log10(np.dot(mel_basis, spect) + 1e-6) spectrograms_list.append(np.transpose(spect_bins)) return spectrograms_list def split_segment_to_frames(seg):", "= scipy.fft(call_audio[intervals[0][0]:intervals[0][0]+nof_frames]) tone_fft_mag = np.absolute(tone_fft) # spectral magnitude f = np.linspace(0, config.sr, nof_frames)", "np from configuration import get_config import scipy import librosa config = get_config() #", "customer speaks first, FIRST_SPEAKER_REPRESENTATIVE if representetor speaks first \"\"\" # from 
the first", "dialing tone detected! this means represntative is calling to the customer, customer speaks", "_ = librosa.core.load(call_file, config.sr) intervals = librosa.effects.split(call_audio, top_db = 20) tone_fft = scipy.fft(call_audio[intervals[0][0]:intervals[0][0]+nof_frames])", "of utterance length - below that discard utter_min_len = (config.tisv_frame * config.hop +", "speaks first \"\"\" # from the first frame of sound measure 1.5 sec", "split the audio to voice and no-voice according to amplitude intervals = librosa.effects.split(call_audio,", "that utterance length is sufficient if (interval[1]-interval[0]) > utter_min_len: utterances_list.append(call_audio[interval[0]:interval[1]]) utterances_timestamps.append(np.array([librosa.core.samples_to_time(interval[0], sr=config.sr), librosa.core.samples_to_time(interval[1],", "configuration import get_config import scipy import librosa config = get_config() # get arguments", "no-voice according to amplitude intervals = librosa.effects.split(call_audio, top_db = 20) # lower bound", "of numpy arrays, each of them representing a spectrogram of a single speech", "of a single speech utterance \"\"\" spectrograms_list = [] # iterate on all", "# split the audio to voice and no-voice according to amplitude intervals =", "utterances spect_bins = np.log10(np.dot(mel_basis, spect) + 1e-6) spectrograms_list.append(np.transpose(spect_bins)) return spectrograms_list def split_segment_to_frames(seg): \"\"\"", "size config.tisv_frame Input: seg - audio segment Output: list of frames \"\"\" #", "Get a file, output a numpy array with frames exreacted from the call", "for 400Hz tone nof_frames = librosa.core.time_to_samples(1.5, sr=config.sr) call_audio, _ = librosa.core.load(call_file, config.sr) intervals", "each utterance in the call \"\"\" # extract audio call_audio, _ = librosa.core.load(call_file,", "= config.nfft, win_length=int(config.window * config.sr), hop_length=int(config.hop * config.sr)) spect = np.abs(spect) 
** 2", "sufficient if (interval[1]-interval[0]) > utter_min_len: utterances_list.append(call_audio[interval[0]:interval[1]]) utterances_timestamps.append(np.array([librosa.core.samples_to_time(interval[0], sr=config.sr), librosa.core.samples_to_time(interval[1], sr=config.sr)])) return utterances_list, utterances_timestamps", "400Hz tone nof_frames = librosa.core.time_to_samples(1.5, sr=config.sr) call_audio, _ = librosa.core.load(call_file, config.sr) intervals =", "variable if (round(f[np.argmax(tone_fft_mag)]) == 400 and max(tone_fft_mag)>config.dialing_tone_thresh): # dialing tone detected! this means", "speech utterance list of numpy array representing the timestamp start and end of", "single speech utterance Output: list of numpy arrays, each of them representing a", "segment Output: list of frames \"\"\" # Extrct spectrogram spect = np.transpose(extract_spectrograms_from_utterances([seg])[0]) #Get", "(config.tisv_frame * config.hop + config.window) * config.sr utterances_list = [] utterances_timestamps = []", "arrays, each of them representing a spectrogram of a single speech utterance \"\"\"", "means customer is calling to the call center, represntative speaks first return FIRST_SPEAKER_REPRESENTATIVE", "\"\"\" # Extrct spectrogram spect = np.transpose(extract_spectrograms_from_utterances([seg])[0]) #Get config.tisv_frame STFT windows with 50%", "split_segment_to_frames(seg): \"\"\" Given an audio segment, split it into frames according to size", "as np from configuration import get_config import scipy import librosa config = get_config()", "librosa.core.samples_to_time(interval[1], sr=config.sr)])) return utterances_list, utterances_timestamps def extract_spectrograms_from_utterances(utterances_list): \"\"\" Get a list of utterances", "_ = librosa.core.load(call_file, config.sr) # split the audio to voice and no-voice according", "call_audio, _ = librosa.core.load(call_file, config.sr) intervals = librosa.effects.split(call_audio, top_db = 20) tone_fft =", 
"arrays, each of them representing a single speech utterance list of numpy array", "# iterate on all utterances, extract spectrogram from each for utterance in utterances_list:", "call starts with a dial tone customer speaks first, else representative. Input: path", "scipy.fft(call_audio[intervals[0][0]:intervals[0][0]+nof_frames]) tone_fft_mag = np.absolute(tone_fft) # spectral magnitude f = np.linspace(0, config.sr, nof_frames) #", "config.tisv_frame STFT windows with 50% overlap STFT_frames = [] for j in range(0,", "the timestamp start and end of each utterance in the call \"\"\" #", "are utterances of minimal length, split by 20DB limit Input: audio file path", "This file contains the code for signal processing module of application. \"\"\" import", "int \"\"\" if call_type == FIRST_SPEAKER_CUSTOMER: print(\"First speaker is customer\") else: print(\"First speaker", "phone tone. If a call starts with a dial tone customer speaks first,", "spectrogram of a single speech utterance \"\"\" spectrograms_list = [] # iterate on", "utter_min_len: utterances_list.append(call_audio[interval[0]:interval[1]]) utterances_timestamps.append(np.array([librosa.core.samples_to_time(interval[0], sr=config.sr), librosa.core.samples_to_time(interval[1], sr=config.sr)])) return utterances_list, utterances_timestamps def extract_spectrograms_from_utterances(utterances_list): \"\"\" Get", "\"\"\" Get a list of utterances and extract spectrograms binned in mel-binning for", "= np.linspace(0, config.sr, nof_frames) # frequency variable if (round(f[np.argmax(tone_fft_mag)]) == 400 and max(tone_fft_mag)>config.dialing_tone_thresh):", "# lower bound of utterance length - below that discard utter_min_len = (config.tisv_frame", "= 0 FIRST_SPEAKER_REPRESENTATIVE = 1 def print_call_type(call_type): \"\"\" Used to print call type", "sr=config.sr)])) return utterances_list, utterances_timestamps def extract_spectrograms_from_utterances(utterances_list): \"\"\" Get a list of utterances and", "in 
range(0, spect.shape[1], int(.12/config.hop)): if j + config.tisv_frame < spect.shape[1]: STFT_frames.append(np.transpose(spect[:,j:j+config.tisv_frame])) else: break", "a spectrogram of a single speech utterance \"\"\" spectrograms_list = [] # iterate", "if (interval[1]-interval[0]) > utter_min_len: utterances_list.append(call_audio[interval[0]:interval[1]]) utterances_timestamps.append(np.array([librosa.core.samples_to_time(interval[0], sr=config.sr), librosa.core.samples_to_time(interval[1], sr=config.sr)])) return utterances_list, utterances_timestamps def", "spect.shape[1], int(.12/config.hop)): if j + config.tisv_frame < spect.shape[1]: STFT_frames.append(np.transpose(spect[:,j:j+config.tisv_frame])) else: break return STFT_frames", "for output of identify_call_type FIRST_SPEAKER_CUSTOMER = 0 FIRST_SPEAKER_REPRESENTATIVE = 1 def print_call_type(call_type): \"\"\"", "# this means customer is calling to the call center, represntative speaks first", "mel-binning for each frame Input: list of numpy arrays, each of them representing", "speech utterance Output: list of numpy arrays, each of them representing a spectrogram", "config.hop + config.window) * config.sr utterances_list = [] utterances_timestamps = [] for interval", "np.transpose(extract_spectrograms_from_utterances([seg])[0]) #Get config.tisv_frame STFT windows with 50% overlap STFT_frames = [] for j", "first, FIRST_SPEAKER_REPRESENTATIVE if representetor speaks first \"\"\" # from the first frame of", "\"\"\" # extract audio call_audio, _ = librosa.core.load(call_file, config.sr) # split the audio", "= get_config() # get arguments from parser # constants for output of identify_call_type", "of application. 
\"\"\" import numpy as np from configuration import get_config import scipy", "sr=config.sr) call_audio, _ = librosa.core.load(call_file, config.sr) intervals = librosa.effects.split(call_audio, top_db = 20) tone_fft", "split it into frames according to size config.tisv_frame Input: seg - audio segment", "the audio to voice and no-voice according to amplitude intervals = librosa.effects.split(call_audio, top_db", "\"\"\" Get a file, output a numpy array with frames exreacted from the", "with a dial tone customer speaks first, else representative. Input: path to a", "if customer speaks first, FIRST_SPEAKER_REPRESENTATIVE if representetor speaks first \"\"\" # from the", "speaker is representative\") def identify_call_type(call_file): \"\"\" Identify who speaks first in the call", "* config.sr)) spect = np.abs(spect) ** 2 mel_basis = librosa.filters.mel(sr=config.sr, n_fft=config.nfft, n_mels=config.mel_nof) #", "first return FIRST_SPEAKER_REPRESENTATIVE def extract_utterances_from_a_call(call_file): \"\"\" Get a file, output a numpy array", "utterances_timestamps = [] for interval in intervals: # Check that utterance length is", "according to phone tone. 
If a call starts with a dial tone customer", "return FIRST_SPEAKER_REPRESENTATIVE def extract_utterances_from_a_call(call_file): \"\"\" Get a file, output a numpy array with", "numpy arrays, each of them representing a single speech utterance list of numpy", "utterance length - below that discard utter_min_len = (config.tisv_frame * config.hop + config.window)", "list of numpy arrays, each of them representing a single speech utterance list", "librosa.filters.mel(sr=config.sr, n_fft=config.nfft, n_mels=config.mel_nof) # log mel spectrogram of utterances spect_bins = np.log10(np.dot(mel_basis, spect)", "spect = np.transpose(extract_spectrograms_from_utterances([seg])[0]) #Get config.tisv_frame STFT windows with 50% overlap STFT_frames = []", "frames \"\"\" # Extrct spectrogram spect = np.transpose(extract_spectrograms_from_utterances([seg])[0]) #Get config.tisv_frame STFT windows with", "get arguments from parser # constants for output of identify_call_type FIRST_SPEAKER_CUSTOMER = 0", "minimal length, split by 20DB limit Input: audio file path Output: list of", "path Output: list of numpy arrays, each of them representing a single speech", "timestamp start and end of each utterance in the call \"\"\" # extract", "a dial tone customer speaks first, else representative. Input: path to a call", "with frames exreacted from the call of voice. The frames are utterances of", "spectrogram from each for utterance in utterances_list: spect = librosa.core.stft(y=utterance, n_fft = config.nfft,", "numpy as np from configuration import get_config import scipy import librosa config =", "FIRST_SPEAKER_CUSTOMER = 0 FIRST_SPEAKER_REPRESENTATIVE = 1 def print_call_type(call_type): \"\"\" Used to print call", "frame Input: list of numpy arrays, each of them representing a single speech", "processing module of application. 
\"\"\" import numpy as np from configuration import get_config", "type Input: call type int \"\"\" if call_type == FIRST_SPEAKER_CUSTOMER: print(\"First speaker is", "The frames are utterances of minimal length, split by 20DB limit Input: audio", "the call center, represntative speaks first return FIRST_SPEAKER_REPRESENTATIVE def extract_utterances_from_a_call(call_file): \"\"\" Get a", "= [] utterances_timestamps = [] for interval in intervals: # Check that utterance", "utterance list of numpy array representing the timestamp start and end of each", "all utterances, extract spectrogram from each for utterance in utterances_list: spect = librosa.core.stft(y=utterance,", "audio segment Output: list of frames \"\"\" # Extrct spectrogram spect = np.transpose(extract_spectrograms_from_utterances([seg])[0])", "iterate on all utterances, extract spectrogram from each for utterance in utterances_list: spect", "top_db = 20) # lower bound of utterance length - below that discard", "them representing a single speech utterance Output: list of numpy arrays, each of", "# log mel spectrogram of utterances spect_bins = np.log10(np.dot(mel_basis, spect) + 1e-6) spectrograms_list.append(np.transpose(spect_bins))", "from the call of voice. The frames are utterances of minimal length, split", "file, output a numpy array with frames exreacted from the call of voice.", "detected! 
this means represntative is calling to the customer, customer speaks first return", "config = get_config() # get arguments from parser # constants for output of", "from configuration import get_config import scipy import librosa config = get_config() # get", "np.log10(np.dot(mel_basis, spect) + 1e-6) spectrograms_list.append(np.transpose(spect_bins)) return spectrograms_list def split_segment_to_frames(seg): \"\"\" Given an audio", "single speech utterance list of numpy array representing the timestamp start and end", "j in range(0, spect.shape[1], int(.12/config.hop)): if j + config.tisv_frame < spect.shape[1]: STFT_frames.append(np.transpose(spect[:,j:j+config.tisv_frame])) else:", "a list of utterances and extract spectrograms binned in mel-binning for each frame", "spectrogram of utterances spect_bins = np.log10(np.dot(mel_basis, spect) + 1e-6) spectrograms_list.append(np.transpose(spect_bins)) return spectrograms_list def", "= [] for interval in intervals: # Check that utterance length is sufficient", "first return FIRST_SPEAKER_CUSTOMER else: # this means customer is calling to the call", "for each frame Input: list of numpy arrays, each of them representing a", "signal processing module of application. \"\"\" import numpy as np from configuration import", "call according to phone tone. If a call starts with a dial tone", "utter_min_len = (config.tisv_frame * config.hop + config.window) * config.sr utterances_list = [] utterances_timestamps", "to phone tone. 
If a call starts with a dial tone customer speaks", "calling to the call center, represntative speaks first return FIRST_SPEAKER_REPRESENTATIVE def extract_utterances_from_a_call(call_file): \"\"\"", "of them representing a single speech utterance Output: list of numpy arrays, each", "and look for 400Hz tone nof_frames = librosa.core.time_to_samples(1.5, sr=config.sr) call_audio, _ = librosa.core.load(call_file,", "\"\"\" if call_type == FIRST_SPEAKER_CUSTOMER: print(\"First speaker is customer\") else: print(\"First speaker is", "config.sr), hop_length=int(config.hop * config.sr)) spect = np.abs(spect) ** 2 mel_basis = librosa.filters.mel(sr=config.sr, n_fft=config.nfft,", "np.absolute(tone_fft) # spectral magnitude f = np.linspace(0, config.sr, nof_frames) # frequency variable if", "def identify_call_type(call_file): \"\"\" Identify who speaks first in the call according to phone", "to the customer, customer speaks first return FIRST_SPEAKER_CUSTOMER else: # this means customer", "length is sufficient if (interval[1]-interval[0]) > utter_min_len: utterances_list.append(call_audio[interval[0]:interval[1]]) utterances_timestamps.append(np.array([librosa.core.samples_to_time(interval[0], sr=config.sr), librosa.core.samples_to_time(interval[1], sr=config.sr)])) return", "that discard utter_min_len = (config.tisv_frame * config.hop + config.window) * config.sr utterances_list =", "- audio segment Output: list of frames \"\"\" # Extrct spectrogram spect =", "speaker is customer\") else: print(\"First speaker is representative\") def identify_call_type(call_file): \"\"\" Identify who", "for j in range(0, spect.shape[1], int(.12/config.hop)): if j + config.tisv_frame < spect.shape[1]: STFT_frames.append(np.transpose(spect[:,j:j+config.tisv_frame]))", "tone_fft_mag = np.absolute(tone_fft) # spectral magnitude f = np.linspace(0, config.sr, nof_frames) # frequency" ]
[ "https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed", "Args: split_name: the name of the dataset split, as a string (case insensitive).", "the split name is not \"train\", \"valid\", or \"test\". \"\"\" if \"num_of_train_examples\" not", "LLC Licensed under the Apache License, Version 2.0 (the \"License\"); you may not", "build_generator(self, data): \"\"\"Builds a generator that yields each element from data.\"\"\" for example", "video_captions(): \"\"\"Returns a dict of that maps from video id to a list", "The three splits returned are for \"train\", \"valid\", and \"test\" splits. The returned", "import abstractmethod import pathlib import tensorflow as tf class BaseVideoDataset(AbstractClass): \"\"\"Base class for", "valid_data.append((video_id, caption)) elif video_id in test_ids: test_data.append((video_id, caption)) else: print(f\"Orphan pair: id: {video_id},", "\"train\": return self.num_of_train_examples elif split_name == \"valid\": return self.num_of_valid_examples elif split_name == \"test\":", "a tuple of sets providing ids for the dataset splits. Returns: a tuple", "def download_dataset(self): \"\"\"Downloads the dataset.\"\"\" pass @abstractmethod def download_and_cache_precomputed_features(self): \"\"\"Downloads and caches precomputed", "Returns: a tuple of three tuples. Each tuple has two elements, the first", "= [] test_data = [] for video_id, caption in self.video_captions: if video_id in", "\"train\", \"valid\", and \"test\" splits. The returned data is structured as follows: (", "pair datasets for each split in this dataset. Returns: a tuple of three", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the", "abstractmethod import pathlib import tensorflow as tf class BaseVideoDataset(AbstractClass): \"\"\"Base class for video", "dataset.\"\"\" pass @abstractmethod def download_and_cache_precomputed_features(self): \"\"\"Downloads and caches precomputed features for the given", "id caption pair datasets for each split in this dataset. Returns: a tuple", "test_ids: test_data.append((video_id, caption)) else: print(f\"Orphan pair: id: {video_id}, caption: {hash(caption)}\") self.num_of_train_examples = len(train_data)", "of that maps from video id to a list of captions.\"\"\" pass @property", "insensitive). The split name can be \"train\", \"valid\", or \"test\". Returns: an integer", "split, as a string (case insensitive). The split name can be \"train\", \"valid\",", "for the validation data, and the third for the test data.\"\"\" pass def", "integer that represents the number of examples in the given split. Raises: ValueError", "the first element is the data for the train split, followed by the", "self.num_of_train_examples = len(train_data) self.num_of_valid_examples = len(valid_data) self.num_of_test_examples = len(test_data) train_dataset = self.build_id_caption_pair_generator_dataset(train_data) valid_dataset", "the License for the specific language governing permissions and limitations under the License.", "abc import abstractmethod import pathlib import tensorflow as tf class BaseVideoDataset(AbstractClass): \"\"\"Base class", "pass @property @abstractmethod def video_captions(): \"\"\"Returns a dict of that maps from video", "@abstractmethod def train_valid_test_ids(self): \"\"\"Returns a tuple of sets providing ids for the dataset", "in the given split in this dataset. 
Args: split_name: the name of the", "train_dataset = self.build_id_caption_pair_generator_dataset(train_data) valid_dataset = self.build_id_caption_pair_generator_dataset(valid_data) test_dataset = self.build_id_caption_pair_generator_dataset(test_data) return ( (train_dataset, \"train\"),", "build_id_caption_pair_generator_dataset(self, data): \"\"\"Builds a tf.data Dataset out of id caption pairs in data.\"\"\"", "License for the specific language governing permissions and limitations under the License. \"\"\"", "instance, \"train\"), (tf.data.Dataset instance, \"valid\"), (tf.data.Dataset instance, \"test\")) \"\"\" train_ids, valid_ids, test_ids =", "Unless required by applicable law or agreed to in writing, software distributed under", "name can be \"train\", \"valid\", or \"test\". Returns: an integer that represents the", "elif video_id in valid_ids: valid_data.append((video_id, caption)) elif video_id in test_ids: test_data.append((video_id, caption)) else:", "be \"train\", \"valid\", or \"test\". Returns: an integer that represents the number of", "of examples in the given split. Raises: ValueError if the split name is", "has two elements, the first is a tf.data.Dataset of video id caption pairs,", "License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing,", "valid_ids: valid_data.append((video_id, caption)) elif video_id in test_ids: test_data.append((video_id, caption)) else: print(f\"Orphan pair: id:", "object representing the dataset\"\"\" pass @abstractmethod def download_dataset(self): \"\"\"Downloads the dataset.\"\"\" pass @abstractmethod", "and the third for the test data.\"\"\" pass def build_generator(self, data): \"\"\"Builds a", "valid_data = [] test_data = [] for video_id, caption in self.video_captions: if video_id", "of sets providing ids for the dataset splits. Returns: a tuple of sets,", "split name is not \"train\", \"valid\", or \"test\". 
\"\"\" if \"num_of_train_examples\" not in", "the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "for video datasets.\"\"\" @property @abstractmethod def dataset_name(self): \"\"\"Gets the name of the dataset.\"\"\"", "ids for the train data, the second for the validation data, and the", "is the data for the train split, followed by the valid and test", "# Accessing the property self.id_caption_pair_datasets counts the # number of examples in each", "License, Version 2.0 (the \"License\"); you may not use this file except in", "License. \"\"\" from abc import ABC as AbstractClass from abc import abstractmethod import", "given dataset.\"\"\" pass @property @abstractmethod def video_captions(): \"\"\"Returns a dict of that maps", "Dataset out of id caption pairs in data.\"\"\" generator = lambda: self.build_generator(data) return", "for the dataset splits. Returns: a tuple of sets, where the first set", "set contains the ids for the train data, the second for the validation", "if the dataset is downloaded.\"\"\" pass @property def data(self): \"\"\"Gets a tf.data object", "number of examples in the given split in this dataset. Args: split_name: the", "of sets, where the first set contains the ids for the train data,", "a tuple of sets, where the first set contains the ids for the", "\"\"\"Returns a tuple of sets providing ids for the dataset splits. Returns: a", "def build_id_caption_pair_generator_dataset(self, data): \"\"\"Builds a tf.data Dataset out of id caption pairs in", "as a string (case insensitive). The split name can be \"train\", \"valid\", or", "in the given split. 
Raises: ValueError if the split name is not \"train\",", "the third for the test data.\"\"\" pass def build_generator(self, data): \"\"\"Builds a generator", "list of captions.\"\"\" pass @property @abstractmethod def captions_per_video(self): \"\"\"Number of captions per video.\"\"\"", "pass @abstractmethod def download_and_cache_precomputed_features(self): \"\"\"Downloads and caches precomputed features for the given dataset.\"\"\"", "of video id caption pairs, and the second element is the name of", "three splits returned are for \"train\", \"valid\", and \"test\" splits. The returned data", "as AbstractClass from abc import abstractmethod import pathlib import tensorflow as tf class", "test sets. The three splits returned are for \"train\", \"valid\", and \"test\" splits.", "out of id caption pairs in data.\"\"\" generator = lambda: self.build_generator(data) return tf.data.Dataset.from_generator(generator,", "\"test\". \"\"\" if \"num_of_train_examples\" not in dir(self): # Accessing the property self.id_caption_pair_datasets counts", "data: yield example def build_id_caption_pair_generator_dataset(self, data): \"\"\"Builds a tf.data Dataset out of id", "split, followed by the valid and test sets. 
The three splits returned are", "split_name.lower() if split_name == \"train\": return self.num_of_train_examples elif split_name == \"valid\": return self.num_of_valid_examples", "software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "by applicable law or agreed to in writing, software distributed under the License", "[] valid_data = [] test_data = [] for video_id, caption in self.video_captions: if", "len(test_data) train_dataset = self.build_id_caption_pair_generator_dataset(train_data) valid_dataset = self.build_id_caption_pair_generator_dataset(valid_data) test_dataset = self.build_id_caption_pair_generator_dataset(test_data) return ( (train_dataset,", "id: {video_id}, caption: {hash(caption)}\") self.num_of_train_examples = len(train_data) self.num_of_valid_examples = len(valid_data) self.num_of_test_examples = len(test_data)", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License", "@property @abstractmethod def video_captions(): \"\"\"Returns a dict of that maps from video id", "test data.\"\"\" pass def build_generator(self, data): \"\"\"Builds a generator that yields each element", "caption pairs in data.\"\"\" generator = lambda: self.build_generator(data) return tf.data.Dataset.from_generator(generator, (tf.string, tf.string)) @property", "def video_captions(): \"\"\"Returns a dict of that maps from video id to a", "\"test\") ) def num_of_examples_by_split(self, split_name): \"\"\"Gets the number of examples in the given", "and caches precomputed features for the given dataset.\"\"\" pass @property @abstractmethod def video_captions():", "@abstractmethod def video_captions(): \"\"\"Returns a dict of that maps from video id to", "copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "BaseVideoDataset(AbstractClass): \"\"\"Base class for video datasets.\"\"\" @property @abstractmethod def dataset_name(self): \"\"\"Gets the name", "a 
generator that yields each element from data.\"\"\" for example in data: yield", "base class for datasets. Copyright 2020 Google LLC Licensed under the Apache License,", "split _ = self.id_caption_pair_datasets split_name = split_name.lower() if split_name == \"train\": return self.num_of_train_examples", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"\"\"Get id caption pair datasets for each split in this dataset. Returns: a", "self.num_of_train_examples elif split_name == \"valid\": return self.num_of_valid_examples elif split_name == \"test\": return self.num_of_test_examples", "@abstractmethod def dataset_downloaded(self): \"\"\"A boolean describing if the dataset is downloaded.\"\"\" pass @property", "name is not \"train\", \"valid\", or \"test\". \"\"\" if \"num_of_train_examples\" not in dir(self):", "in compliance with the License. You may obtain a copy of the License", "the dataset splits. Returns: a tuple of sets, where the first set contains", "precomputed features for the given dataset.\"\"\" pass @property @abstractmethod def video_captions(): \"\"\"Returns a", "Each tuple has two elements, the first is a tf.data.Dataset of video id", "test_data = [] for video_id, caption in self.video_captions: if video_id in train_ids: train_data.append((video_id,", "split_name: the name of the dataset split, as a string (case insensitive). The", "KIND, either express or implied. See the License for the specific language governing", "def num_of_examples_by_split(self, split_name): \"\"\"Gets the number of examples in the given split in", "in writing, software distributed under the License is distributed on an \"AS IS\"", "2020 Google LLC Licensed under the Apache License, Version 2.0 (the \"License\"); you", "dataset. 
Args: split_name: the name of the dataset split, as a string (case", "\"train\"), (valid_dataset, \"valid\"), (test_dataset, \"test\") ) def num_of_examples_by_split(self, split_name): \"\"\"Gets the number of", "follows: ( (tf.data.Dataset instance, \"train\"), (tf.data.Dataset instance, \"valid\"), (tf.data.Dataset instance, \"test\")) \"\"\" train_ids,", "the number of examples in the given split. Raises: ValueError if the split", "writing, software distributed under the License is distributed on an \"AS IS\" BASIS,", "generator that yields each element from data.\"\"\" for example in data: yield example", "def download_and_cache_precomputed_features(self): \"\"\"Downloads and caches precomputed features for the given dataset.\"\"\" pass @property", "train data, the second for the validation data, and the third for the", "the second for the validation data, and the third for the test data.\"\"\"", "is the name of the split as a string. In the retured tuple,", "returned data is structured as follows: ( (tf.data.Dataset instance, \"train\"), (tf.data.Dataset instance, \"valid\"),", "data, and the third for the test data.\"\"\" pass def build_generator(self, data): \"\"\"Builds", "dataset.\"\"\" pass @property @abstractmethod def dataset_downloaded(self): \"\"\"A boolean describing if the dataset is", "or agreed to in writing, software distributed under the License is distributed on", "the License. \"\"\" from abc import ABC as AbstractClass from abc import abstractmethod", "the valid and test sets. 
The three splits returned are for \"train\", \"valid\",", "= len(test_data) train_dataset = self.build_id_caption_pair_generator_dataset(train_data) valid_dataset = self.build_id_caption_pair_generator_dataset(valid_data) test_dataset = self.build_id_caption_pair_generator_dataset(test_data) return (", "yields each element from data.\"\"\" for example in data: yield example def build_id_caption_pair_generator_dataset(self,", "split_name == \"valid\": return self.num_of_valid_examples elif split_name == \"test\": return self.num_of_test_examples else: raise", "data, the second for the validation data, and the third for the test", "[] test_data = [] for video_id, caption in self.video_captions: if video_id in train_ids:", "name of the split as a string. In the retured tuple, the first", "represents the number of examples in the given split. Raises: ValueError if the", "\"valid\", or \"test\". \"\"\" if \"num_of_train_examples\" not in dir(self): # Accessing the property", "= [] valid_data = [] test_data = [] for video_id, caption in self.video_captions:", "<reponame>googleinterns/via-content-understanding \"\"\"Defines a base class for datasets. Copyright 2020 Google LLC Licensed under", "self.id_caption_pair_datasets split_name = split_name.lower() if split_name == \"train\": return self.num_of_train_examples elif split_name ==", "downloaded.\"\"\" pass @property def data(self): \"\"\"Gets a tf.data object representing the dataset\"\"\" pass", "the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "= self.build_id_caption_pair_generator_dataset(test_data) return ( (train_dataset, \"train\"), (valid_dataset, \"valid\"), (test_dataset, \"test\") ) def num_of_examples_by_split(self,", "\"test\". Returns: an integer that represents the number of examples in the given", "not \"train\", \"valid\", or \"test\". 
\"\"\" if \"num_of_train_examples\" not in dir(self): # Accessing", "in data.\"\"\" generator = lambda: self.build_generator(data) return tf.data.Dataset.from_generator(generator, (tf.string, tf.string)) @property def id_caption_pair_datasets(self):", "that yields each element from data.\"\"\" for example in data: yield example def", "that maps from video id to a list of captions.\"\"\" pass @property @abstractmethod", "len(valid_data) self.num_of_test_examples = len(test_data) train_dataset = self.build_id_caption_pair_generator_dataset(train_data) valid_dataset = self.build_id_caption_pair_generator_dataset(valid_data) test_dataset = self.build_id_caption_pair_generator_dataset(test_data)", "retured tuple, the first element is the data for the train split, followed", "OR CONDITIONS OF ANY KIND, either express or implied. See the License for", "OF ANY KIND, either express or implied. See the License for the specific", "the given split. Raises: ValueError if the split name is not \"train\", \"valid\",", "return self.num_of_train_examples elif split_name == \"valid\": return self.num_of_valid_examples elif split_name == \"test\": return", "train_data = [] valid_data = [] test_data = [] for video_id, caption in", "video_id in train_ids: train_data.append((video_id, caption)) elif video_id in valid_ids: valid_data.append((video_id, caption)) elif video_id", "may not use this file except in compliance with the License. You may", "governing permissions and limitations under the License. 
\"\"\" from abc import ABC as", "(tf.data.Dataset instance, \"valid\"), (tf.data.Dataset instance, \"test\")) \"\"\" train_ids, valid_ids, test_ids = self.train_valid_test_ids train_data", "def dataset_name(self): \"\"\"Gets the name of the dataset.\"\"\" pass @property @abstractmethod def dataset_downloaded(self):", "data.\"\"\" for example in data: yield example def build_id_caption_pair_generator_dataset(self, data): \"\"\"Builds a tf.data", "In the retured tuple, the first element is the data for the train", "datasets. Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "generator = lambda: self.build_generator(data) return tf.data.Dataset.from_generator(generator, (tf.string, tf.string)) @property def id_caption_pair_datasets(self): \"\"\"Get id", "@property @abstractmethod def train_valid_test_ids(self): \"\"\"Returns a tuple of sets providing ids for the", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "self.train_valid_test_ids train_data = [] valid_data = [] test_data = [] for video_id, caption", "video_id in test_ids: test_data.append((video_id, caption)) else: print(f\"Orphan pair: id: {video_id}, caption: {hash(caption)}\") self.num_of_train_examples", "example in data: yield example def build_id_caption_pair_generator_dataset(self, data): \"\"\"Builds a tf.data Dataset out", "maps from video id to a list of captions.\"\"\" pass @property @abstractmethod def", "\"valid\"), (test_dataset, \"test\") ) def num_of_examples_by_split(self, split_name): \"\"\"Gets the number of examples in", "as a string. 
In the retured tuple, the first element is the data", "dir(self): # Accessing the property self.id_caption_pair_datasets counts the # number of examples in", "each split _ = self.id_caption_pair_datasets split_name = split_name.lower() if split_name == \"train\": return", "data): \"\"\"Builds a generator that yields each element from data.\"\"\" for example in", "Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the \"License\");", "Raises: ValueError if the split name is not \"train\", \"valid\", or \"test\". \"\"\"", "See the License for the specific language governing permissions and limitations under the", "\"\"\" train_ids, valid_ids, test_ids = self.train_valid_test_ids train_data = [] valid_data = [] test_data", "video datasets.\"\"\" @property @abstractmethod def dataset_name(self): \"\"\"Gets the name of the dataset.\"\"\" pass", "returned are for \"train\", \"valid\", and \"test\" splits. The returned data is structured", "contains the ids for the train data, the second for the validation data,", "and the second element is the name of the split as a string.", "tf.string)) @property def id_caption_pair_datasets(self): \"\"\"Get id caption pair datasets for each split in", "name of the dataset split, as a string (case insensitive). The split name", "a string. In the retured tuple, the first element is the data for", "of captions.\"\"\" pass @property @abstractmethod def captions_per_video(self): \"\"\"Number of captions per video.\"\"\" pass", "is downloaded.\"\"\" pass @property def data(self): \"\"\"Gets a tf.data object representing the dataset\"\"\"", "[] for video_id, caption in self.video_captions: if video_id in train_ids: train_data.append((video_id, caption)) elif", "Accessing the property self.id_caption_pair_datasets counts the # number of examples in each split", "tuple, the first element is the data for the train split, followed by", "sets. The three splits returned are for \"train\", \"valid\", and \"test\" splits. 
The", "the split as a string. In the retured tuple, the first element is", "this file except in compliance with the License. You may obtain a copy", "of id caption pairs in data.\"\"\" generator = lambda: self.build_generator(data) return tf.data.Dataset.from_generator(generator, (tf.string,", "(valid_dataset, \"valid\"), (test_dataset, \"test\") ) def num_of_examples_by_split(self, split_name): \"\"\"Gets the number of examples", "limitations under the License. \"\"\" from abc import ABC as AbstractClass from abc", "caption: {hash(caption)}\") self.num_of_train_examples = len(train_data) self.num_of_valid_examples = len(valid_data) self.num_of_test_examples = len(test_data) train_dataset =", "\"License\"); you may not use this file except in compliance with the License.", "test_dataset = self.build_id_caption_pair_generator_dataset(test_data) return ( (train_dataset, \"train\"), (valid_dataset, \"valid\"), (test_dataset, \"test\") ) def", "def dataset_downloaded(self): \"\"\"A boolean describing if the dataset is downloaded.\"\"\" pass @property def", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "you may not use this file except in compliance with the License. You", "\"valid\": return self.num_of_valid_examples elif split_name == \"test\": return self.num_of_test_examples else: raise ValueError(\"Illegal split", "pair: id: {video_id}, caption: {hash(caption)}\") self.num_of_train_examples = len(train_data) self.num_of_valid_examples = len(valid_data) self.num_of_test_examples =", "valid and test sets. 
The three splits returned are for \"train\", \"valid\", and", "agreed to in writing, software distributed under the License is distributed on an", "video_id, caption in self.video_captions: if video_id in train_ids: train_data.append((video_id, caption)) elif video_id in", "not in dir(self): # Accessing the property self.id_caption_pair_datasets counts the # number of", "train_valid_test_ids(self): \"\"\"Returns a tuple of sets providing ids for the dataset splits. Returns:", "the retured tuple, the first element is the data for the train split,", "{video_id}, caption: {hash(caption)}\") self.num_of_train_examples = len(train_data) self.num_of_valid_examples = len(valid_data) self.num_of_test_examples = len(test_data) train_dataset", "dataset is downloaded.\"\"\" pass @property def data(self): \"\"\"Gets a tf.data object representing the", "\"\"\"Base class for video datasets.\"\"\" @property @abstractmethod def dataset_name(self): \"\"\"Gets the name of", "if split_name == \"train\": return self.num_of_train_examples elif split_name == \"valid\": return self.num_of_valid_examples elif", "number of examples in the given split. Raises: ValueError if the split name", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "ValueError if the split name is not \"train\", \"valid\", or \"test\". \"\"\" if", "instance, \"test\")) \"\"\" train_ids, valid_ids, test_ids = self.train_valid_test_ids train_data = [] valid_data =", "with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0", "from data.\"\"\" for example in data: yield example def build_id_caption_pair_generator_dataset(self, data): \"\"\"Builds a", "if video_id in train_ids: train_data.append((video_id, caption)) elif video_id in valid_ids: valid_data.append((video_id, caption)) elif", "abc import ABC as AbstractClass from abc import abstractmethod import pathlib import tensorflow", "implied. 
See the License for the specific language governing permissions and limitations under", "the dataset.\"\"\" pass @property @abstractmethod def dataset_downloaded(self): \"\"\"A boolean describing if the dataset", "the given dataset.\"\"\" pass @property @abstractmethod def video_captions(): \"\"\"Returns a dict of that", "dataset_downloaded(self): \"\"\"A boolean describing if the dataset is downloaded.\"\"\" pass @property def data(self):", "\"test\" splits. The returned data is structured as follows: ( (tf.data.Dataset instance, \"train\"),", "self.build_id_caption_pair_generator_dataset(test_data) return ( (train_dataset, \"train\"), (valid_dataset, \"valid\"), (test_dataset, \"test\") ) def num_of_examples_by_split(self, split_name):", "in self.video_captions: if video_id in train_ids: train_data.append((video_id, caption)) elif video_id in valid_ids: valid_data.append((video_id,", "\"\"\" if \"num_of_train_examples\" not in dir(self): # Accessing the property self.id_caption_pair_datasets counts the", "for example in data: yield example def build_id_caption_pair_generator_dataset(self, data): \"\"\"Builds a tf.data Dataset", "tf.data Dataset out of id caption pairs in data.\"\"\" generator = lambda: self.build_generator(data)", "examples in the given split. Raises: ValueError if the split name is not", "print(f\"Orphan pair: id: {video_id}, caption: {hash(caption)}\") self.num_of_train_examples = len(train_data) self.num_of_valid_examples = len(valid_data) self.num_of_test_examples", "\"\"\"Gets a tf.data object representing the dataset\"\"\" pass @abstractmethod def download_dataset(self): \"\"\"Downloads the", "string. In the retured tuple, the first element is the data for the", "\"valid\", or \"test\". 
Returns: an integer that represents the number of examples in", "at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software", "self.build_id_caption_pair_generator_dataset(train_data) valid_dataset = self.build_id_caption_pair_generator_dataset(valid_data) test_dataset = self.build_id_caption_pair_generator_dataset(test_data) return ( (train_dataset, \"train\"), (valid_dataset, \"valid\"),", "for the given dataset.\"\"\" pass @property @abstractmethod def video_captions(): \"\"\"Returns a dict of", "use this file except in compliance with the License. You may obtain a", "data for the train split, followed by the valid and test sets. The", "elif video_id in test_ids: test_data.append((video_id, caption)) else: print(f\"Orphan pair: id: {video_id}, caption: {hash(caption)}\")", "caption)) else: print(f\"Orphan pair: id: {video_id}, caption: {hash(caption)}\") self.num_of_train_examples = len(train_data) self.num_of_valid_examples =", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use", "pass @property @abstractmethod def train_valid_test_ids(self): \"\"\"Returns a tuple of sets providing ids for", "elements, the first is a tf.data.Dataset of video id caption pairs, and the", "pairs, and the second element is the name of the split as a", "property self.id_caption_pair_datasets counts the # number of examples in each split _ =", "dataset\"\"\" pass @abstractmethod def download_dataset(self): \"\"\"Downloads the dataset.\"\"\" pass @abstractmethod def download_and_cache_precomputed_features(self): \"\"\"Downloads", "third for the test data.\"\"\" pass def build_generator(self, data): \"\"\"Builds a generator that", "datasets.\"\"\" @property @abstractmethod def dataset_name(self): \"\"\"Gets the name of the dataset.\"\"\" pass @property", "Returns: a tuple of sets, where the first set contains the ids for", "the ids for the train data, the second for the validation data, and", "in 
train_ids: train_data.append((video_id, caption)) elif video_id in valid_ids: valid_data.append((video_id, caption)) elif video_id in", "tuple of three tuples. Each tuple has two elements, the first is a", "the # number of examples in each split _ = self.id_caption_pair_datasets split_name =", "element is the data for the train split, followed by the valid and", "= [] for video_id, caption in self.video_captions: if video_id in train_ids: train_data.append((video_id, caption))", "can be \"train\", \"valid\", or \"test\". Returns: an integer that represents the number", "yield example def build_id_caption_pair_generator_dataset(self, data): \"\"\"Builds a tf.data Dataset out of id caption", "features for the given dataset.\"\"\" pass @property @abstractmethod def video_captions(): \"\"\"Returns a dict", "the name of the dataset.\"\"\" pass @property @abstractmethod def dataset_downloaded(self): \"\"\"A boolean describing", "valid_ids, test_ids = self.train_valid_test_ids train_data = [] valid_data = [] test_data = []", "the data for the train split, followed by the valid and test sets.", "import pathlib import tensorflow as tf class BaseVideoDataset(AbstractClass): \"\"\"Base class for video datasets.\"\"\"", "return tf.data.Dataset.from_generator(generator, (tf.string, tf.string)) @property def id_caption_pair_datasets(self): \"\"\"Get id caption pair datasets for", "required by applicable law or agreed to in writing, software distributed under the", "for video_id, caption in self.video_captions: if video_id in train_ids: train_data.append((video_id, caption)) elif video_id", "\"\"\"Downloads the dataset.\"\"\" pass @abstractmethod def download_and_cache_precomputed_features(self): \"\"\"Downloads and caches precomputed features for", "pass @property def data(self): \"\"\"Gets a tf.data object representing the dataset\"\"\" pass @abstractmethod", "the number of examples in the given split in this dataset. 
Args: split_name:", "an integer that represents the number of examples in the given split. Raises:", "@property @abstractmethod def captions_per_video(self): \"\"\"Number of captions per video.\"\"\" pass @property @abstractmethod def", "test_ids = self.train_valid_test_ids train_data = [] valid_data = [] test_data = [] for", "this dataset. Args: split_name: the name of the dataset split, as a string", "@property def id_caption_pair_datasets(self): \"\"\"Get id caption pair datasets for each split in this", "where the first set contains the ids for the train data, the second", "dataset split, as a string (case insensitive). The split name can be \"train\",", "\"\"\"Number of captions per video.\"\"\" pass @property @abstractmethod def train_valid_test_ids(self): \"\"\"Returns a tuple", "the test data.\"\"\" pass def build_generator(self, data): \"\"\"Builds a generator that yields each", "string (case insensitive). The split name can be \"train\", \"valid\", or \"test\". Returns:", "given split. 
Raises: ValueError if the split name is not \"train\", \"valid\", or", "pass @property @abstractmethod def captions_per_video(self): \"\"\"Number of captions per video.\"\"\" pass @property @abstractmethod", "tf.data.Dataset.from_generator(generator, (tf.string, tf.string)) @property def id_caption_pair_datasets(self): \"\"\"Get id caption pair datasets for each", "dict of that maps from video id to a list of captions.\"\"\" pass", "the first is a tf.data.Dataset of video id caption pairs, and the second", "\"valid\"), (tf.data.Dataset instance, \"test\")) \"\"\" train_ids, valid_ids, test_ids = self.train_valid_test_ids train_data = []", "name of the dataset.\"\"\" pass @property @abstractmethod def dataset_downloaded(self): \"\"\"A boolean describing if", "a tf.data object representing the dataset\"\"\" pass @abstractmethod def download_dataset(self): \"\"\"Downloads the dataset.\"\"\"", "self.video_captions: if video_id in train_ids: train_data.append((video_id, caption)) elif video_id in valid_ids: valid_data.append((video_id, caption))", "instance, \"valid\"), (tf.data.Dataset instance, \"test\")) \"\"\" train_ids, valid_ids, test_ids = self.train_valid_test_ids train_data =", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "not use this file except in compliance with the License. You may obtain", "a dict of that maps from video id to a list of captions.\"\"\"", "@property def data(self): \"\"\"Gets a tf.data object representing the dataset\"\"\" pass @abstractmethod def", "of captions per video.\"\"\" pass @property @abstractmethod def train_valid_test_ids(self): \"\"\"Returns a tuple of", "obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "for \"train\", \"valid\", and \"test\" splits. The returned data is structured as follows:", "three tuples. 
Each tuple has two elements, the first is a tf.data.Dataset of", "# number of examples in each split _ = self.id_caption_pair_datasets split_name = split_name.lower()", "self.id_caption_pair_datasets counts the # number of examples in each split _ = self.id_caption_pair_datasets", "example def build_id_caption_pair_generator_dataset(self, data): \"\"\"Builds a tf.data Dataset out of id caption pairs", "tf class BaseVideoDataset(AbstractClass): \"\"\"Base class for video datasets.\"\"\" @property @abstractmethod def dataset_name(self): \"\"\"Gets", "{hash(caption)}\") self.num_of_train_examples = len(train_data) self.num_of_valid_examples = len(valid_data) self.num_of_test_examples = len(test_data) train_dataset = self.build_id_caption_pair_generator_dataset(train_data)", "num_of_examples_by_split(self, split_name): \"\"\"Gets the number of examples in the given split in this", "train_data.append((video_id, caption)) elif video_id in valid_ids: valid_data.append((video_id, caption)) elif video_id in test_ids: test_data.append((video_id,", "providing ids for the dataset splits. Returns: a tuple of sets, where the", "a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "self.build_id_caption_pair_generator_dataset(valid_data) test_dataset = self.build_id_caption_pair_generator_dataset(test_data) return ( (train_dataset, \"train\"), (valid_dataset, \"valid\"), (test_dataset, \"test\") )", "ANY KIND, either express or implied. See the License for the specific language", "@abstractmethod def download_and_cache_precomputed_features(self): \"\"\"Downloads and caches precomputed features for the given dataset.\"\"\" pass", "id caption pairs, and the second element is the name of the split", "train_ids: train_data.append((video_id, caption)) elif video_id in valid_ids: valid_data.append((video_id, caption)) elif video_id in test_ids:", "file except in compliance with the License. 
You may obtain a copy of", "pathlib import tensorflow as tf class BaseVideoDataset(AbstractClass): \"\"\"Base class for video datasets.\"\"\" @property", "a tf.data.Dataset of video id caption pairs, and the second element is the", "video.\"\"\" pass @property @abstractmethod def train_valid_test_ids(self): \"\"\"Returns a tuple of sets providing ids", "counts the # number of examples in each split _ = self.id_caption_pair_datasets split_name", "2.0 (the \"License\"); you may not use this file except in compliance with", "element is the name of the split as a string. In the retured", "splits. The returned data is structured as follows: ( (tf.data.Dataset instance, \"train\"), (tf.data.Dataset", "caption in self.video_captions: if video_id in train_ids: train_data.append((video_id, caption)) elif video_id in valid_ids:", "for the test data.\"\"\" pass def build_generator(self, data): \"\"\"Builds a generator that yields", "class BaseVideoDataset(AbstractClass): \"\"\"Base class for video datasets.\"\"\" @property @abstractmethod def dataset_name(self): \"\"\"Gets the", "first element is the data for the train split, followed by the valid", "split in this dataset. Returns: a tuple of three tuples. Each tuple has", "The split name can be \"train\", \"valid\", or \"test\". Returns: an integer that", "valid_dataset = self.build_id_caption_pair_generator_dataset(valid_data) test_dataset = self.build_id_caption_pair_generator_dataset(test_data) return ( (train_dataset, \"train\"), (valid_dataset, \"valid\"), (test_dataset,", "for each split in this dataset. Returns: a tuple of three tuples. 
Each", "(test_dataset, \"test\") ) def num_of_examples_by_split(self, split_name): \"\"\"Gets the number of examples in the", "a tf.data Dataset out of id caption pairs in data.\"\"\" generator = lambda:", "the validation data, and the third for the test data.\"\"\" pass def build_generator(self,", "(the \"License\"); you may not use this file except in compliance with the", "\"test\")) \"\"\" train_ids, valid_ids, test_ids = self.train_valid_test_ids train_data = [] valid_data = []", "\"valid\", and \"test\" splits. The returned data is structured as follows: ( (tf.data.Dataset", "def captions_per_video(self): \"\"\"Number of captions per video.\"\"\" pass @property @abstractmethod def train_valid_test_ids(self): \"\"\"Returns", "the second element is the name of the split as a string. In", "if the split name is not \"train\", \"valid\", or \"test\". \"\"\" if \"num_of_train_examples\"", "\"\"\"Builds a tf.data Dataset out of id caption pairs in data.\"\"\" generator =", "the name of the split as a string. In the retured tuple, the", "id to a list of captions.\"\"\" pass @property @abstractmethod def captions_per_video(self): \"\"\"Number of", "= self.train_valid_test_ids train_data = [] valid_data = [] test_data = [] for video_id,", "@property @abstractmethod def dataset_name(self): \"\"\"Gets the name of the dataset.\"\"\" pass @property @abstractmethod", "the train split, followed by the valid and test sets. The three splits", "@abstractmethod def dataset_name(self): \"\"\"Gets the name of the dataset.\"\"\" pass @property @abstractmethod def", "tuples. Each tuple has two elements, the first is a tf.data.Dataset of video", "video id caption pairs, and the second element is the name of the", "the given split in this dataset. Args: split_name: the name of the dataset", "for the train split, followed by the valid and test sets. The three", "\"\"\"Defines a base class for datasets. 
Copyright 2020 Google LLC Licensed under the", "second element is the name of the split as a string. In the", "number of examples in each split _ = self.id_caption_pair_datasets split_name = split_name.lower() if", "the property self.id_caption_pair_datasets counts the # number of examples in each split _", "train_ids, valid_ids, test_ids = self.train_valid_test_ids train_data = [] valid_data = [] test_data =", "\"\"\"Gets the name of the dataset.\"\"\" pass @property @abstractmethod def dataset_downloaded(self): \"\"\"A boolean", "pass @property @abstractmethod def dataset_downloaded(self): \"\"\"A boolean describing if the dataset is downloaded.\"\"\"", "= len(train_data) self.num_of_valid_examples = len(valid_data) self.num_of_test_examples = len(test_data) train_dataset = self.build_id_caption_pair_generator_dataset(train_data) valid_dataset =", "(case insensitive). The split name can be \"train\", \"valid\", or \"test\". Returns: an", "sets providing ids for the dataset splits. Returns: a tuple of sets, where", "examples in the given split in this dataset. Args: split_name: the name of", "caption)) elif video_id in test_ids: test_data.append((video_id, caption)) else: print(f\"Orphan pair: id: {video_id}, caption:", "representing the dataset\"\"\" pass @abstractmethod def download_dataset(self): \"\"\"Downloads the dataset.\"\"\" pass @abstractmethod def", "\"train\"), (tf.data.Dataset instance, \"valid\"), (tf.data.Dataset instance, \"test\")) \"\"\" train_ids, valid_ids, test_ids = self.train_valid_test_ids", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "the first set contains the ids for the train data, the second for", "\"train\", \"valid\", or \"test\". 
\"\"\" if \"num_of_train_examples\" not in dir(self): # Accessing the", "from video id to a list of captions.\"\"\" pass @property @abstractmethod def captions_per_video(self):", "= lambda: self.build_generator(data) return tf.data.Dataset.from_generator(generator, (tf.string, tf.string)) @property def id_caption_pair_datasets(self): \"\"\"Get id caption", "this dataset. Returns: a tuple of three tuples. Each tuple has two elements,", "data(self): \"\"\"Gets a tf.data object representing the dataset\"\"\" pass @abstractmethod def download_dataset(self): \"\"\"Downloads", "of three tuples. Each tuple has two elements, the first is a tf.data.Dataset", "You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by", "may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "datasets for each split in this dataset. Returns: a tuple of three tuples.", "(train_dataset, \"train\"), (valid_dataset, \"valid\"), (test_dataset, \"test\") ) def num_of_examples_by_split(self, split_name): \"\"\"Gets the number", "law or agreed to in writing, software distributed under the License is distributed", "caption pairs, and the second element is the name of the split as", "Version 2.0 (the \"License\"); you may not use this file except in compliance", "split. Raises: ValueError if the split name is not \"train\", \"valid\", or \"test\".", "dataset splits. 
Returns: a tuple of sets, where the first set contains the", "elif split_name == \"valid\": return self.num_of_valid_examples elif split_name == \"test\": return self.num_of_test_examples else:", "the Apache License, Version 2.0 (the \"License\"); you may not use this file", "of examples in each split _ = self.id_caption_pair_datasets split_name = split_name.lower() if split_name", "tuple of sets, where the first set contains the ids for the train", "element from data.\"\"\" for example in data: yield example def build_id_caption_pair_generator_dataset(self, data): \"\"\"Builds", "def id_caption_pair_datasets(self): \"\"\"Get id caption pair datasets for each split in this dataset.", "== \"valid\": return self.num_of_valid_examples elif split_name == \"test\": return self.num_of_test_examples else: raise ValueError(\"Illegal", "captions.\"\"\" pass @property @abstractmethod def captions_per_video(self): \"\"\"Number of captions per video.\"\"\" pass @property", "are for \"train\", \"valid\", and \"test\" splits. The returned data is structured as", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "boolean describing if the dataset is downloaded.\"\"\" pass @property def data(self): \"\"\"Gets a", ") def num_of_examples_by_split(self, split_name): \"\"\"Gets the number of examples in the given split", "for the specific language governing permissions and limitations under the License. \"\"\" from", "\"train\", \"valid\", or \"test\". Returns: an integer that represents the number of examples", "language governing permissions and limitations under the License. \"\"\" from abc import ABC", "either express or implied. 
See the License for the specific language governing permissions", "\"\"\" from abc import ABC as AbstractClass from abc import abstractmethod import pathlib", "pass def build_generator(self, data): \"\"\"Builds a generator that yields each element from data.\"\"\"", "tf.data object representing the dataset\"\"\" pass @abstractmethod def download_dataset(self): \"\"\"Downloads the dataset.\"\"\" pass", "the dataset is downloaded.\"\"\" pass @property def data(self): \"\"\"Gets a tf.data object representing", "self.num_of_test_examples = len(test_data) train_dataset = self.build_id_caption_pair_generator_dataset(train_data) valid_dataset = self.build_id_caption_pair_generator_dataset(valid_data) test_dataset = self.build_id_caption_pair_generator_dataset(test_data) return", "Google LLC Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "of the dataset split, as a string (case insensitive). The split name can", "Apache License, Version 2.0 (the \"License\"); you may not use this file except", "or implied. 
See the License for the specific language governing permissions and limitations", "data.\"\"\" pass def build_generator(self, data): \"\"\"Builds a generator that yields each element from", "import ABC as AbstractClass from abc import abstractmethod import pathlib import tensorflow as", "@abstractmethod def captions_per_video(self): \"\"\"Number of captions per video.\"\"\" pass @property @abstractmethod def train_valid_test_ids(self):", "in data: yield example def build_id_caption_pair_generator_dataset(self, data): \"\"\"Builds a tf.data Dataset out of", "= self.build_id_caption_pair_generator_dataset(train_data) valid_dataset = self.build_id_caption_pair_generator_dataset(valid_data) test_dataset = self.build_id_caption_pair_generator_dataset(test_data) return ( (train_dataset, \"train\"), (valid_dataset,", "(tf.string, tf.string)) @property def id_caption_pair_datasets(self): \"\"\"Get id caption pair datasets for each split", "self.build_generator(data) return tf.data.Dataset.from_generator(generator, (tf.string, tf.string)) @property def id_caption_pair_datasets(self): \"\"\"Get id caption pair datasets", "describing if the dataset is downloaded.\"\"\" pass @property def data(self): \"\"\"Gets a tf.data", "and \"test\" splits. The returned data is structured as follows: ( (tf.data.Dataset instance,", "id_caption_pair_datasets(self): \"\"\"Get id caption pair datasets for each split in this dataset. Returns:", "the train data, the second for the validation data, and the third for", "sets, where the first set contains the ids for the train data, the", "a string (case insensitive). The split name can be \"train\", \"valid\", or \"test\".", "splits returned are for \"train\", \"valid\", and \"test\" splits. 
The returned data is", "each element from data.\"\"\" for example in data: yield example def build_id_caption_pair_generator_dataset(self, data):", "pass @abstractmethod def download_dataset(self): \"\"\"Downloads the dataset.\"\"\" pass @abstractmethod def download_and_cache_precomputed_features(self): \"\"\"Downloads and", "splits. Returns: a tuple of sets, where the first set contains the ids", "structured as follows: ( (tf.data.Dataset instance, \"train\"), (tf.data.Dataset instance, \"valid\"), (tf.data.Dataset instance, \"test\"))", "CONDITIONS OF ANY KIND, either express or implied. See the License for the", "dataset. Returns: a tuple of three tuples. Each tuple has two elements, the", "ids for the dataset splits. Returns: a tuple of sets, where the first", "dataset.\"\"\" pass @property @abstractmethod def video_captions(): \"\"\"Returns a dict of that maps from", "to in writing, software distributed under the License is distributed on an \"AS", "a tuple of three tuples. Each tuple has two elements, the first is", "is structured as follows: ( (tf.data.Dataset instance, \"train\"), (tf.data.Dataset instance, \"valid\"), (tf.data.Dataset instance,", "of the split as a string. In the retured tuple, the first element", "tensorflow as tf class BaseVideoDataset(AbstractClass): \"\"\"Base class for video datasets.\"\"\" @property @abstractmethod def", "of the dataset.\"\"\" pass @property @abstractmethod def dataset_downloaded(self): \"\"\"A boolean describing if the", "for datasets. Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0", "as tf class BaseVideoDataset(AbstractClass): \"\"\"Base class for video datasets.\"\"\" @property @abstractmethod def dataset_name(self):", "a list of captions.\"\"\" pass @property @abstractmethod def captions_per_video(self): \"\"\"Number of captions per", "under the License. 
\"\"\" from abc import ABC as AbstractClass from abc import", "captions per video.\"\"\" pass @property @abstractmethod def train_valid_test_ids(self): \"\"\"Returns a tuple of sets", "except in compliance with the License. You may obtain a copy of the", "followed by the valid and test sets. The three splits returned are for", "The returned data is structured as follows: ( (tf.data.Dataset instance, \"train\"), (tf.data.Dataset instance,", "tuple of sets providing ids for the dataset splits. Returns: a tuple of", "in valid_ids: valid_data.append((video_id, caption)) elif video_id in test_ids: test_data.append((video_id, caption)) else: print(f\"Orphan pair:", "= len(valid_data) self.num_of_test_examples = len(test_data) train_dataset = self.build_id_caption_pair_generator_dataset(train_data) valid_dataset = self.build_id_caption_pair_generator_dataset(valid_data) test_dataset =", "of examples in the given split in this dataset. Args: split_name: the name", "= self.build_id_caption_pair_generator_dataset(valid_data) test_dataset = self.build_id_caption_pair_generator_dataset(test_data) return ( (train_dataset, \"train\"), (valid_dataset, \"valid\"), (test_dataset, \"test\")", "split in this dataset. Args: split_name: the name of the dataset split, as", "= self.id_caption_pair_datasets split_name = split_name.lower() if split_name == \"train\": return self.num_of_train_examples elif split_name", "return self.num_of_valid_examples elif split_name == \"test\": return self.num_of_test_examples else: raise ValueError(\"Illegal split name\")", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "train split, followed by the valid and test sets. The three splits returned", "is a tf.data.Dataset of video id caption pairs, and the second element is", "that represents the number of examples in the given split. 
Raises: ValueError if", "of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "is not \"train\", \"valid\", or \"test\". \"\"\" if \"num_of_train_examples\" not in dir(self): #", "download_and_cache_precomputed_features(self): \"\"\"Downloads and caches precomputed features for the given dataset.\"\"\" pass @property @abstractmethod", "class for video datasets.\"\"\" @property @abstractmethod def dataset_name(self): \"\"\"Gets the name of the", "= split_name.lower() if split_name == \"train\": return self.num_of_train_examples elif split_name == \"valid\": return", "as follows: ( (tf.data.Dataset instance, \"train\"), (tf.data.Dataset instance, \"valid\"), (tf.data.Dataset instance, \"test\")) \"\"\"", "for the train data, the second for the validation data, and the third", "split_name == \"train\": return self.num_of_train_examples elif split_name == \"valid\": return self.num_of_valid_examples elif split_name", "\"\"\"Gets the number of examples in the given split in this dataset. 
Args:", "self.num_of_valid_examples = len(valid_data) self.num_of_test_examples = len(test_data) train_dataset = self.build_id_caption_pair_generator_dataset(train_data) valid_dataset = self.build_id_caption_pair_generator_dataset(valid_data) test_dataset", "in test_ids: test_data.append((video_id, caption)) else: print(f\"Orphan pair: id: {video_id}, caption: {hash(caption)}\") self.num_of_train_examples =", "def build_generator(self, data): \"\"\"Builds a generator that yields each element from data.\"\"\" for", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "\"\"\"Downloads and caches precomputed features for the given dataset.\"\"\" pass @property @abstractmethod def", "caches precomputed features for the given dataset.\"\"\" pass @property @abstractmethod def video_captions(): \"\"\"Returns", "return ( (train_dataset, \"train\"), (valid_dataset, \"valid\"), (test_dataset, \"test\") ) def num_of_examples_by_split(self, split_name): \"\"\"Gets", "each split in this dataset. Returns: a tuple of three tuples. Each tuple", "given split in this dataset. Args: split_name: the name of the dataset split,", "( (train_dataset, \"train\"), (valid_dataset, \"valid\"), (test_dataset, \"test\") ) def num_of_examples_by_split(self, split_name): \"\"\"Gets the", "video id to a list of captions.\"\"\" pass @property @abstractmethod def captions_per_video(self): \"\"\"Number", "and limitations under the License. 
\"\"\" from abc import ABC as AbstractClass from", "captions_per_video(self): \"\"\"Number of captions per video.\"\"\" pass @property @abstractmethod def train_valid_test_ids(self): \"\"\"Returns a", "else: print(f\"Orphan pair: id: {video_id}, caption: {hash(caption)}\") self.num_of_train_examples = len(train_data) self.num_of_valid_examples = len(valid_data)", "@property @abstractmethod def dataset_downloaded(self): \"\"\"A boolean describing if the dataset is downloaded.\"\"\" pass", "if \"num_of_train_examples\" not in dir(self): # Accessing the property self.id_caption_pair_datasets counts the #", "compliance with the License. You may obtain a copy of the License at", "first set contains the ids for the train data, the second for the", "import tensorflow as tf class BaseVideoDataset(AbstractClass): \"\"\"Base class for video datasets.\"\"\" @property @abstractmethod", "or \"test\". \"\"\" if \"num_of_train_examples\" not in dir(self): # Accessing the property self.id_caption_pair_datasets", "class for datasets. Copyright 2020 Google LLC Licensed under the Apache License, Version", "split_name): \"\"\"Gets the number of examples in the given split in this dataset.", "express or implied. See the License for the specific language governing permissions and", "@abstractmethod def download_dataset(self): \"\"\"Downloads the dataset.\"\"\" pass @abstractmethod def download_and_cache_precomputed_features(self): \"\"\"Downloads and caches", "pairs in data.\"\"\" generator = lambda: self.build_generator(data) return tf.data.Dataset.from_generator(generator, (tf.string, tf.string)) @property def", "permissions and limitations under the License. \"\"\" from abc import ABC as AbstractClass", "License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required", "\"\"\"Returns a dict of that maps from video id to a list of", "split name can be \"train\", \"valid\", or \"test\". 
Returns: an integer that represents", "in each split _ = self.id_caption_pair_datasets split_name = split_name.lower() if split_name == \"train\":", "test_data.append((video_id, caption)) else: print(f\"Orphan pair: id: {video_id}, caption: {hash(caption)}\") self.num_of_train_examples = len(train_data) self.num_of_valid_examples", "(tf.data.Dataset instance, \"train\"), (tf.data.Dataset instance, \"valid\"), (tf.data.Dataset instance, \"test\")) \"\"\" train_ids, valid_ids, test_ids", "or \"test\". Returns: an integer that represents the number of examples in the", "by the valid and test sets. The three splits returned are for \"train\",", "dataset_name(self): \"\"\"Gets the name of the dataset.\"\"\" pass @property @abstractmethod def dataset_downloaded(self): \"\"\"A", "( (tf.data.Dataset instance, \"train\"), (tf.data.Dataset instance, \"valid\"), (tf.data.Dataset instance, \"test\")) \"\"\" train_ids, valid_ids,", "download_dataset(self): \"\"\"Downloads the dataset.\"\"\" pass @abstractmethod def download_and_cache_precomputed_features(self): \"\"\"Downloads and caches precomputed features", "caption pair datasets for each split in this dataset. Returns: a tuple of", "first is a tf.data.Dataset of video id caption pairs, and the second element", "from abc import ABC as AbstractClass from abc import abstractmethod import pathlib import", "the name of the dataset split, as a string (case insensitive). The split", "def data(self): \"\"\"Gets a tf.data object representing the dataset\"\"\" pass @abstractmethod def download_dataset(self):", "applicable law or agreed to in writing, software distributed under the License is", "data): \"\"\"Builds a tf.data Dataset out of id caption pairs in data.\"\"\" generator", "and test sets. The three splits returned are for \"train\", \"valid\", and \"test\"", "caption)) elif video_id in valid_ids: valid_data.append((video_id, caption)) elif video_id in test_ids: test_data.append((video_id, caption))", "the License. 
You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless", "per video.\"\"\" pass @property @abstractmethod def train_valid_test_ids(self): \"\"\"Returns a tuple of sets providing", "validation data, and the third for the test data.\"\"\" pass def build_generator(self, data):", "two elements, the first is a tf.data.Dataset of video id caption pairs, and", "from abc import abstractmethod import pathlib import tensorflow as tf class BaseVideoDataset(AbstractClass): \"\"\"Base", "AbstractClass from abc import abstractmethod import pathlib import tensorflow as tf class BaseVideoDataset(AbstractClass):", "split_name = split_name.lower() if split_name == \"train\": return self.num_of_train_examples elif split_name == \"valid\":", "video_id in valid_ids: valid_data.append((video_id, caption)) elif video_id in test_ids: test_data.append((video_id, caption)) else: print(f\"Orphan", "_ = self.id_caption_pair_datasets split_name = split_name.lower() if split_name == \"train\": return self.num_of_train_examples elif", "data is structured as follows: ( (tf.data.Dataset instance, \"train\"), (tf.data.Dataset instance, \"valid\"), (tf.data.Dataset", "== \"train\": return self.num_of_train_examples elif split_name == \"valid\": return self.num_of_valid_examples elif split_name ==", "\"\"\"Builds a generator that yields each element from data.\"\"\" for example in data:", "the dataset.\"\"\" pass @abstractmethod def download_and_cache_precomputed_features(self): \"\"\"Downloads and caches precomputed features for the", "second for the validation data, and the third for the test data.\"\"\" pass", "specific language governing permissions and limitations under the License. \"\"\" from abc import", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See", "data.\"\"\" generator = lambda: self.build_generator(data) return tf.data.Dataset.from_generator(generator, (tf.string, tf.string)) @property def id_caption_pair_datasets(self): \"\"\"Get", "lambda: self.build_generator(data) return tf.data.Dataset.from_generator(generator, (tf.string, tf.string)) @property def id_caption_pair_datasets(self): \"\"\"Get id caption pair", "Returns: an integer that represents the number of examples in the given split.", "split as a string. In the retured tuple, the first element is the", "def train_valid_test_ids(self): \"\"\"Returns a tuple of sets providing ids for the dataset splits.", "a base class for datasets. Copyright 2020 Google LLC Licensed under the Apache", "tuple has two elements, the first is a tf.data.Dataset of video id caption", "(tf.data.Dataset instance, \"test\")) \"\"\" train_ids, valid_ids, test_ids = self.train_valid_test_ids train_data = [] valid_data", "in dir(self): # Accessing the property self.id_caption_pair_datasets counts the # number of examples", "id caption pairs in data.\"\"\" generator = lambda: self.build_generator(data) return tf.data.Dataset.from_generator(generator, (tf.string, tf.string))", "\"\"\"A boolean describing if the dataset is downloaded.\"\"\" pass @property def data(self): \"\"\"Gets", "the specific language governing permissions and limitations under the License. \"\"\" from abc", "len(train_data) self.num_of_valid_examples = len(valid_data) self.num_of_test_examples = len(test_data) train_dataset = self.build_id_caption_pair_generator_dataset(train_data) valid_dataset = self.build_id_caption_pair_generator_dataset(valid_data)", "in this dataset. Returns: a tuple of three tuples. Each tuple has two", "ABC as AbstractClass from abc import abstractmethod import pathlib import tensorflow as tf", "to a list of captions.\"\"\" pass @property @abstractmethod def captions_per_video(self): \"\"\"Number of captions", "in this dataset. 
Args: split_name: the name of the dataset split, as a", "tf.data.Dataset of video id caption pairs, and the second element is the name", "examples in each split _ = self.id_caption_pair_datasets split_name = split_name.lower() if split_name ==", "\"num_of_train_examples\" not in dir(self): # Accessing the property self.id_caption_pair_datasets counts the # number", "the dataset split, as a string (case insensitive). The split name can be", "the dataset\"\"\" pass @abstractmethod def download_dataset(self): \"\"\"Downloads the dataset.\"\"\" pass @abstractmethod def download_and_cache_precomputed_features(self):" ]
[ "'#7e5109', ], \"Carrot\":[ '#e67e22', '#fdf2e9', '#fae5d3', '#f5cba7', '#f0b27a', '#eb984e', '#e67e22', '#ca6f1e', '#af601a', '#935116',", "'#909497', '#797d7f', '#626567', ], \"Concrete\":[ '#95a5a6', '#f4f6f6', '#eaeded', '#d5dbdb', '#bfc9ca', '#aab7b8', '#95a5a6', '#839192',", "'#d0ece7', '#a2d9ce', '#73c6b6', '#45b39d', '#16a085', '#138d75', '#117a65', '#0e6655', '#0b5345' ], \"Emerald\":[ '#2ecc71', '#eafaf1',", "'#f0b27a', '#eb984e', '#e67e22', '#ca6f1e', '#af601a', '#935116', '#784212', ], 'Pumpkin':[ '#d35400', '#fbeee6', '#f6ddcc', '#edbb99',", "'#cacfd2', '#bdc3c7', '#a6acaf', '#909497', '#797d7f', '#626567', ], \"Concrete\":[ '#95a5a6', '#f4f6f6', '#eaeded', '#d5dbdb', '#bfc9ca',", "\"Concrete\":[ '#95a5a6', '#f4f6f6', '#eaeded', '#d5dbdb', '#bfc9ca', '#aab7b8', '#95a5a6', '#839192', '#717d7e', '#5f6a6a', '#4d5656' ],", "'Orange':[ '#f39c12', '#fef5e7', '#fdebd0', '#fad7a0', '#f8c471', '#f5b041', '#f39c12', '#d68910', '#b9770e', '#9c640c', '#7e5109', ],", "'#ba4a00', '#a04000', '#873600', '#6e2c00', ], 'Alizarin':[ '#e74c3c', '#fdedec', '#fadbd8', '#f5b7b1', '#f1948a', '#ec7063', '#e74c3c',", "'#95a5a6', '#839192', '#717d7e', '#5f6a6a', '#4d5656' ], \"Asbestos\" :[ '#f2f4f4', '#e5e8e8', '#ccd1d1', '#b2babb', '#99a3a4',", "], 'Sunflower':[ '#f1c40f', '#fef9e7', '#fcf3cf', '#f9e79f', '#f7dc6f', '#f4d03f', '#f1c40f', '#d4ac0d', '#b7950b', '#9a7d0a', '#7d6608',", "'#8e44ad', '#7d3c98', '#6c3483', '#5b2c6f', '#4a235a', ], 'Wet Asphalt':[ '#34495e', '#ebedef', '#d6dbdf', '#aeb6bf', '#85929e',", "'#edbb99', '#e59866', '#dc7633', '#d35400', '#ba4a00', '#a04000', '#873600', '#6e2c00', ], 'Alizarin':[ '#e74c3c', '#fdedec', '#fadbd8',", "\"Asbestos\" :[ '#f2f4f4', '#e5e8e8', '#ccd1d1', '#b2babb', '#99a3a4', '#7f8c8d', '#707b7c', '#616a6b', '#515a5a', '#424949' ]", "'#ebdef0', '#d7bde2', '#c39bd3', '#af7ac5', '#9b59b6', '#884ea0', '#76448a', '#633974', '#512e5f', ], 'Wisteria' :[ '#8e44ad',", "'#7b241c', '#641e16', ], 'Clouds':[ '#ecf0f1', '#fdfefe', '#fbfcfc', 
'#f7f9f9', '#f4f6f7', '#f0f3f4', '#ecf0f1', '#d0d3d4', '#b3b6b7',", "'#16a085', '#e8f6f3', '#d0ece7', '#a2d9ce', '#73c6b6', '#45b39d', '#16a085', '#138d75', '#117a65', '#0e6655', '#0b5345' ], \"Emerald\":[", "'#5f6a6a', '#4d5656' ], \"Asbestos\" :[ '#f2f4f4', '#e5e8e8', '#ccd1d1', '#b2babb', '#99a3a4', '#7f8c8d', '#707b7c', '#616a6b',", "'#45b39d', '#16a085', '#138d75', '#117a65', '#0e6655', '#0b5345' ], \"Emerald\":[ '#2ecc71', '#eafaf1', \"#d5f5e3\", \"#abebc6\", \"#82e0aa\",", "], 'Nephritis':[ '#27ae60', '#e9f7ef', '#d4efdf', '#a9dfbf', '#7dcea0', '#52be80', '#27ae60', '#229954', '#1e8449', '#196f3d', '#145a32',", "'#5d6d7e', '#34495e', '#2e4053', '#283747', '#212f3c', '#1b2631', ], 'Midnight Blue':[ '#2c3e50', '#eaecee', '#d5d8dc', '#abb2b9',", "'#f5cba7', '#f0b27a', '#eb984e', '#e67e22', '#ca6f1e', '#af601a', '#935116', '#784212', ], 'Pumpkin':[ '#d35400', '#fbeee6', '#f6ddcc',", "'#273746', '#212f3d', '#1c2833', '#17202a', ], 'Sunflower':[ '#f1c40f', '#fef9e7', '#fcf3cf', '#f9e79f', '#f7dc6f', '#f4d03f', '#f1c40f',", "'#fdebd0', '#fad7a0', '#f8c471', '#f5b041', '#f39c12', '#d68910', '#b9770e', '#9c640c', '#7e5109', ], \"Carrot\":[ '#e67e22', '#fdf2e9',", "'#27ae60', '#229954', '#1e8449', '#196f3d', '#145a32', ], '<NAME>':[ '#3498db', '#ebf5fb', '#d6eaf8', '#aed6f1', '#85c1e9', '#5dade2',", "= { \"Turquoise\": [ \"#1abc9c\", \"#e8f8f5\", \"#d1f2eb\", '#a3e4d7', '#76d7c4', '#48c9b0', '#1abc9c', '#17a589', '#148f77',", "'#196f3d', '#145a32', ], '<NAME>':[ '#3498db', '#ebf5fb', '#d6eaf8', '#aed6f1', '#85c1e9', '#5dade2', '#3498db', '#2e86c1', '#2874a6',", "'#fae5d3', '#f5cba7', '#f0b27a', '#eb984e', '#e67e22', '#ca6f1e', '#af601a', '#935116', '#784212', ], 'Pumpkin':[ '#d35400', '#fbeee6',", "'#f5b7b1', '#f1948a', '#ec7063', '#e74c3c', '#cb4335', '#b03a2e', '#943126', '#78281f', ], 'Pomegranate':[ '#c0392b', '#f9ebea', '#f2d7d5',", "'#d6dbdf', '#aeb6bf', '#85929e', '#5d6d7e', '#34495e', '#2e4053', '#283747', '#212f3c', '#1b2631', ], 'Midnight Blue':[ 
'#2c3e50',", "'#ebedef', '#d6dbdf', '#aeb6bf', '#85929e', '#5d6d7e', '#34495e', '#2e4053', '#283747', '#212f3c', '#1b2631', ], 'Midnight Blue':[", "'#d7dbdd', '#cacfd2', '#bdc3c7', '#a6acaf', '#909497', '#797d7f', '#626567', ], \"Concrete\":[ '#95a5a6', '#f4f6f6', '#eaeded', '#d5dbdb',", "'#fdedec', '#fadbd8', '#f5b7b1', '#f1948a', '#ec7063', '#e74c3c', '#cb4335', '#b03a2e', '#943126', '#78281f', ], 'Pomegranate':[ '#c0392b',", "'#76448a', '#633974', '#512e5f', ], 'Wisteria' :[ '#8e44ad', '#f4ecf7', '#e8daef', '#d2b4de', '#bb8fce', '#a569bd', '#8e44ad',", "'#a3e4d7', '#76d7c4', '#48c9b0', '#1abc9c', '#17a589', '#148f77', '#117864', '#0e6251'], \"Green Sea\":[ '#16a085', '#e8f6f3', '#d0ece7',", "], \"Asbestos\" :[ '#f2f4f4', '#e5e8e8', '#ccd1d1', '#b2babb', '#99a3a4', '#7f8c8d', '#707b7c', '#616a6b', '#515a5a', '#424949'", "], '<NAME>':[ '#3498db', '#ebf5fb', '#d6eaf8', '#aed6f1', '#85c1e9', '#5dade2', '#3498db', '#2e86c1', '#2874a6', '#21618c', '#1b4f72',", "'#2980b9', '#eaf2f8', '#d4e6f1', '#a9cce3', '#7fb3d5', '#5499c7', '#2980b9', '#2471a3', '#1f618d', '#1a5276', '#154360,' ], 'Amethyst':[", "'#f9ebea', '#f2d7d5', '#e6b0aa', '#d98880', '#cd6155', '#c0392b', '#a93226', '#922b21', '#7b241c', '#641e16', ], 'Clouds':[ '#ecf0f1',", "'#fef5e7', '#fdebd0', '#fad7a0', '#f8c471', '#f5b041', '#f39c12', '#d68910', '#b9770e', '#9c640c', '#7e5109', ], \"Carrot\":[ '#e67e22',", "'#d5dbdb', '#bfc9ca', '#aab7b8', '#95a5a6', '#839192', '#717d7e', '#5f6a6a', '#4d5656' ], \"Asbestos\" :[ '#f2f4f4', '#e5e8e8',", "{ \"Turquoise\": [ \"#1abc9c\", \"#e8f8f5\", \"#d1f2eb\", '#a3e4d7', '#76d7c4', '#48c9b0', '#1abc9c', '#17a589', '#148f77', '#117864',", "'#a9cce3', '#7fb3d5', '#5499c7', '#2980b9', '#2471a3', '#1f618d', '#1a5276', '#154360,' ], 'Amethyst':[ '#9b59b6', '#f5eef8', '#ebdef0',", "\"#1abc9c\", \"#e8f8f5\", \"#d1f2eb\", '#a3e4d7', '#76d7c4', '#48c9b0', '#1abc9c', '#17a589', '#148f77', '#117864', '#0e6251'], \"Green Sea\":[", "'#212f3d', '#1c2833', '#17202a', ], 'Sunflower':[ 
'#f1c40f', '#fef9e7', '#fcf3cf', '#f9e79f', '#f7dc6f', '#f4d03f', '#f1c40f', '#d4ac0d',", "'#d68910', '#b9770e', '#9c640c', '#7e5109', ], \"Carrot\":[ '#e67e22', '#fdf2e9', '#fae5d3', '#f5cba7', '#f0b27a', '#eb984e', '#e67e22',", "'#3498db', '#ebf5fb', '#d6eaf8', '#aed6f1', '#85c1e9', '#5dade2', '#3498db', '#2e86c1', '#2874a6', '#21618c', '#1b4f72', ], 'Belize", "], \"Emerald\":[ '#2ecc71', '#eafaf1', \"#d5f5e3\", \"#abebc6\", \"#82e0aa\", \"#58d68d\", \"#2ecc71\", \"#28b463\", \"#239b56\", \"#1d8348\", \"#186a3b\"", "'Nephritis':[ '#27ae60', '#e9f7ef', '#d4efdf', '#a9dfbf', '#7dcea0', '#52be80', '#27ae60', '#229954', '#1e8449', '#196f3d', '#145a32', ],", "'#f7dc6f', '#f4d03f', '#f1c40f', '#d4ac0d', '#b7950b', '#9a7d0a', '#7d6608', ], 'Orange':[ '#f39c12', '#fef5e7', '#fdebd0', '#fad7a0',", "'#78281f', ], 'Pomegranate':[ '#c0392b', '#f9ebea', '#f2d7d5', '#e6b0aa', '#d98880', '#cd6155', '#c0392b', '#a93226', '#922b21', '#7b241c',", "'#52be80', '#27ae60', '#229954', '#1e8449', '#196f3d', '#145a32', ], '<NAME>':[ '#3498db', '#ebf5fb', '#d6eaf8', '#aed6f1', '#85c1e9',", "'#f7f9f9', '#f4f6f7', '#f0f3f4', '#ecf0f1', '#d0d3d4', '#b3b6b7', '#979a9a', '#7b7d7d', ], \"Silver\":[ '#bdc3c7', '#f8f9f9', '#f2f3f4',", "'#bdc3c7', '#a6acaf', '#909497', '#797d7f', '#626567', ], \"Concrete\":[ '#95a5a6', '#f4f6f6', '#eaeded', '#d5dbdb', '#bfc9ca', '#aab7b8',", "'#aeb6bf', '#85929e', '#5d6d7e', '#34495e', '#2e4053', '#283747', '#212f3c', '#1b2631', ], 'Midnight Blue':[ '#2c3e50', '#eaecee',", "'#e67e22', '#ca6f1e', '#af601a', '#935116', '#784212', ], 'Pumpkin':[ '#d35400', '#fbeee6', '#f6ddcc', '#edbb99', '#e59866', '#dc7633',", "\"#abebc6\", \"#82e0aa\", \"#58d68d\", \"#2ecc71\", \"#28b463\", \"#239b56\", \"#1d8348\", \"#186a3b\" ], 'Nephritis':[ '#27ae60', '#e9f7ef', '#d4efdf',", "'#ecf0f1', '#d0d3d4', '#b3b6b7', '#979a9a', '#7b7d7d', ], \"Silver\":[ '#bdc3c7', '#f8f9f9', '#f2f3f4', '#e5e7e9', '#d7dbdd', '#cacfd2',", "'#9b59b6', '#884ea0', '#76448a', '#633974', '#512e5f', ], 
'Wisteria' :[ '#8e44ad', '#f4ecf7', '#e8daef', '#d2b4de', '#bb8fce',", "'#d5d8dc', '#abb2b9', '#808b96', '#566573', '#2c3e50', '#273746', '#212f3d', '#1c2833', '#17202a', ], 'Sunflower':[ '#f1c40f', '#fef9e7',", "'#48c9b0', '#1abc9c', '#17a589', '#148f77', '#117864', '#0e6251'], \"Green Sea\":[ '#16a085', '#e8f6f3', '#d0ece7', '#a2d9ce', '#73c6b6',", "\"Green Sea\":[ '#16a085', '#e8f6f3', '#d0ece7', '#a2d9ce', '#73c6b6', '#45b39d', '#16a085', '#138d75', '#117a65', '#0e6655', '#0b5345'", "'#b7950b', '#9a7d0a', '#7d6608', ], 'Orange':[ '#f39c12', '#fef5e7', '#fdebd0', '#fad7a0', '#f8c471', '#f5b041', '#f39c12', '#d68910',", "'#cb4335', '#b03a2e', '#943126', '#78281f', ], 'Pomegranate':[ '#c0392b', '#f9ebea', '#f2d7d5', '#e6b0aa', '#d98880', '#cd6155', '#c0392b',", "'#bdc3c7', '#f8f9f9', '#f2f3f4', '#e5e7e9', '#d7dbdd', '#cacfd2', '#bdc3c7', '#a6acaf', '#909497', '#797d7f', '#626567', ], \"Concrete\":[", "'#1f618d', '#1a5276', '#154360,' ], 'Amethyst':[ '#9b59b6', '#f5eef8', '#ebdef0', '#d7bde2', '#c39bd3', '#af7ac5', '#9b59b6', '#884ea0',", "'Wet Asphalt':[ '#34495e', '#ebedef', '#d6dbdf', '#aeb6bf', '#85929e', '#5d6d7e', '#34495e', '#2e4053', '#283747', '#212f3c', '#1b2631',", "'#e6b0aa', '#d98880', '#cd6155', '#c0392b', '#a93226', '#922b21', '#7b241c', '#641e16', ], 'Clouds':[ '#ecf0f1', '#fdfefe', '#fbfcfc',", "'#bfc9ca', '#aab7b8', '#95a5a6', '#839192', '#717d7e', '#5f6a6a', '#4d5656' ], \"Asbestos\" :[ '#f2f4f4', '#e5e8e8', '#ccd1d1',", "'#1c2833', '#17202a', ], 'Sunflower':[ '#f1c40f', '#fef9e7', '#fcf3cf', '#f9e79f', '#f7dc6f', '#f4d03f', '#f1c40f', '#d4ac0d', '#b7950b',", "'#16a085', '#138d75', '#117a65', '#0e6655', '#0b5345' ], \"Emerald\":[ '#2ecc71', '#eafaf1', \"#d5f5e3\", \"#abebc6\", \"#82e0aa\", \"#58d68d\",", "'#fbeee6', '#f6ddcc', '#edbb99', '#e59866', '#dc7633', '#d35400', '#ba4a00', '#a04000', '#873600', '#6e2c00', ], 'Alizarin':[ '#e74c3c',", "'#633974', '#512e5f', ], 'Wisteria' :[ '#8e44ad', '#f4ecf7', '#e8daef', '#d2b4de', '#bb8fce', '#a569bd', 
'#8e44ad', '#7d3c98',", "'#dc7633', '#d35400', '#ba4a00', '#a04000', '#873600', '#6e2c00', ], 'Alizarin':[ '#e74c3c', '#fdedec', '#fadbd8', '#f5b7b1', '#f1948a',", "<filename>vispupu/colors.py<gh_stars>0 color_styles = { \"Turquoise\": [ \"#1abc9c\", \"#e8f8f5\", \"#d1f2eb\", '#a3e4d7', '#76d7c4', '#48c9b0', '#1abc9c',", ":[ '#8e44ad', '#f4ecf7', '#e8daef', '#d2b4de', '#bb8fce', '#a569bd', '#8e44ad', '#7d3c98', '#6c3483', '#5b2c6f', '#4a235a', ],", "'#0e6655', '#0b5345' ], \"Emerald\":[ '#2ecc71', '#eafaf1', \"#d5f5e3\", \"#abebc6\", \"#82e0aa\", \"#58d68d\", \"#2ecc71\", \"#28b463\", \"#239b56\",", "], \"Concrete\":[ '#95a5a6', '#f4f6f6', '#eaeded', '#d5dbdb', '#bfc9ca', '#aab7b8', '#95a5a6', '#839192', '#717d7e', '#5f6a6a', '#4d5656'", "'#f9e79f', '#f7dc6f', '#f4d03f', '#f1c40f', '#d4ac0d', '#b7950b', '#9a7d0a', '#7d6608', ], 'Orange':[ '#f39c12', '#fef5e7', '#fdebd0',", "'#aab7b8', '#95a5a6', '#839192', '#717d7e', '#5f6a6a', '#4d5656' ], \"Asbestos\" :[ '#f2f4f4', '#e5e8e8', '#ccd1d1', '#b2babb',", "'#979a9a', '#7b7d7d', ], \"Silver\":[ '#bdc3c7', '#f8f9f9', '#f2f3f4', '#e5e7e9', '#d7dbdd', '#cacfd2', '#bdc3c7', '#a6acaf', '#909497',", "'#839192', '#717d7e', '#5f6a6a', '#4d5656' ], \"Asbestos\" :[ '#f2f4f4', '#e5e8e8', '#ccd1d1', '#b2babb', '#99a3a4', '#7f8c8d',", "'#af7ac5', '#9b59b6', '#884ea0', '#76448a', '#633974', '#512e5f', ], 'Wisteria' :[ '#8e44ad', '#f4ecf7', '#e8daef', '#d2b4de',", "'#7dcea0', '#52be80', '#27ae60', '#229954', '#1e8449', '#196f3d', '#145a32', ], '<NAME>':[ '#3498db', '#ebf5fb', '#d6eaf8', '#aed6f1',", "'#0e6251'], \"Green Sea\":[ '#16a085', '#e8f6f3', '#d0ece7', '#a2d9ce', '#73c6b6', '#45b39d', '#16a085', '#138d75', '#117a65', '#0e6655',", "\"#28b463\", \"#239b56\", \"#1d8348\", \"#186a3b\" ], 'Nephritis':[ '#27ae60', '#e9f7ef', '#d4efdf', '#a9dfbf', '#7dcea0', '#52be80', '#27ae60',", "'#f5b041', '#f39c12', '#d68910', '#b9770e', '#9c640c', '#7e5109', ], \"Carrot\":[ '#e67e22', '#fdf2e9', '#fae5d3', '#f5cba7', '#f0b27a',", "], 
'Pumpkin':[ '#d35400', '#fbeee6', '#f6ddcc', '#edbb99', '#e59866', '#dc7633', '#d35400', '#ba4a00', '#a04000', '#873600', '#6e2c00',", "'#ecf0f1', '#fdfefe', '#fbfcfc', '#f7f9f9', '#f4f6f7', '#f0f3f4', '#ecf0f1', '#d0d3d4', '#b3b6b7', '#979a9a', '#7b7d7d', ], \"Silver\":[", "'#5499c7', '#2980b9', '#2471a3', '#1f618d', '#1a5276', '#154360,' ], 'Amethyst':[ '#9b59b6', '#f5eef8', '#ebdef0', '#d7bde2', '#c39bd3',", "'#229954', '#1e8449', '#196f3d', '#145a32', ], '<NAME>':[ '#3498db', '#ebf5fb', '#d6eaf8', '#aed6f1', '#85c1e9', '#5dade2', '#3498db',", "'#8e44ad', '#f4ecf7', '#e8daef', '#d2b4de', '#bb8fce', '#a569bd', '#8e44ad', '#7d3c98', '#6c3483', '#5b2c6f', '#4a235a', ], 'Wet", "'#566573', '#2c3e50', '#273746', '#212f3d', '#1c2833', '#17202a', ], 'Sunflower':[ '#f1c40f', '#fef9e7', '#fcf3cf', '#f9e79f', '#f7dc6f',", "'#512e5f', ], 'Wisteria' :[ '#8e44ad', '#f4ecf7', '#e8daef', '#d2b4de', '#bb8fce', '#a569bd', '#8e44ad', '#7d3c98', '#6c3483',", "'#f4f6f6', '#eaeded', '#d5dbdb', '#bfc9ca', '#aab7b8', '#95a5a6', '#839192', '#717d7e', '#5f6a6a', '#4d5656' ], \"Asbestos\" :[", "\"#82e0aa\", \"#58d68d\", \"#2ecc71\", \"#28b463\", \"#239b56\", \"#1d8348\", \"#186a3b\" ], 'Nephritis':[ '#27ae60', '#e9f7ef', '#d4efdf', '#a9dfbf',", "'#c0392b', '#a93226', '#922b21', '#7b241c', '#641e16', ], 'Clouds':[ '#ecf0f1', '#fdfefe', '#fbfcfc', '#f7f9f9', '#f4f6f7', '#f0f3f4',", "'Pumpkin':[ '#d35400', '#fbeee6', '#f6ddcc', '#edbb99', '#e59866', '#dc7633', '#d35400', '#ba4a00', '#a04000', '#873600', '#6e2c00', ],", "'#e8f6f3', '#d0ece7', '#a2d9ce', '#73c6b6', '#45b39d', '#16a085', '#138d75', '#117a65', '#0e6655', '#0b5345' ], \"Emerald\":[ '#2ecc71',", "'#34495e', '#2e4053', '#283747', '#212f3c', '#1b2631', ], 'Midnight Blue':[ '#2c3e50', '#eaecee', '#d5d8dc', '#abb2b9', '#808b96',", "'Wisteria' :[ '#8e44ad', '#f4ecf7', '#e8daef', '#d2b4de', '#bb8fce', '#a569bd', '#8e44ad', '#7d3c98', '#6c3483', '#5b2c6f', '#4a235a',", "'#e74c3c', '#fdedec', '#fadbd8', '#f5b7b1', '#f1948a', '#ec7063', 
'#e74c3c', '#cb4335', '#b03a2e', '#943126', '#78281f', ], 'Pomegranate':[", "'#a93226', '#922b21', '#7b241c', '#641e16', ], 'Clouds':[ '#ecf0f1', '#fdfefe', '#fbfcfc', '#f7f9f9', '#f4f6f7', '#f0f3f4', '#ecf0f1',", "'#7b7d7d', ], \"Silver\":[ '#bdc3c7', '#f8f9f9', '#f2f3f4', '#e5e7e9', '#d7dbdd', '#cacfd2', '#bdc3c7', '#a6acaf', '#909497', '#797d7f',", "color_styles = { \"Turquoise\": [ \"#1abc9c\", \"#e8f8f5\", \"#d1f2eb\", '#a3e4d7', '#76d7c4', '#48c9b0', '#1abc9c', '#17a589',", "'#f5eef8', '#ebdef0', '#d7bde2', '#c39bd3', '#af7ac5', '#9b59b6', '#884ea0', '#76448a', '#633974', '#512e5f', ], 'Wisteria' :[", "'#fdf2e9', '#fae5d3', '#f5cba7', '#f0b27a', '#eb984e', '#e67e22', '#ca6f1e', '#af601a', '#935116', '#784212', ], 'Pumpkin':[ '#d35400',", "'Amethyst':[ '#9b59b6', '#f5eef8', '#ebdef0', '#d7bde2', '#c39bd3', '#af7ac5', '#9b59b6', '#884ea0', '#76448a', '#633974', '#512e5f', ],", "\"#2ecc71\", \"#28b463\", \"#239b56\", \"#1d8348\", \"#186a3b\" ], 'Nephritis':[ '#27ae60', '#e9f7ef', '#d4efdf', '#a9dfbf', '#7dcea0', '#52be80',", "'#af601a', '#935116', '#784212', ], 'Pumpkin':[ '#d35400', '#fbeee6', '#f6ddcc', '#edbb99', '#e59866', '#dc7633', '#d35400', '#ba4a00',", "'#873600', '#6e2c00', ], 'Alizarin':[ '#e74c3c', '#fdedec', '#fadbd8', '#f5b7b1', '#f1948a', '#ec7063', '#e74c3c', '#cb4335', '#b03a2e',", "'Midnight Blue':[ '#2c3e50', '#eaecee', '#d5d8dc', '#abb2b9', '#808b96', '#566573', '#2c3e50', '#273746', '#212f3d', '#1c2833', '#17202a',", "'#fef9e7', '#fcf3cf', '#f9e79f', '#f7dc6f', '#f4d03f', '#f1c40f', '#d4ac0d', '#b7950b', '#9a7d0a', '#7d6608', ], 'Orange':[ '#f39c12',", "'#f8c471', '#f5b041', '#f39c12', '#d68910', '#b9770e', '#9c640c', '#7e5109', ], \"Carrot\":[ '#e67e22', '#fdf2e9', '#fae5d3', '#f5cba7',", "], 'Wet Asphalt':[ '#34495e', '#ebedef', '#d6dbdf', '#aeb6bf', '#85929e', '#5d6d7e', '#34495e', '#2e4053', '#283747', '#212f3c',", "'#4a235a', ], 'Wet Asphalt':[ '#34495e', '#ebedef', '#d6dbdf', '#aeb6bf', '#85929e', '#5d6d7e', '#34495e', '#2e4053', 
'#283747',", "'#d4e6f1', '#a9cce3', '#7fb3d5', '#5499c7', '#2980b9', '#2471a3', '#1f618d', '#1a5276', '#154360,' ], 'Amethyst':[ '#9b59b6', '#f5eef8',", "'#a569bd', '#8e44ad', '#7d3c98', '#6c3483', '#5b2c6f', '#4a235a', ], 'Wet Asphalt':[ '#34495e', '#ebedef', '#d6dbdf', '#aeb6bf',", "'Belize Hole':[ '#2980b9', '#eaf2f8', '#d4e6f1', '#a9cce3', '#7fb3d5', '#5499c7', '#2980b9', '#2471a3', '#1f618d', '#1a5276', '#154360,'", "'#f39c12', '#fef5e7', '#fdebd0', '#fad7a0', '#f8c471', '#f5b041', '#f39c12', '#d68910', '#b9770e', '#9c640c', '#7e5109', ], \"Carrot\":[", "], 'Wisteria' :[ '#8e44ad', '#f4ecf7', '#e8daef', '#d2b4de', '#bb8fce', '#a569bd', '#8e44ad', '#7d3c98', '#6c3483', '#5b2c6f',", "'#f1948a', '#ec7063', '#e74c3c', '#cb4335', '#b03a2e', '#943126', '#78281f', ], 'Pomegranate':[ '#c0392b', '#f9ebea', '#f2d7d5', '#e6b0aa',", "'#b03a2e', '#943126', '#78281f', ], 'Pomegranate':[ '#c0392b', '#f9ebea', '#f2d7d5', '#e6b0aa', '#d98880', '#cd6155', '#c0392b', '#a93226',", "'#9c640c', '#7e5109', ], \"Carrot\":[ '#e67e22', '#fdf2e9', '#fae5d3', '#f5cba7', '#f0b27a', '#eb984e', '#e67e22', '#ca6f1e', '#af601a',", "'#f8f9f9', '#f2f3f4', '#e5e7e9', '#d7dbdd', '#cacfd2', '#bdc3c7', '#a6acaf', '#909497', '#797d7f', '#626567', ], \"Concrete\":[ '#95a5a6',", "'#d4efdf', '#a9dfbf', '#7dcea0', '#52be80', '#27ae60', '#229954', '#1e8449', '#196f3d', '#145a32', ], '<NAME>':[ '#3498db', '#ebf5fb',", "\"Silver\":[ '#bdc3c7', '#f8f9f9', '#f2f3f4', '#e5e7e9', '#d7dbdd', '#cacfd2', '#bdc3c7', '#a6acaf', '#909497', '#797d7f', '#626567', ],", "'#eaeded', '#d5dbdb', '#bfc9ca', '#aab7b8', '#95a5a6', '#839192', '#717d7e', '#5f6a6a', '#4d5656' ], \"Asbestos\" :[ '#f2f4f4',", "'#ebf5fb', '#d6eaf8', '#aed6f1', '#85c1e9', '#5dade2', '#3498db', '#2e86c1', '#2874a6', '#21618c', '#1b4f72', ], 'Belize Hole':[", "'#c39bd3', '#af7ac5', '#9b59b6', '#884ea0', '#76448a', '#633974', '#512e5f', ], 'Wisteria' :[ '#8e44ad', '#f4ecf7', '#e8daef',", "'#717d7e', '#5f6a6a', '#4d5656' ], \"Asbestos\" :[ '#f2f4f4', 
'#e5e8e8', '#ccd1d1', '#b2babb', '#99a3a4', '#7f8c8d', '#707b7c',", "'#9a7d0a', '#7d6608', ], 'Orange':[ '#f39c12', '#fef5e7', '#fdebd0', '#fad7a0', '#f8c471', '#f5b041', '#f39c12', '#d68910', '#b9770e',", "], 'Clouds':[ '#ecf0f1', '#fdfefe', '#fbfcfc', '#f7f9f9', '#f4f6f7', '#f0f3f4', '#ecf0f1', '#d0d3d4', '#b3b6b7', '#979a9a', '#7b7d7d',", "'#0b5345' ], \"Emerald\":[ '#2ecc71', '#eafaf1', \"#d5f5e3\", \"#abebc6\", \"#82e0aa\", \"#58d68d\", \"#2ecc71\", \"#28b463\", \"#239b56\", \"#1d8348\",", "'#f1c40f', '#fef9e7', '#fcf3cf', '#f9e79f', '#f7dc6f', '#f4d03f', '#f1c40f', '#d4ac0d', '#b7950b', '#9a7d0a', '#7d6608', ], 'Orange':[", "], 'Midnight Blue':[ '#2c3e50', '#eaecee', '#d5d8dc', '#abb2b9', '#808b96', '#566573', '#2c3e50', '#273746', '#212f3d', '#1c2833',", "\"#186a3b\" ], 'Nephritis':[ '#27ae60', '#e9f7ef', '#d4efdf', '#a9dfbf', '#7dcea0', '#52be80', '#27ae60', '#229954', '#1e8449', '#196f3d',", "'#148f77', '#117864', '#0e6251'], \"Green Sea\":[ '#16a085', '#e8f6f3', '#d0ece7', '#a2d9ce', '#73c6b6', '#45b39d', '#16a085', '#138d75',", "'<NAME>':[ '#3498db', '#ebf5fb', '#d6eaf8', '#aed6f1', '#85c1e9', '#5dade2', '#3498db', '#2e86c1', '#2874a6', '#21618c', '#1b4f72', ],", "'#d35400', '#fbeee6', '#f6ddcc', '#edbb99', '#e59866', '#dc7633', '#d35400', '#ba4a00', '#a04000', '#873600', '#6e2c00', ], 'Alizarin':[", "'#d98880', '#cd6155', '#c0392b', '#a93226', '#922b21', '#7b241c', '#641e16', ], 'Clouds':[ '#ecf0f1', '#fdfefe', '#fbfcfc', '#f7f9f9',", "], \"Carrot\":[ '#e67e22', '#fdf2e9', '#fae5d3', '#f5cba7', '#f0b27a', '#eb984e', '#e67e22', '#ca6f1e', '#af601a', '#935116', '#784212',", "], 'Orange':[ '#f39c12', '#fef5e7', '#fdebd0', '#fad7a0', '#f8c471', '#f5b041', '#f39c12', '#d68910', '#b9770e', '#9c640c', '#7e5109',", "'#784212', ], 'Pumpkin':[ '#d35400', '#fbeee6', '#f6ddcc', '#edbb99', '#e59866', '#dc7633', '#d35400', '#ba4a00', '#a04000', '#873600',", "'#ca6f1e', '#af601a', '#935116', '#784212', ], 'Pumpkin':[ '#d35400', '#fbeee6', '#f6ddcc', '#edbb99', 
'#e59866', '#dc7633', '#d35400',", "'#f4ecf7', '#e8daef', '#d2b4de', '#bb8fce', '#a569bd', '#8e44ad', '#7d3c98', '#6c3483', '#5b2c6f', '#4a235a', ], 'Wet Asphalt':[", "'#145a32', ], '<NAME>':[ '#3498db', '#ebf5fb', '#d6eaf8', '#aed6f1', '#85c1e9', '#5dade2', '#3498db', '#2e86c1', '#2874a6', '#21618c',", "'#884ea0', '#76448a', '#633974', '#512e5f', ], 'Wisteria' :[ '#8e44ad', '#f4ecf7', '#e8daef', '#d2b4de', '#bb8fce', '#a569bd',", "'#6c3483', '#5b2c6f', '#4a235a', ], 'Wet Asphalt':[ '#34495e', '#ebedef', '#d6dbdf', '#aeb6bf', '#85929e', '#5d6d7e', '#34495e',", "'#a9dfbf', '#7dcea0', '#52be80', '#27ae60', '#229954', '#1e8449', '#196f3d', '#145a32', ], '<NAME>':[ '#3498db', '#ebf5fb', '#d6eaf8',", "], 'Alizarin':[ '#e74c3c', '#fdedec', '#fadbd8', '#f5b7b1', '#f1948a', '#ec7063', '#e74c3c', '#cb4335', '#b03a2e', '#943126', '#78281f',", "'#bb8fce', '#a569bd', '#8e44ad', '#7d3c98', '#6c3483', '#5b2c6f', '#4a235a', ], 'Wet Asphalt':[ '#34495e', '#ebedef', '#d6dbdf',", "'#f2d7d5', '#e6b0aa', '#d98880', '#cd6155', '#c0392b', '#a93226', '#922b21', '#7b241c', '#641e16', ], 'Clouds':[ '#ecf0f1', '#fdfefe',", "'#922b21', '#7b241c', '#641e16', ], 'Clouds':[ '#ecf0f1', '#fdfefe', '#fbfcfc', '#f7f9f9', '#f4f6f7', '#f0f3f4', '#ecf0f1', '#d0d3d4',", ":[ '#f2f4f4', '#e5e8e8', '#ccd1d1', '#b2babb', '#99a3a4', '#7f8c8d', '#707b7c', '#616a6b', '#515a5a', '#424949' ] }", "\"#d1f2eb\", '#a3e4d7', '#76d7c4', '#48c9b0', '#1abc9c', '#17a589', '#148f77', '#117864', '#0e6251'], \"Green Sea\":[ '#16a085', '#e8f6f3',", "'#2874a6', '#21618c', '#1b4f72', ], 'Belize Hole':[ '#2980b9', '#eaf2f8', '#d4e6f1', '#a9cce3', '#7fb3d5', '#5499c7', '#2980b9',", "'#76d7c4', '#48c9b0', '#1abc9c', '#17a589', '#148f77', '#117864', '#0e6251'], \"Green Sea\":[ '#16a085', '#e8f6f3', '#d0ece7', '#a2d9ce',", "'#d7bde2', '#c39bd3', '#af7ac5', '#9b59b6', '#884ea0', '#76448a', '#633974', '#512e5f', ], 'Wisteria' :[ '#8e44ad', '#f4ecf7',", "'#212f3c', '#1b2631', ], 'Midnight Blue':[ '#2c3e50', '#eaecee', '#d5d8dc', 
'#abb2b9', '#808b96', '#566573', '#2c3e50', '#273746',", "'#2c3e50', '#eaecee', '#d5d8dc', '#abb2b9', '#808b96', '#566573', '#2c3e50', '#273746', '#212f3d', '#1c2833', '#17202a', ], 'Sunflower':[", "'#d4ac0d', '#b7950b', '#9a7d0a', '#7d6608', ], 'Orange':[ '#f39c12', '#fef5e7', '#fdebd0', '#fad7a0', '#f8c471', '#f5b041', '#f39c12',", "'#1b4f72', ], 'Belize Hole':[ '#2980b9', '#eaf2f8', '#d4e6f1', '#a9cce3', '#7fb3d5', '#5499c7', '#2980b9', '#2471a3', '#1f618d',", "'#808b96', '#566573', '#2c3e50', '#273746', '#212f3d', '#1c2833', '#17202a', ], 'Sunflower':[ '#f1c40f', '#fef9e7', '#fcf3cf', '#f9e79f',", "'#fad7a0', '#f8c471', '#f5b041', '#f39c12', '#d68910', '#b9770e', '#9c640c', '#7e5109', ], \"Carrot\":[ '#e67e22', '#fdf2e9', '#fae5d3',", "'#5dade2', '#3498db', '#2e86c1', '#2874a6', '#21618c', '#1b4f72', ], 'Belize Hole':[ '#2980b9', '#eaf2f8', '#d4e6f1', '#a9cce3',", "'#3498db', '#2e86c1', '#2874a6', '#21618c', '#1b4f72', ], 'Belize Hole':[ '#2980b9', '#eaf2f8', '#d4e6f1', '#a9cce3', '#7fb3d5',", "'#a04000', '#873600', '#6e2c00', ], 'Alizarin':[ '#e74c3c', '#fdedec', '#fadbd8', '#f5b7b1', '#f1948a', '#ec7063', '#e74c3c', '#cb4335',", "\"#239b56\", \"#1d8348\", \"#186a3b\" ], 'Nephritis':[ '#27ae60', '#e9f7ef', '#d4efdf', '#a9dfbf', '#7dcea0', '#52be80', '#27ae60', '#229954',", "Asphalt':[ '#34495e', '#ebedef', '#d6dbdf', '#aeb6bf', '#85929e', '#5d6d7e', '#34495e', '#2e4053', '#283747', '#212f3c', '#1b2631', ],", "'#a6acaf', '#909497', '#797d7f', '#626567', ], \"Concrete\":[ '#95a5a6', '#f4f6f6', '#eaeded', '#d5dbdb', '#bfc9ca', '#aab7b8', '#95a5a6',", "Hole':[ '#2980b9', '#eaf2f8', '#d4e6f1', '#a9cce3', '#7fb3d5', '#5499c7', '#2980b9', '#2471a3', '#1f618d', '#1a5276', '#154360,' ],", "[ \"#1abc9c\", \"#e8f8f5\", \"#d1f2eb\", '#a3e4d7', '#76d7c4', '#48c9b0', '#1abc9c', '#17a589', '#148f77', '#117864', '#0e6251'], \"Green", "'#a2d9ce', '#73c6b6', '#45b39d', '#16a085', '#138d75', '#117a65', '#0e6655', '#0b5345' ], \"Emerald\":[ '#2ecc71', '#eafaf1', \"#d5f5e3\",", 
"'#85c1e9', '#5dade2', '#3498db', '#2e86c1', '#2874a6', '#21618c', '#1b4f72', ], 'Belize Hole':[ '#2980b9', '#eaf2f8', '#d4e6f1',", "\"#1d8348\", \"#186a3b\" ], 'Nephritis':[ '#27ae60', '#e9f7ef', '#d4efdf', '#a9dfbf', '#7dcea0', '#52be80', '#27ae60', '#229954', '#1e8449',", "\"Emerald\":[ '#2ecc71', '#eafaf1', \"#d5f5e3\", \"#abebc6\", \"#82e0aa\", \"#58d68d\", \"#2ecc71\", \"#28b463\", \"#239b56\", \"#1d8348\", \"#186a3b\" ],", "'#e74c3c', '#cb4335', '#b03a2e', '#943126', '#78281f', ], 'Pomegranate':[ '#c0392b', '#f9ebea', '#f2d7d5', '#e6b0aa', '#d98880', '#cd6155',", "'#d0d3d4', '#b3b6b7', '#979a9a', '#7b7d7d', ], \"Silver\":[ '#bdc3c7', '#f8f9f9', '#f2f3f4', '#e5e7e9', '#d7dbdd', '#cacfd2', '#bdc3c7',", "'#2ecc71', '#eafaf1', \"#d5f5e3\", \"#abebc6\", \"#82e0aa\", \"#58d68d\", \"#2ecc71\", \"#28b463\", \"#239b56\", \"#1d8348\", \"#186a3b\" ], 'Nephritis':[", "'#17202a', ], 'Sunflower':[ '#f1c40f', '#fef9e7', '#fcf3cf', '#f9e79f', '#f7dc6f', '#f4d03f', '#f1c40f', '#d4ac0d', '#b7950b', '#9a7d0a',", "\"Turquoise\": [ \"#1abc9c\", \"#e8f8f5\", \"#d1f2eb\", '#a3e4d7', '#76d7c4', '#48c9b0', '#1abc9c', '#17a589', '#148f77', '#117864', '#0e6251'],", "'#4d5656' ], \"Asbestos\" :[ '#f2f4f4', '#e5e8e8', '#ccd1d1', '#b2babb', '#99a3a4', '#7f8c8d', '#707b7c', '#616a6b', '#515a5a',", "'#85929e', '#5d6d7e', '#34495e', '#2e4053', '#283747', '#212f3c', '#1b2631', ], 'Midnight Blue':[ '#2c3e50', '#eaecee', '#d5d8dc',", "'Sunflower':[ '#f1c40f', '#fef9e7', '#fcf3cf', '#f9e79f', '#f7dc6f', '#f4d03f', '#f1c40f', '#d4ac0d', '#b7950b', '#9a7d0a', '#7d6608', ],", "'#138d75', '#117a65', '#0e6655', '#0b5345' ], \"Emerald\":[ '#2ecc71', '#eafaf1', \"#d5f5e3\", \"#abebc6\", \"#82e0aa\", \"#58d68d\", \"#2ecc71\",", "'#abb2b9', '#808b96', '#566573', '#2c3e50', '#273746', '#212f3d', '#1c2833', '#17202a', ], 'Sunflower':[ '#f1c40f', '#fef9e7', '#fcf3cf',", "'Alizarin':[ '#e74c3c', '#fdedec', '#fadbd8', '#f5b7b1', '#f1948a', '#ec7063', '#e74c3c', '#cb4335', '#b03a2e', '#943126', '#78281f', 
],", "'#fbfcfc', '#f7f9f9', '#f4f6f7', '#f0f3f4', '#ecf0f1', '#d0d3d4', '#b3b6b7', '#979a9a', '#7b7d7d', ], \"Silver\":[ '#bdc3c7', '#f8f9f9',", "'#f39c12', '#d68910', '#b9770e', '#9c640c', '#7e5109', ], \"Carrot\":[ '#e67e22', '#fdf2e9', '#fae5d3', '#f5cba7', '#f0b27a', '#eb984e',", "'#7d6608', ], 'Orange':[ '#f39c12', '#fef5e7', '#fdebd0', '#fad7a0', '#f8c471', '#f5b041', '#f39c12', '#d68910', '#b9770e', '#9c640c',", "'#2e4053', '#283747', '#212f3c', '#1b2631', ], 'Midnight Blue':[ '#2c3e50', '#eaecee', '#d5d8dc', '#abb2b9', '#808b96', '#566573',", "'#fadbd8', '#f5b7b1', '#f1948a', '#ec7063', '#e74c3c', '#cb4335', '#b03a2e', '#943126', '#78281f', ], 'Pomegranate':[ '#c0392b', '#f9ebea',", "'#641e16', ], 'Clouds':[ '#ecf0f1', '#fdfefe', '#fbfcfc', '#f7f9f9', '#f4f6f7', '#f0f3f4', '#ecf0f1', '#d0d3d4', '#b3b6b7', '#979a9a',", "], 'Pomegranate':[ '#c0392b', '#f9ebea', '#f2d7d5', '#e6b0aa', '#d98880', '#cd6155', '#c0392b', '#a93226', '#922b21', '#7b241c', '#641e16',", "'#e5e7e9', '#d7dbdd', '#cacfd2', '#bdc3c7', '#a6acaf', '#909497', '#797d7f', '#626567', ], \"Concrete\":[ '#95a5a6', '#f4f6f6', '#eaeded',", "'#2c3e50', '#273746', '#212f3d', '#1c2833', '#17202a', ], 'Sunflower':[ '#f1c40f', '#fef9e7', '#fcf3cf', '#f9e79f', '#f7dc6f', '#f4d03f',", "'#d2b4de', '#bb8fce', '#a569bd', '#8e44ad', '#7d3c98', '#6c3483', '#5b2c6f', '#4a235a', ], 'Wet Asphalt':[ '#34495e', '#ebedef',", "\"#d5f5e3\", \"#abebc6\", \"#82e0aa\", \"#58d68d\", \"#2ecc71\", \"#28b463\", \"#239b56\", \"#1d8348\", \"#186a3b\" ], 'Nephritis':[ '#27ae60', '#e9f7ef',", "'#7fb3d5', '#5499c7', '#2980b9', '#2471a3', '#1f618d', '#1a5276', '#154360,' ], 'Amethyst':[ '#9b59b6', '#f5eef8', '#ebdef0', '#d7bde2',", "'#7d3c98', '#6c3483', '#5b2c6f', '#4a235a', ], 'Wet Asphalt':[ '#34495e', '#ebedef', '#d6dbdf', '#aeb6bf', '#85929e', '#5d6d7e',", "'#17a589', '#148f77', '#117864', '#0e6251'], \"Green Sea\":[ '#16a085', '#e8f6f3', '#d0ece7', '#a2d9ce', '#73c6b6', '#45b39d', '#16a085',", "'#eaf2f8', '#d4e6f1', 
'#a9cce3', '#7fb3d5', '#5499c7', '#2980b9', '#2471a3', '#1f618d', '#1a5276', '#154360,' ], 'Amethyst':[ '#9b59b6',", "'#f1c40f', '#d4ac0d', '#b7950b', '#9a7d0a', '#7d6608', ], 'Orange':[ '#f39c12', '#fef5e7', '#fdebd0', '#fad7a0', '#f8c471', '#f5b041',", "'#eb984e', '#e67e22', '#ca6f1e', '#af601a', '#935116', '#784212', ], 'Pumpkin':[ '#d35400', '#fbeee6', '#f6ddcc', '#edbb99', '#e59866',", "'#935116', '#784212', ], 'Pumpkin':[ '#d35400', '#fbeee6', '#f6ddcc', '#edbb99', '#e59866', '#dc7633', '#d35400', '#ba4a00', '#a04000',", "'#f6ddcc', '#edbb99', '#e59866', '#dc7633', '#d35400', '#ba4a00', '#a04000', '#873600', '#6e2c00', ], 'Alizarin':[ '#e74c3c', '#fdedec',", "'#d6eaf8', '#aed6f1', '#85c1e9', '#5dade2', '#3498db', '#2e86c1', '#2874a6', '#21618c', '#1b4f72', ], 'Belize Hole':[ '#2980b9',", "Blue':[ '#2c3e50', '#eaecee', '#d5d8dc', '#abb2b9', '#808b96', '#566573', '#2c3e50', '#273746', '#212f3d', '#1c2833', '#17202a', ],", "'Clouds':[ '#ecf0f1', '#fdfefe', '#fbfcfc', '#f7f9f9', '#f4f6f7', '#f0f3f4', '#ecf0f1', '#d0d3d4', '#b3b6b7', '#979a9a', '#7b7d7d', ],", "'#d35400', '#ba4a00', '#a04000', '#873600', '#6e2c00', ], 'Alizarin':[ '#e74c3c', '#fdedec', '#fadbd8', '#f5b7b1', '#f1948a', '#ec7063',", "'#1a5276', '#154360,' ], 'Amethyst':[ '#9b59b6', '#f5eef8', '#ebdef0', '#d7bde2', '#c39bd3', '#af7ac5', '#9b59b6', '#884ea0', '#76448a',", "'#5b2c6f', '#4a235a', ], 'Wet Asphalt':[ '#34495e', '#ebedef', '#d6dbdf', '#aeb6bf', '#85929e', '#5d6d7e', '#34495e', '#2e4053',", "'#6e2c00', ], 'Alizarin':[ '#e74c3c', '#fdedec', '#fadbd8', '#f5b7b1', '#f1948a', '#ec7063', '#e74c3c', '#cb4335', '#b03a2e', '#943126',", "'#1e8449', '#196f3d', '#145a32', ], '<NAME>':[ '#3498db', '#ebf5fb', '#d6eaf8', '#aed6f1', '#85c1e9', '#5dade2', '#3498db', '#2e86c1',", "'#c0392b', '#f9ebea', '#f2d7d5', '#e6b0aa', '#d98880', '#cd6155', '#c0392b', '#a93226', '#922b21', '#7b241c', '#641e16', ], 'Clouds':[", "'#eaecee', '#d5d8dc', '#abb2b9', '#808b96', '#566573', '#2c3e50', '#273746', '#212f3d', 
'#1c2833', '#17202a', ], 'Sunflower':[ '#f1c40f',", "'#34495e', '#ebedef', '#d6dbdf', '#aeb6bf', '#85929e', '#5d6d7e', '#34495e', '#2e4053', '#283747', '#212f3c', '#1b2631', ], 'Midnight", "'#27ae60', '#e9f7ef', '#d4efdf', '#a9dfbf', '#7dcea0', '#52be80', '#27ae60', '#229954', '#1e8449', '#196f3d', '#145a32', ], '<NAME>':[", "'#f0f3f4', '#ecf0f1', '#d0d3d4', '#b3b6b7', '#979a9a', '#7b7d7d', ], \"Silver\":[ '#bdc3c7', '#f8f9f9', '#f2f3f4', '#e5e7e9', '#d7dbdd',", "'#f2f3f4', '#e5e7e9', '#d7dbdd', '#cacfd2', '#bdc3c7', '#a6acaf', '#909497', '#797d7f', '#626567', ], \"Concrete\":[ '#95a5a6', '#f4f6f6',", "'#b3b6b7', '#979a9a', '#7b7d7d', ], \"Silver\":[ '#bdc3c7', '#f8f9f9', '#f2f3f4', '#e5e7e9', '#d7dbdd', '#cacfd2', '#bdc3c7', '#a6acaf',", "'#626567', ], \"Concrete\":[ '#95a5a6', '#f4f6f6', '#eaeded', '#d5dbdb', '#bfc9ca', '#aab7b8', '#95a5a6', '#839192', '#717d7e', '#5f6a6a',", "'#e67e22', '#fdf2e9', '#fae5d3', '#f5cba7', '#f0b27a', '#eb984e', '#e67e22', '#ca6f1e', '#af601a', '#935116', '#784212', ], 'Pumpkin':[", "'Pomegranate':[ '#c0392b', '#f9ebea', '#f2d7d5', '#e6b0aa', '#d98880', '#cd6155', '#c0392b', '#a93226', '#922b21', '#7b241c', '#641e16', ],", "'#73c6b6', '#45b39d', '#16a085', '#138d75', '#117a65', '#0e6655', '#0b5345' ], \"Emerald\":[ '#2ecc71', '#eafaf1', \"#d5f5e3\", \"#abebc6\",", "'#aed6f1', '#85c1e9', '#5dade2', '#3498db', '#2e86c1', '#2874a6', '#21618c', '#1b4f72', ], 'Belize Hole':[ '#2980b9', '#eaf2f8',", "Sea\":[ '#16a085', '#e8f6f3', '#d0ece7', '#a2d9ce', '#73c6b6', '#45b39d', '#16a085', '#138d75', '#117a65', '#0e6655', '#0b5345' ],", "'#cd6155', '#c0392b', '#a93226', '#922b21', '#7b241c', '#641e16', ], 'Clouds':[ '#ecf0f1', '#fdfefe', '#fbfcfc', '#f7f9f9', '#f4f6f7',", "'#1b2631', ], 'Midnight Blue':[ '#2c3e50', '#eaecee', '#d5d8dc', '#abb2b9', '#808b96', '#566573', '#2c3e50', '#273746', '#212f3d',", "'#2471a3', '#1f618d', '#1a5276', '#154360,' ], 'Amethyst':[ '#9b59b6', '#f5eef8', '#ebdef0', '#d7bde2', '#c39bd3', '#af7ac5', '#9b59b6',", "], 
\"Silver\":[ '#bdc3c7', '#f8f9f9', '#f2f3f4', '#e5e7e9', '#d7dbdd', '#cacfd2', '#bdc3c7', '#a6acaf', '#909497', '#797d7f', '#626567',", "'#ec7063', '#e74c3c', '#cb4335', '#b03a2e', '#943126', '#78281f', ], 'Pomegranate':[ '#c0392b', '#f9ebea', '#f2d7d5', '#e6b0aa', '#d98880',", "], 'Amethyst':[ '#9b59b6', '#f5eef8', '#ebdef0', '#d7bde2', '#c39bd3', '#af7ac5', '#9b59b6', '#884ea0', '#76448a', '#633974', '#512e5f',", "'#e9f7ef', '#d4efdf', '#a9dfbf', '#7dcea0', '#52be80', '#27ae60', '#229954', '#1e8449', '#196f3d', '#145a32', ], '<NAME>':[ '#3498db',", "'#f4f6f7', '#f0f3f4', '#ecf0f1', '#d0d3d4', '#b3b6b7', '#979a9a', '#7b7d7d', ], \"Silver\":[ '#bdc3c7', '#f8f9f9', '#f2f3f4', '#e5e7e9',", "'#21618c', '#1b4f72', ], 'Belize Hole':[ '#2980b9', '#eaf2f8', '#d4e6f1', '#a9cce3', '#7fb3d5', '#5499c7', '#2980b9', '#2471a3',", "\"#e8f8f5\", \"#d1f2eb\", '#a3e4d7', '#76d7c4', '#48c9b0', '#1abc9c', '#17a589', '#148f77', '#117864', '#0e6251'], \"Green Sea\":[ '#16a085',", "'#9b59b6', '#f5eef8', '#ebdef0', '#d7bde2', '#c39bd3', '#af7ac5', '#9b59b6', '#884ea0', '#76448a', '#633974', '#512e5f', ], 'Wisteria'", "'#154360,' ], 'Amethyst':[ '#9b59b6', '#f5eef8', '#ebdef0', '#d7bde2', '#c39bd3', '#af7ac5', '#9b59b6', '#884ea0', '#76448a', '#633974',", "\"#58d68d\", \"#2ecc71\", \"#28b463\", \"#239b56\", \"#1d8348\", \"#186a3b\" ], 'Nephritis':[ '#27ae60', '#e9f7ef', '#d4efdf', '#a9dfbf', '#7dcea0',", "'#797d7f', '#626567', ], \"Concrete\":[ '#95a5a6', '#f4f6f6', '#eaeded', '#d5dbdb', '#bfc9ca', '#aab7b8', '#95a5a6', '#839192', '#717d7e',", "'#943126', '#78281f', ], 'Pomegranate':[ '#c0392b', '#f9ebea', '#f2d7d5', '#e6b0aa', '#d98880', '#cd6155', '#c0392b', '#a93226', '#922b21',", "'#117864', '#0e6251'], \"Green Sea\":[ '#16a085', '#e8f6f3', '#d0ece7', '#a2d9ce', '#73c6b6', '#45b39d', '#16a085', '#138d75', '#117a65',", "'#b9770e', '#9c640c', '#7e5109', ], \"Carrot\":[ '#e67e22', '#fdf2e9', '#fae5d3', '#f5cba7', '#f0b27a', '#eb984e', '#e67e22', '#ca6f1e',", "'#f4d03f', '#f1c40f', 
'#d4ac0d', '#b7950b', '#9a7d0a', '#7d6608', ], 'Orange':[ '#f39c12', '#fef5e7', '#fdebd0', '#fad7a0', '#f8c471',", "'#117a65', '#0e6655', '#0b5345' ], \"Emerald\":[ '#2ecc71', '#eafaf1', \"#d5f5e3\", \"#abebc6\", \"#82e0aa\", \"#58d68d\", \"#2ecc71\", \"#28b463\",", "'#fdfefe', '#fbfcfc', '#f7f9f9', '#f4f6f7', '#f0f3f4', '#ecf0f1', '#d0d3d4', '#b3b6b7', '#979a9a', '#7b7d7d', ], \"Silver\":[ '#bdc3c7',", "'#1abc9c', '#17a589', '#148f77', '#117864', '#0e6251'], \"Green Sea\":[ '#16a085', '#e8f6f3', '#d0ece7', '#a2d9ce', '#73c6b6', '#45b39d',", "'#283747', '#212f3c', '#1b2631', ], 'Midnight Blue':[ '#2c3e50', '#eaecee', '#d5d8dc', '#abb2b9', '#808b96', '#566573', '#2c3e50',", "'#2980b9', '#2471a3', '#1f618d', '#1a5276', '#154360,' ], 'Amethyst':[ '#9b59b6', '#f5eef8', '#ebdef0', '#d7bde2', '#c39bd3', '#af7ac5',", "'#2e86c1', '#2874a6', '#21618c', '#1b4f72', ], 'Belize Hole':[ '#2980b9', '#eaf2f8', '#d4e6f1', '#a9cce3', '#7fb3d5', '#5499c7',", "'#e59866', '#dc7633', '#d35400', '#ba4a00', '#a04000', '#873600', '#6e2c00', ], 'Alizarin':[ '#e74c3c', '#fdedec', '#fadbd8', '#f5b7b1',", "'#fcf3cf', '#f9e79f', '#f7dc6f', '#f4d03f', '#f1c40f', '#d4ac0d', '#b7950b', '#9a7d0a', '#7d6608', ], 'Orange':[ '#f39c12', '#fef5e7',", "], 'Belize Hole':[ '#2980b9', '#eaf2f8', '#d4e6f1', '#a9cce3', '#7fb3d5', '#5499c7', '#2980b9', '#2471a3', '#1f618d', '#1a5276',", "'#e8daef', '#d2b4de', '#bb8fce', '#a569bd', '#8e44ad', '#7d3c98', '#6c3483', '#5b2c6f', '#4a235a', ], 'Wet Asphalt':[ '#34495e',", "\"Carrot\":[ '#e67e22', '#fdf2e9', '#fae5d3', '#f5cba7', '#f0b27a', '#eb984e', '#e67e22', '#ca6f1e', '#af601a', '#935116', '#784212', ],", "'#95a5a6', '#f4f6f6', '#eaeded', '#d5dbdb', '#bfc9ca', '#aab7b8', '#95a5a6', '#839192', '#717d7e', '#5f6a6a', '#4d5656' ], \"Asbestos\"", "'#eafaf1', \"#d5f5e3\", \"#abebc6\", \"#82e0aa\", \"#58d68d\", \"#2ecc71\", \"#28b463\", \"#239b56\", \"#1d8348\", \"#186a3b\" ], 'Nephritis':[ '#27ae60'," ]
[ "from devops_backend.settings import TMP_DIR from rest_framework import viewsets, filters, mixins, status from rest_framework.permissions", "if not (self.request.user.has_perm('servers.login_server') or self.request.user.has_perm(permission_str)): return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) try: Server.objects.filter(ip=ip_addr) except Exception", "render, HttpResponse from utils.tools.tools import unique from devops_backend.settings import TMP_DIR from rest_framework import", "= unique() ssh_key_path = os.path.join(TMP_DIR, filename) if not os.path.isfile(ssh_key_path): with open(ssh_key_path, 'w+') as", "while True: filename = unique() ssh_key_path = os.path.join(TMP_DIR, filename) if not os.path.isfile(ssh_key_path): with", "= 'root' content = { 'host': ip_addr, 'port': port, 'user': user, 'current_user': self.request.user,", "render # Create your views here. from django.shortcuts import render, HttpResponse from utils.tools.tools", "False}, status=status.HTTP_400_BAD_REQUEST) try: port = self.request.query_params['port'] except Exception as e: port = '22'", "index(request): # return render(request, 'index.html') class IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin): permission_classes = (IsAuthenticated, ) def", "**kwargs): remote_addr = self.request.META.get('HTTP_X_FORWARD_FOR') if self.request.META.get('HTTP_X_FORWARD_FOR') else self.request.META.get('REMOTE_ADDR') try: ip_addr = self.request.query_params['ip'] except:", "upload_ssh_key(request): if request.method == 'POST': pkey = request.FILES.get('pkey') ssh_key = pkey.read().decode('utf-8') while True:", "try: ip_addr = self.request.query_params['ip'] except: return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) permission_str = 'servers.login_' +", "'host': ip_addr, 'port': port, 'user': user, 'current_user': self.request.user, 'remote_addr': remote_addr } return render(request,", "def index(request): # return render(request, 'index.html') 
class IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin): permission_classes = (IsAuthenticated, )", "= self.request.META.get('HTTP_X_FORWARD_FOR') if self.request.META.get('HTTP_X_FORWARD_FOR') else self.request.META.get('REMOTE_ADDR') try: ip_addr = self.request.query_params['ip'] except: return Response({\"permission\":", "except Exception as e: return Response({\"permission\": False}, status=status.HTTP_400_BAD_REQUEST) try: port = self.request.query_params['port'] except", "permission_str = 'servers.login_' + ip_addr if not (self.request.user.has_perm('servers.login_server') or self.request.user.has_perm(permission_str)): return Response({\"permission\": False},", "e: port = '22' try: user = self.request.query_params['user'] except Exception as e: user", "def list(self, request, *args, **kwargs): remote_addr = self.request.META.get('HTTP_X_FORWARD_FOR') if self.request.META.get('HTTP_X_FORWARD_FOR') else self.request.META.get('REMOTE_ADDR') try:", "{ 'host': ip_addr, 'port': port, 'user': user, 'current_user': self.request.user, 'remote_addr': remote_addr } return", "if not os.path.isfile(ssh_key_path): with open(ssh_key_path, 'w+') as f: f.write(ssh_key) break else: continue return", "True: filename = unique() ssh_key_path = os.path.join(TMP_DIR, filename) if not os.path.isfile(ssh_key_path): with open(ssh_key_path,", "'servers.login_' + ip_addr if not (self.request.user.has_perm('servers.login_server') or self.request.user.has_perm(permission_str)): return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) try:", "(IsAuthenticated, ) def list(self, request, *args, **kwargs): remote_addr = self.request.META.get('HTTP_X_FORWARD_FOR') if self.request.META.get('HTTP_X_FORWARD_FOR') else", "render(request, 'index.html') class IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin): permission_classes = (IsAuthenticated, ) def list(self, request, *args,", "import os # def index(request): # return render(request, 'index.html') class 
IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin): permission_classes", "= pkey.read().decode('utf-8') while True: filename = unique() ssh_key_path = os.path.join(TMP_DIR, filename) if not", "self.request.META.get('HTTP_X_FORWARD_FOR') else self.request.META.get('REMOTE_ADDR') try: ip_addr = self.request.query_params['ip'] except: return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) permission_str", "Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) try: Server.objects.filter(ip=ip_addr) except Exception as e: return Response({\"permission\": False}, status=status.HTTP_400_BAD_REQUEST)", "Response from servers.models import Server import os # def index(request): # return render(request,", "render(request, 'index.html', content) def upload_ssh_key(request): if request.method == 'POST': pkey = request.FILES.get('pkey') ssh_key", "status from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from servers.models import Server", "False}, status=status.HTTP_403_FORBIDDEN) permission_str = 'servers.login_' + ip_addr if not (self.request.user.has_perm('servers.login_server') or self.request.user.has_perm(permission_str)): return", "django.shortcuts import render # Create your views here. 
from django.shortcuts import render, HttpResponse", "try: user = self.request.query_params['user'] except Exception as e: user = 'root' content =", "'current_user': self.request.user, 'remote_addr': remote_addr } return render(request, 'index.html', content) def upload_ssh_key(request): if request.method", "from rest_framework.response import Response from servers.models import Server import os # def index(request):", "if request.method == 'POST': pkey = request.FILES.get('pkey') ssh_key = pkey.read().decode('utf-8') while True: filename", "status=status.HTTP_403_FORBIDDEN) permission_str = 'servers.login_' + ip_addr if not (self.request.user.has_perm('servers.login_server') or self.request.user.has_perm(permission_str)): return Response({\"permission\":", "ip_addr, 'port': port, 'user': user, 'current_user': self.request.user, 'remote_addr': remote_addr } return render(request, 'index.html',", "'user': user, 'current_user': self.request.user, 'remote_addr': remote_addr } return render(request, 'index.html', content) def upload_ssh_key(request):", "# Create your views here. 
from django.shortcuts import render, HttpResponse from utils.tools.tools import", "else self.request.META.get('REMOTE_ADDR') try: ip_addr = self.request.query_params['ip'] except: return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) permission_str =", "'port': port, 'user': user, 'current_user': self.request.user, 'remote_addr': remote_addr } return render(request, 'index.html', content)", "import IsAuthenticated from rest_framework.response import Response from servers.models import Server import os #", "ssh_key = pkey.read().decode('utf-8') while True: filename = unique() ssh_key_path = os.path.join(TMP_DIR, filename) if", "devops_backend.settings import TMP_DIR from rest_framework import viewsets, filters, mixins, status from rest_framework.permissions import", "'remote_addr': remote_addr } return render(request, 'index.html', content) def upload_ssh_key(request): if request.method == 'POST':", "permission_classes = (IsAuthenticated, ) def list(self, request, *args, **kwargs): remote_addr = self.request.META.get('HTTP_X_FORWARD_FOR') if", "unique from devops_backend.settings import TMP_DIR from rest_framework import viewsets, filters, mixins, status from", "Exception as e: user = 'root' content = { 'host': ip_addr, 'port': port,", "status=status.HTTP_400_BAD_REQUEST) try: port = self.request.query_params['port'] except Exception as e: port = '22' try:", "filename = unique() ssh_key_path = os.path.join(TMP_DIR, filename) if not os.path.isfile(ssh_key_path): with open(ssh_key_path, 'w+')", "django.shortcuts import render, HttpResponse from utils.tools.tools import unique from devops_backend.settings import TMP_DIR from", "rest_framework.response import Response from servers.models import Server import os # def index(request): #", "from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from servers.models import Server import", "list(self, request, *args, **kwargs): remote_addr = 
self.request.META.get('HTTP_X_FORWARD_FOR') if self.request.META.get('HTTP_X_FORWARD_FOR') else self.request.META.get('REMOTE_ADDR') try: ip_addr", "e: user = 'root' content = { 'host': ip_addr, 'port': port, 'user': user,", "viewsets, filters, mixins, status from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from", "self.request.META.get('HTTP_X_FORWARD_FOR') if self.request.META.get('HTTP_X_FORWARD_FOR') else self.request.META.get('REMOTE_ADDR') try: ip_addr = self.request.query_params['ip'] except: return Response({\"permission\": False},", "self.request.query_params['port'] except Exception as e: port = '22' try: user = self.request.query_params['user'] except", "import render, HttpResponse from utils.tools.tools import unique from devops_backend.settings import TMP_DIR from rest_framework", "from utils.tools.tools import unique from devops_backend.settings import TMP_DIR from rest_framework import viewsets, filters,", "= request.FILES.get('pkey') ssh_key = pkey.read().decode('utf-8') while True: filename = unique() ssh_key_path = os.path.join(TMP_DIR,", "import viewsets, filters, mixins, status from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response", "pkey.read().decode('utf-8') while True: filename = unique() ssh_key_path = os.path.join(TMP_DIR, filename) if not os.path.isfile(ssh_key_path):", "return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) try: Server.objects.filter(ip=ip_addr) except Exception as e: return Response({\"permission\": False},", "(self.request.user.has_perm('servers.login_server') or self.request.user.has_perm(permission_str)): return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) try: Server.objects.filter(ip=ip_addr) except Exception as e:", "+ ip_addr if not (self.request.user.has_perm('servers.login_server') or self.request.user.has_perm(permission_str)): return Response({\"permission\": False}, 
status=status.HTTP_403_FORBIDDEN) try: Server.objects.filter(ip=ip_addr)", "'root' content = { 'host': ip_addr, 'port': port, 'user': user, 'current_user': self.request.user, 'remote_addr':", "content = { 'host': ip_addr, 'port': port, 'user': user, 'current_user': self.request.user, 'remote_addr': remote_addr", "Server import os # def index(request): # return render(request, 'index.html') class IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin):", "Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) permission_str = 'servers.login_' + ip_addr if not (self.request.user.has_perm('servers.login_server') or self.request.user.has_perm(permission_str)):", "as e: port = '22' try: user = self.request.query_params['user'] except Exception as e:", "ssh_key_path = os.path.join(TMP_DIR, filename) if not os.path.isfile(ssh_key_path): with open(ssh_key_path, 'w+') as f: f.write(ssh_key)", "import unique from devops_backend.settings import TMP_DIR from rest_framework import viewsets, filters, mixins, status", "content) def upload_ssh_key(request): if request.method == 'POST': pkey = request.FILES.get('pkey') ssh_key = pkey.read().decode('utf-8')", "Exception as e: return Response({\"permission\": False}, status=status.HTTP_400_BAD_REQUEST) try: port = self.request.query_params['port'] except Exception", "as e: user = 'root' content = { 'host': ip_addr, 'port': port, 'user':", "os # def index(request): # return render(request, 'index.html') class IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin): permission_classes =", ") def list(self, request, *args, **kwargs): remote_addr = self.request.META.get('HTTP_X_FORWARD_FOR') if self.request.META.get('HTTP_X_FORWARD_FOR') else self.request.META.get('REMOTE_ADDR')", "= { 'host': ip_addr, 'port': port, 'user': user, 'current_user': self.request.user, 'remote_addr': remote_addr }", "return Response({\"permission\": False}, status=status.HTTP_400_BAD_REQUEST) try: port = self.request.query_params['port'] except 
Exception as e: port", "Response({\"permission\": False}, status=status.HTTP_400_BAD_REQUEST) try: port = self.request.query_params['port'] except Exception as e: port =", "ip_addr = self.request.query_params['ip'] except: return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) permission_str = 'servers.login_' + ip_addr", "os.path.join(TMP_DIR, filename) if not os.path.isfile(ssh_key_path): with open(ssh_key_path, 'w+') as f: f.write(ssh_key) break else:", "request.FILES.get('pkey') ssh_key = pkey.read().decode('utf-8') while True: filename = unique() ssh_key_path = os.path.join(TMP_DIR, filename)", "from rest_framework import viewsets, filters, mixins, status from rest_framework.permissions import IsAuthenticated from rest_framework.response", "def upload_ssh_key(request): if request.method == 'POST': pkey = request.FILES.get('pkey') ssh_key = pkey.read().decode('utf-8') while", "return render(request, 'index.html') class IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin): permission_classes = (IsAuthenticated, ) def list(self, request,", "'index.html') class IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin): permission_classes = (IsAuthenticated, ) def list(self, request, *args, **kwargs):", "= self.request.query_params['port'] except Exception as e: port = '22' try: user = self.request.query_params['user']", "or self.request.user.has_perm(permission_str)): return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) try: Server.objects.filter(ip=ip_addr) except Exception as e: return", "'index.html', content) def upload_ssh_key(request): if request.method == 'POST': pkey = request.FILES.get('pkey') ssh_key =", "= 'servers.login_' + ip_addr if not (self.request.user.has_perm('servers.login_server') or self.request.user.has_perm(permission_str)): return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN)", "IsAuthenticated from rest_framework.response import Response from servers.models import Server import 
os # def", "as e: return Response({\"permission\": False}, status=status.HTTP_400_BAD_REQUEST) try: port = self.request.query_params['port'] except Exception as", "Exception as e: port = '22' try: user = self.request.query_params['user'] except Exception as", "from django.shortcuts import render, HttpResponse from utils.tools.tools import unique from devops_backend.settings import TMP_DIR", "servers.models import Server import os # def index(request): # return render(request, 'index.html') class", "import Server import os # def index(request): # return render(request, 'index.html') class IndexViewSet(viewsets.ViewSet,", "class IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin): permission_classes = (IsAuthenticated, ) def list(self, request, *args, **kwargs): remote_addr", "= '22' try: user = self.request.query_params['user'] except Exception as e: user = 'root'", "= self.request.query_params['user'] except Exception as e: user = 'root' content = { 'host':", "from django.shortcuts import render # Create your views here. 
from django.shortcuts import render,", "filename) if not os.path.isfile(ssh_key_path): with open(ssh_key_path, 'w+') as f: f.write(ssh_key) break else: continue", "port, 'user': user, 'current_user': self.request.user, 'remote_addr': remote_addr } return render(request, 'index.html', content) def", "user, 'current_user': self.request.user, 'remote_addr': remote_addr } return render(request, 'index.html', content) def upload_ssh_key(request): if", "self.request.META.get('REMOTE_ADDR') try: ip_addr = self.request.query_params['ip'] except: return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) permission_str = 'servers.login_'", "rest_framework import viewsets, filters, mixins, status from rest_framework.permissions import IsAuthenticated from rest_framework.response import", "port = self.request.query_params['port'] except Exception as e: port = '22' try: user =", "from servers.models import Server import os # def index(request): # return render(request, 'index.html')", "self.request.query_params['user'] except Exception as e: user = 'root' content = { 'host': ip_addr,", "TMP_DIR from rest_framework import viewsets, filters, mixins, status from rest_framework.permissions import IsAuthenticated from", "False}, status=status.HTTP_403_FORBIDDEN) try: Server.objects.filter(ip=ip_addr) except Exception as e: return Response({\"permission\": False}, status=status.HTTP_400_BAD_REQUEST) try:", "= self.request.query_params['ip'] except: return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) permission_str = 'servers.login_' + ip_addr if", "== 'POST': pkey = request.FILES.get('pkey') ssh_key = pkey.read().decode('utf-8') while True: filename = unique()", "import Response from servers.models import Server import os # def index(request): # return", "try: port = self.request.query_params['port'] except Exception as e: port = '22' try: user", "user = self.request.query_params['user'] except Exception as e: user = 'root' content = {", 
"request, *args, **kwargs): remote_addr = self.request.META.get('HTTP_X_FORWARD_FOR') if self.request.META.get('HTTP_X_FORWARD_FOR') else self.request.META.get('REMOTE_ADDR') try: ip_addr =", "unique() ssh_key_path = os.path.join(TMP_DIR, filename) if not os.path.isfile(ssh_key_path): with open(ssh_key_path, 'w+') as f:", "mixins, status from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from servers.models import", "return render(request, 'index.html', content) def upload_ssh_key(request): if request.method == 'POST': pkey = request.FILES.get('pkey')", "request.method == 'POST': pkey = request.FILES.get('pkey') ssh_key = pkey.read().decode('utf-8') while True: filename =", "your views here. from django.shortcuts import render, HttpResponse from utils.tools.tools import unique from", "not os.path.isfile(ssh_key_path): with open(ssh_key_path, 'w+') as f: f.write(ssh_key) break else: continue return HttpResponse(filename)", "HttpResponse from utils.tools.tools import unique from devops_backend.settings import TMP_DIR from rest_framework import viewsets,", "Server.objects.filter(ip=ip_addr) except Exception as e: return Response({\"permission\": False}, status=status.HTTP_400_BAD_REQUEST) try: port = self.request.query_params['port']", "e: return Response({\"permission\": False}, status=status.HTTP_400_BAD_REQUEST) try: port = self.request.query_params['port'] except Exception as e:", "self.request.query_params['ip'] except: return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) permission_str = 'servers.login_' + ip_addr if not", "pkey = request.FILES.get('pkey') ssh_key = pkey.read().decode('utf-8') while True: filename = unique() ssh_key_path =", "filters, mixins, status from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from servers.models", "except Exception as e: user = 'root' content = { 'host': ip_addr, 'port':", 
"self.request.user.has_perm(permission_str)): return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) try: Server.objects.filter(ip=ip_addr) except Exception as e: return Response({\"permission\":", "= (IsAuthenticated, ) def list(self, request, *args, **kwargs): remote_addr = self.request.META.get('HTTP_X_FORWARD_FOR') if self.request.META.get('HTTP_X_FORWARD_FOR')", "'22' try: user = self.request.query_params['user'] except Exception as e: user = 'root' content", "port = '22' try: user = self.request.query_params['user'] except Exception as e: user =", "remote_addr } return render(request, 'index.html', content) def upload_ssh_key(request): if request.method == 'POST': pkey", "= os.path.join(TMP_DIR, filename) if not os.path.isfile(ssh_key_path): with open(ssh_key_path, 'w+') as f: f.write(ssh_key) break", "Create your views here. from django.shortcuts import render, HttpResponse from utils.tools.tools import unique", "remote_addr = self.request.META.get('HTTP_X_FORWARD_FOR') if self.request.META.get('HTTP_X_FORWARD_FOR') else self.request.META.get('REMOTE_ADDR') try: ip_addr = self.request.query_params['ip'] except: return", "status=status.HTTP_403_FORBIDDEN) try: Server.objects.filter(ip=ip_addr) except Exception as e: return Response({\"permission\": False}, status=status.HTTP_400_BAD_REQUEST) try: port", "self.request.user, 'remote_addr': remote_addr } return render(request, 'index.html', content) def upload_ssh_key(request): if request.method ==", "*args, **kwargs): remote_addr = self.request.META.get('HTTP_X_FORWARD_FOR') if self.request.META.get('HTTP_X_FORWARD_FOR') else self.request.META.get('REMOTE_ADDR') try: ip_addr = self.request.query_params['ip']", "return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) permission_str = 'servers.login_' + ip_addr if not (self.request.user.has_perm('servers.login_server') or", "user = 'root' content = { 'host': ip_addr, 'port': port, 'user': user, 'current_user':", "import 
render # Create your views here. from django.shortcuts import render, HttpResponse from", "here. from django.shortcuts import render, HttpResponse from utils.tools.tools import unique from devops_backend.settings import", "ip_addr if not (self.request.user.has_perm('servers.login_server') or self.request.user.has_perm(permission_str)): return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) try: Server.objects.filter(ip=ip_addr) except", "} return render(request, 'index.html', content) def upload_ssh_key(request): if request.method == 'POST': pkey =", "mixins.ListModelMixin): permission_classes = (IsAuthenticated, ) def list(self, request, *args, **kwargs): remote_addr = self.request.META.get('HTTP_X_FORWARD_FOR')", "except Exception as e: port = '22' try: user = self.request.query_params['user'] except Exception", "if self.request.META.get('HTTP_X_FORWARD_FOR') else self.request.META.get('REMOTE_ADDR') try: ip_addr = self.request.query_params['ip'] except: return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN)", "# return render(request, 'index.html') class IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin): permission_classes = (IsAuthenticated, ) def list(self,", "utils.tools.tools import unique from devops_backend.settings import TMP_DIR from rest_framework import viewsets, filters, mixins,", "IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin): permission_classes = (IsAuthenticated, ) def list(self, request, *args, **kwargs): remote_addr =", "'POST': pkey = request.FILES.get('pkey') ssh_key = pkey.read().decode('utf-8') while True: filename = unique() ssh_key_path", "views here. 
from django.shortcuts import render, HttpResponse from utils.tools.tools import unique from devops_backend.settings", "except: return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) permission_str = 'servers.login_' + ip_addr if not (self.request.user.has_perm('servers.login_server')", "not (self.request.user.has_perm('servers.login_server') or self.request.user.has_perm(permission_str)): return Response({\"permission\": False}, status=status.HTTP_403_FORBIDDEN) try: Server.objects.filter(ip=ip_addr) except Exception as", "rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from servers.models import Server import os", "try: Server.objects.filter(ip=ip_addr) except Exception as e: return Response({\"permission\": False}, status=status.HTTP_400_BAD_REQUEST) try: port =", "import TMP_DIR from rest_framework import viewsets, filters, mixins, status from rest_framework.permissions import IsAuthenticated", "# def index(request): # return render(request, 'index.html') class IndexViewSet(viewsets.ViewSet, mixins.ListModelMixin): permission_classes = (IsAuthenticated," ]
[ "toolbar_xml = etree.Element('toolbar') await set_if_not_none(toolbar_xml, form_vars, 'tb_template', 'template') await set_if_not_none(toolbar_xml, form_vars, 'tb_title', 'title')", "= await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj.get('name'), 'descr': descr}) await obj_names.save() sql =", "True: # if 'obj_names' in parent.data_objects: # caller.data_objects['obj_names'] = parent.data_objects['obj_names'] # caller.data_objects['col_names'] =", "values in init_vals # call form_vars.restore(), which triggers on_clean() # call form_vars.init() with", "form_vars = caller.data_objects['form_vars'] await form_vars.setval('body_xml', body_xml) #----------------------------------------------------------------------------- # body_elem #----------------------------------------------------------------------------- async def load_body_elem(caller,", "called from setup_form 'before_save' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars']", "await form_vars.save() for seq, method_xml in enumerate(methods_xml): # init_vals = {col: method.get_val_from_xml(col, method_xml.get(col))", "tool_xml in enumerate(toolbar_xml): # init_vals = {col: tool.get_val_from_xml(col, tool_xml.get(col)) # for col in", "caller.data_objects['form_vars'] body = caller.data_objects['body'] \"\"\" obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] all_obj =", "obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('memobj_xml', memobjs_xml) #----------------------------------------------------------------------------- # io_parms #----------------------------------------------------------------------------- input_cols =", "form_xml.find('frame') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row') 
init_vals['methods_xml'] =", "init_vals['buttonrow_xml'] = frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') await", "inputs.get_val_from_xml(col, input_xml.get(col)) # for col in input_cols} init_vals = {} for col in", "use this to store attributes - use sub_type columns instead # only use", "in enumerate(inputs_xml): # init_vals = {col: inputs.get_val_from_xml(col, input_xml.get(col)) # for col in input_cols}", "None: toolbar_xml = etree.Element('toolbar') await form_vars.setval('toolbar_xml', toolbar_xml) await form_vars.setval('tb_template', await form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template'))) await", "= await form_vars.getval('methods_xml') if methods_xml is None: methods_xml = etree.Element('frame_methods') await form_vars.setval('methods_xml', methods_xml)", "load_inline(caller, xml): # called from setup_form_inline grid_frame 'on_start_frame' inline_vars = caller.data_objects['inline_vars'] frame_vars =", "\"\"\" for deleted_obj in orig_dbobj: # anything left has been deleted await obj_names.init(init_vals={'name':", "\"\"\" obj_name = dbobj_elem.get('name') table_name = dbobj_elem.get('table_name') db_table = await db.objects.get_db_table( form_defn.context, caller.company,", "descend init_vals = { 'name': inline_xml.get('name'), 'title': inline_xml.get('title'), 'frame_xml': inline_xml.find('frame'), } await inline_vars.init(init_vals=init_vals)", "init_vals['seq'] = seq await dbobj.init(display=False, init_vals=init_vals) await dbobj.save() await dbobj.init() async def dump_db_obj(caller,", "await col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() \"\"\" # \"\"\"", "caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] await 
memcol.delete_all()", "attribute_defaults=True, remove_comments=True, remove_blank_text=True) from common import AibError from common import log, debug async", "if not None or default xml_val = await db_obj.get_val_for_xml(col_name) # returns None if", "form_vars.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for col_defn", "await form_vars.restore() await form_vars.init(init_vals=init_vals, display=False) form_vars.init_vals = {} \"\"\" #----------------------------------------------------------------------------- # db_obj #-----------------------------------------------------------------------------", "# break # parent = parent.parent pass async def load_toolbar(caller, xml): # called", "'name': obj_name, 'descr': descr}) await obj_names.save() sql = ( \"SELECT col_name, short_descr FROM", "= {col: await dbobj.get_val_from_xml(col, obj_xml.get(col)) # for col in dbobj_cols} init_vals = {}", "outputs_xml) #----------------------------------------------------------------------------- # inline forms #----------------------------------------------------------------------------- async def load_inline(caller, xml): # called from", "elem_xml.find('frame_methods')[:] = (await gridframe_vars.getval('methods_xml'))[:] elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] await body.setval('main_object',", "= elem_xml.find('cur_columns') init_vals['filter_xml'] = elem_xml.find('cur_filter') init_vals['sequence_xml'] = elem_xml.find('cur_sequence') init_vals['methods_xml'] = elem_xml.find('grid_methods') await grid_vars.init(init_vals=init_vals)", "conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj.get('name'), 'descr': descr}) await", "caller.data_objects['col_names'] = parent.data_objects['col_names'] # break # parent = parent.parent pass async def load_toolbar(caller,", 
"inline_vars.setval('frame_xml', frame_xml) #----------------------------------------------------------------------------- # toolbar #----------------------------------------------------------------------------- tool_cols = ('type', 'label', 'tip', 'lng', 'name',", "'output_params') frame = etree.SubElement(form_xml, 'frame') etree.SubElement(frame, 'toolbar') etree.SubElement(frame, 'body') etree.SubElement(frame, 'button_row') etree.SubElement(frame, 'frame_methods')", "order=[('seq', False)]) async for _ in all_inputs: input_xml = etree.SubElement(inputs_xml, 'input_param') for col", "form_vars.setval('tb_title', await form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title'))) await form_vars.save() tool = caller.data_objects['tool'] await tool.delete_all() for seq,", "is None: await form_vars.init() await frame_vars.init() return init_vals={} init_vals['dbobj_xml'] = form_xml.find('db_objects') init_vals['memobj_xml'] =", "= caller.data_objects['form_vars'] inputs_xml = await form_vars.getval('inputs_xml') outputs_xml = await form_vars.getval('outputs_xml') inputs = caller.data_objects['inputs']", "\"\"\" \"\"\" obj_name_fld = await body.getfld('obj_name') obj_names = obj_name_fld.foreign_key['tgt_field'].db_obj await obj_names.delete_all() col_name_fld =", "= await form_vars.getval('inputs_xml') outputs_xml = await form_vars.getval('outputs_xml') inputs = caller.data_objects['inputs'] await inputs.delete_all() for", "= caller.data_objects['col_names'] await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj_elem in dbobj_xml.iter('db_obj'): \"\"\"", "'btn_label', 'btn_enabled', 'btn_validate', 'action', 'help_msg', 'nb_label', 'subtype_obj', 'subtype_col', 'data_object', 'growable', 'num_grid_rows', 'cursor_name', 'form_name',", "memcol in memobj.iter('mem_col'): await col_names.init(init_vals={'obj_id': obj_row_id, 'name': memcol.get('col_name'), 'descr': memcol.get('short_descr')}) await col_names.save() \"\"\"", "{} for 
col in dbobj_cols: init_vals[col] = await dbobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq", "all_subtypes = subtypes.select_many(where=[], order=[('subtype_id', False)]) async for _ in all_subtypes: subtype_xml = etree.SubElement(subtypes_xml,", "init_vals['body_xml'] = etree.SubElement(frame_xml, 'body') init_vals['buttonrow_xml'] = etree.SubElement(frame_xml, 'button_row') init_vals['methods_xml'] = etree.SubElement(frame_xml, 'frame_methods') await", "the values init_vals = {} for col_defn in form_vars.db_table.col_list[1:]: # exclude 'row_id' col_name", "= caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) form_xml.set('title', await", "col_names.save() # \"\"\" for deleted_obj in orig_dbobj: # anything left has been deleted", "= {col: memobj.get_val_from_xml(col, obj_xml.get(col)) # for col in memobj_cols} init_vals = {} for", "xsd_parser = etree.XMLParser( schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')), attribute_defaults=True, remove_comments=True, remove_blank_text=True) from common import AibError from", "inline_vars.select_many(where=[], order=[]) async for _ in all_inline: inline_xml = etree.SubElement(form_xml, 'inline_form') inline_xml.set('name', await", "order=[]) async for _ in all_cols: await col_names.init(init_vals={ 'name': await memcol.getval('col_name'), 'descr': await", "= await inline_vars.getval('frame_xml') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row')", "frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) inline_vars = caller.data_objects['inline_vars'] all_inline = inline_vars.select_many(where=[], order=[]) async for", "method, col) await form_vars.setval('methods_xml', methods_xml) 
#----------------------------------------------------------------------------- # body #----------------------------------------------------------------------------- body_cols = ('main_object', 'obj_descr',", "= etree.Element('body') all_body = body.select_many(where=[], order=[('seq', False)]) async for _ in all_body: elem_xml", "caller.data_objects['treeframe_vars'] await body.setval('main_object', await treeframe_vars.getval('main_object')) await body.setval('obj_descr', await treeframe_vars.getval('obj_descr')) await body.setval('combo_type', await treeframe_vars.getval('combo_type'))", "= elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await gridframe_vars.init(init_vals=init_vals) elif elem_type ==", "'required') output_cols = ('name', 'type', 'source') async def load_ioparms(caller, xml): # called from", "await form_vars.getval('outputs_xml') inputs = caller.data_objects['inputs'] await inputs.delete_all() for seq, input_xml in enumerate(inputs_xml): #", "frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) await inline_vars.setval('frame_xml', frame_xml) #----------------------------------------------------------------------------- # toolbar #----------------------------------------------------------------------------- tool_cols = ('type',", "await form_vars.setval('buttonrow_xml', buttonrow_xml) await form_vars.setval('btn_template', await form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template'))) await form_vars.save() button = caller.data_objects['button']", "etree.SubElement(form_xml, 'input_params') etree.SubElement(form_xml, 'output_params') frame = etree.SubElement(form_xml, 'frame') etree.SubElement(frame, 'toolbar') etree.SubElement(frame, 'body') etree.SubElement(frame,", "= (await grid_vars.getval('methods_xml'))[:] elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] # 
await body.setval('main_object',", "def set_if_not_none(elem_xml, db_obj, col_name, attr_name=None): # create attribute on xml element, but only", "treeframe_vars.getval('obj_descr')) await body.setval('combo_type', await treeframe_vars.getval('combo_type')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml,", "await form_vars.getval('inline_xml') # for name, frame_xml in inline_params: # inline_xml = etree.SubElement(form_xml, 'inline_form')", "'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] # await body.setval('main_object', await gridframe_vars.getval('main_object')) # await body.setval('obj_descr', await", "col_name, attr_name=None): # create attribute on xml element, but only if not None", "# mem_obj #----------------------------------------------------------------------------- memobj_cols = ('name', 'descr', 'parent', 'sequence', 'sub_types', 'tree_params', 'actions', 'clone_from')", "#----------------------------------------------------------------------------- # db_obj #----------------------------------------------------------------------------- dbobj_cols = ('name', 'table_name', 'parent', 'fkey', 'cursor', 'is_formview_obj') async", "= {} for col in button_cols: init_vals[col] = await button.get_val_from_xml(col, button_xml.get(col)) init_vals['seq'] =", "False)]) async for _ in all_memcol: memcol_xml = etree.SubElement(memobj_xml, 'mem_col') for col in", "setup_form_inlline grid_frame 'before_save' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] frame_xml = etree.Element('frame') await", "_ in all_buttons: button_xml = etree.SubElement(buttonrow_xml, 'button') for col in button_cols: await set_if_not_none(button_xml,", "tool, col) await form_vars.setval('toolbar_xml', toolbar_xml) #----------------------------------------------------------------------------- # buttonrow #----------------------------------------------------------------------------- 
button_cols = ('btn_id', 'btn_label',", "deleted_obj}) await obj_names.delete() await form_vars.setval('memobj_xml', memobjs_xml) #----------------------------------------------------------------------------- # io_parms #----------------------------------------------------------------------------- input_cols = ('name',", "init_vals=init_vals) await button.save() async def dump_buttonrow(caller, xml): # called from setup_form_buttonrow 'before_save' form_vars", "init_vals['methods_xml'] = etree.SubElement(frame_xml, 'frame_methods') await frame_vars.init(init_vals=init_vals) async def dump_inline(caller, xml): # called from", "elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml,", "'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await treeframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await treeframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] =", "init_vals=init_vals) await method.save() async def dump_methods(caller, xml): # called from setup_form_methods 'before_save' form_vars", "'radio', 'before', 'form_dflt', 'validation', 'after', 'btn_id', 'btn_label', 'btn_enabled', 'btn_validate', 'action', 'help_msg', 'nb_label', 'subtype_obj',", "await gridframe_vars.getval('main_object')) # await body.setval('obj_descr', await gridframe_vars.getval('obj_descr')) if elem_xml is None: elem_xml =", "caller.data_objects['form'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) etree.SubElement(form_xml, 'db_objects') etree.SubElement(form_xml, 'mem_objects') etree.SubElement(form_xml, 'input_params')", "subtype_vars = caller.data_objects['subtype_vars'] await subtype_vars.init(init_vals={ 'subtype_obj': await body.getval('subtype_obj'), 'subtype_col': await body.getval('subtype_col'), 'lng': await", "subtype_xml = 
etree.SubElement(subtypes_xml, 'subtype_body') await set_if_not_none(subtype_xml, subtypes, 'subtype_id') subtype_xml[:] = (await subtypes.getval('body_xml'))[:] elem_xml[:]", "memcol.select_many(where=[], order=[]) async for _ in all_cols: await col_names.init(init_vals={ 'name': await memcol.getval('col_name'), 'descr':", "= {} for col in memcol_cols: init_vals[col] = await memcol.get_val_from_xml(col, memcol_xml.get(col)) init_vals['seq'] =", "= col_names.select_many(where=[], order=[]) async for _ in all_col: print(col_names) print() \"\"\" \"\"\" obj_name_fld", "col) obj_name = await dbobj.getval('name') if obj_name in orig_dbobj: orig_dbobj.remove(obj_name) else: \"\"\" async", "then mem_obj, so n/a }) await obj_names.save() all_cols = memcol.select_many(where=[], order=[]) async for", "form_vars = caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol']", "memcol.getval('col_name'), 'descr': await memcol.getval('short_descr'), 'seq': await memcol.getval('seq')}) await col_names.save() for deleted_obj in orig_memobj:", "set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) inline_vars =", "init_vals['seq'] = seq await button.init(display=False, init_vals=init_vals) await button.save() async def dump_buttonrow(caller, xml): #", "for _ in all_buttons: button_xml = etree.SubElement(buttonrow_xml, 'button') for col in button_cols: await", "__main__ schema_path = os.path.join(os.path.dirname(__main__.__file__), 'schemas') xsd_parser = etree.XMLParser( schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')), attribute_defaults=True, remove_comments=True, remove_blank_text=True)", "inline_xml.get('name'), 'title': 
inline_xml.get('title'), 'frame_xml': inline_xml.find('frame'), } await inline_vars.init(init_vals=init_vals) await inline_vars.save() async def dump_form_xml(caller,", "xml): # called from setup_form_buttonrow 'before_save' form_vars = caller.data_objects['form_vars'] button = caller.data_objects['button'] buttonrow_xml", "init_vals=init_vals) await inputs.save() outputs = caller.data_objects['outputs'] await outputs.delete_all() for seq, output_xml in enumerate(outputs_xml):", "form_xml.iterchildren('inline_form'): # do not descend init_vals = { 'name': inline_xml.get('name'), 'title': inline_xml.get('title'), 'frame_xml':", "'name': await memcol.getval('col_name'), 'descr': await memcol.getval('short_descr'), 'seq': await memcol.getval('seq')}) await col_names.save() for deleted_obj", "setup_form_methods 'on_start_frame' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] await method.delete_all() methods_xml = await", "await treeframe_vars.getval('obj_descr')) await body.setval('combo_type', await treeframe_vars.getval('combo_type')) if elem_xml is None: elem_xml = etree.Element(elem_type)", "== 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await", "body.setval('form_name', await grid_vars.getval('form_name')) # await body.setval('auto_start', await grid_vars.getval('auto_start')) # await body.setval('auto_startrow', await grid_vars.getval('auto_startrow'))", "'col_checks', 'fkey', 'choices', 'sql') async def load_mem_obj(caller, xml): # called from setup_form_memobj 'on_start_frame'", "= caller.data_objects['memcol'] memobjs_xml = etree.Element('mem_objects') all_memobj = memobj.select_many(where=[], order=[('seq', False)]) async for _", "form_vars.setval('methods_xml', methods_xml) #----------------------------------------------------------------------------- # body 
#----------------------------------------------------------------------------- body_cols = ('main_object', 'obj_descr', 'rowspan', 'colspan', 'value',", "await outputs.init(display=False, init_vals=init_vals) await outputs.save() async def dump_ioparms(caller, xml): # called from setup_form_ioparams", "= {} init_vals['elem'] = elem_xml init_vals['type'] = elem_xml.tag init_vals['seq'] = seq for fld", "if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] init_vals={} init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['columns_xml'] =", "'help_msg') async def load_buttonrow(caller, xml): # called from setup_form_buttonrow 'on_start_frame' form_vars = caller.data_objects['form_vars']", "= etree.Element('button_row') await form_vars.setval('buttonrow_xml', buttonrow_xml) await form_vars.setval('btn_template', await form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template'))) await form_vars.save() button", "methods_xml) await form_vars.setval('method_template', await form_vars.get_val_from_xml('method_template', methods_xml.get('template'))) await form_vars.save() for seq, method_xml in enumerate(methods_xml):", "db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql = ( \"SELECT row_id, short_descr FROM", "await method.get_val_from_xml(col, method_xml.get(col)) init_vals['seq'] = seq await method.init(display=False, init_vals=init_vals) await method.save() async def", "col in memcol_cols: init_vals[col] = await memcol.get_val_from_xml(col, memcol_xml.get(col)) init_vals['seq'] = seq await memcol.init(display=False,", "obj_names.select_many(where=[], order=[]) async for _ in all_obj: print(obj_names) all_col = col_names.select_many(where=[], order=[]) async", "instead # only use it to store sub_elements # P.S. 
it is ok", "dbobj, col) obj_name = await dbobj.getval('name') if obj_name in orig_dbobj: orig_dbobj.remove(obj_name) else: \"\"\"", "\"\"\" table_name = await dbobj.getval('table_name') db_table = await db.objects.get_db_table( form_vars.context, caller.company, table_name) await", "async for _ in all_obj: print(obj_names) all_col = col_names.select_many(where=[], order=[]) async for _", "body = caller.data_objects['body'] body_xml = etree.Element('body') all_body = body.select_many(where=[], order=[('seq', False)]) async for", "form_vars.init(init_vals=init_vals) obj_names = caller.data_objects['obj_names'] await obj_names.delete_all() col_names = caller.data_objects['col_names'] await col_names.delete_all() dbobj_xml =", "set_if_not_none(input_xml, inputs, col) await form_vars.setval('inputs_xml', inputs_xml) outputs_xml = etree.Element('output_params') outputs = caller.data_objects['outputs'] all_outputs", "await body.setval('subtype_col', await subtype_vars.getval('subtype_col')) await body.setval('lng', await subtype_vars.getval('lng')) if elem_xml is None: elem_xml", "called from setup_form_dbobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') dbobj =", "form_xml.find('mem_objects') init_vals['inputs_xml'] = form_xml.find('input_params') init_vals['outputs_xml'] = form_xml.find('output_params') init_vals['before_start_form'] = await form_vars.get_val_from_xml( 'before_start_form', form_xml.get('before_start_form'))", "'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for col_defn in db_table.col_list: await col_names.init(init_vals={ 'name':", "form_vars.setval('body_xml', body_xml) #----------------------------------------------------------------------------- # body_elem #----------------------------------------------------------------------------- async def load_body_elem(caller, xml): # called from", "memcol_cols: await set_if_not_none(memcol_xml, memcol, col) obj_name = 
await memobj.getval('name') if obj_name in orig_memobj:", "grid_vars.getval('columns_xml'))[:] elem_xml.find('cur_filter')[:] = (await grid_vars.getval('filter_xml'))[:] elem_xml.find('cur_sequence')[:] = (await grid_vars.getval('sequence_xml'))[:] elem_xml.find('grid_methods')[:] = (await grid_vars.getval('methods_xml'))[:]", "form_vars, to trigger on_clean() # however, inline_xml is a 'list' which includes etree", "subtypes = caller.data_objects['subtypes'] await subtypes.delete_all() for subtype in elem_xml.iter('subtype_body'): await subtypes.init(init_vals={ 'subtype_id': subtype.get('subtype_id'),", "'on_clean' for seq, elem_xml in enumerate(body_xml): init_vals = {} init_vals['elem'] = elem_xml init_vals['type']", "set_if_not_none(elem_xml, body, col) elem_xml[:] = (await body.getval('elem'))[:] form_vars = caller.data_objects['form_vars'] await form_vars.setval('body_xml', body_xml)", "await body.getval('type') elem_xml = await body.getval('elem') if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars']", "None: await form_vars.init() await frame_vars.init() return init_vals={} init_vals['dbobj_xml'] = form_xml.find('db_objects') init_vals['memobj_xml'] = form_xml.find('mem_objects')", "caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] memobjs_xml = etree.Element('mem_objects') all_memobj = memobj.select_many(where=[], order=[('seq', False)]) async", "await load_form_xml(caller, xml) #----------------------------------------------------------------------------- # form_funcs #----------------------------------------------------------------------------- async def load_form_xml(caller, xml): # called", "form_vars, 'on_close_form') form_xml.append(await form_vars.getval('dbobj_xml')) form_xml.append(await form_vars.getval('memobj_xml')) form_xml.append(await form_vars.getval('inputs_xml')) form_xml.append(await form_vars.getval('outputs_xml')) frame_xml = etree.SubElement(form_xml,", "is a 'list' which includes etree Elements # 
this cannot be serialised to", "caller.data_objects['method'] methods_xml = etree.Element('frame_methods') await set_if_not_none(methods_xml, form_vars, 'method_template', 'template') all_methods = method.select_many(where=[], order=[('seq',", "memcol_xml.get(col)) init_vals['seq'] = seq await memcol.init(display=False, init_vals=init_vals) await memcol.save() await memobj.init() await memobj.init()", "toolbar_xml.get('title'))) await form_vars.save() tool = caller.data_objects['tool'] await tool.delete_all() for seq, tool_xml in enumerate(toolbar_xml):", "= {col: tool.get_val_from_xml(col, tool_xml.get(col)) # for col in tool_cols} init_vals = {} for", "!= 'virt' \" \"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') \" .format(caller.company, table_id)", "= frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') else: frame_xml = etree.Element('frame') init_vals={}", "await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj.get('name'), 'descr': descr})", "set_if_not_none(memobj_xml, memobj, col) all_memcol = memcol.select_many(where=[], order=[('seq', False)]) async for _ in all_memcol:", "in tool_cols: init_vals[col] = await tool.get_val_from_xml(col, tool_xml.get(col)) init_vals['seq'] = seq await tool.init(display=False, init_vals=init_vals)", "obj_names = caller.data_objects['obj_names'] await obj_names.delete_all() col_names = caller.data_objects['col_names'] await col_names.delete_all() dbobj_xml = await", "seq}) await col_names.save() frame_xml = form_xml.find('frame') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body')", "caller.data_objects['dbobj'] await dbobj.delete_all() for seq, obj_xml in enumerate(dbobj_xml): # init_vals = {col: await", "setup_form_methods 'before_save' form_vars = caller.data_objects['form_vars'] method = 
caller.data_objects['method'] methods_xml = etree.Element('frame_methods') await set_if_not_none(methods_xml,", "values from 'body' on loading, and replace the values # in 'body' on", "body = caller.data_objects['body'] # N.B. do not use this to store attributes -", "etree.SubElement(form_xml, 'mem_objects') etree.SubElement(form_xml, 'input_params') etree.SubElement(form_xml, 'output_params') frame = etree.SubElement(form_xml, 'frame') etree.SubElement(frame, 'toolbar') etree.SubElement(frame,", "gui link (cleaner?) async def dump_body(caller, xml): # called from setup_form_body 'before_save' body", "obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] all_obj = obj_names.select_many(where=[], order=[]) async for _", "# for col in button_cols} init_vals = {} for col in button_cols: init_vals[col]", "elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] await body.setval('main_object', await treeframe_vars.getval('main_object')) await body.setval('obj_descr', await", "back the values init_vals = {} for col_defn in form_vars.db_table.col_list[1:]: # exclude 'row_id'", "order=[('seq', False)]) async for _ in all_dbobj: dbobj_xml = etree.SubElement(dbobjs_xml, 'db_obj') for col", "we want to 'save' form_vars, to trigger on_clean() # however, inline_xml is a", "called from setup_form_body 'before_save' body = caller.data_objects['body'] body_xml = etree.Element('body') all_body = body.select_many(where=[],", "'tip', 'lng', 'name', 'obj_name', 'col_name', 'shortcut', 'action') async def before_start_toolbar(caller, xml): # called", "from setup_form_methods 'on_start_frame' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] await method.delete_all() methods_xml =", "in enumerate(db_table.col_list): await col_names.init(init_vals={'name': col_defn.col_name, 'descr': col_defn.short_descr, 'seq': seq}) await col_names.save() # \"\"\"", "'name': dbobj_elem.get('name'), 'descr': 
descr}) await obj_names.save() sql = ( \"SELECT col_name, short_descr FROM", "'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml'))", "init_vals = {} for col in output_cols: init_vals[col] = await outputs.get_val_from_xml(col, output_xml.get(col)) init_vals['seq']", "etree.Element(elem_type) await body.setval('elem', elem_xml) async def set_if_not_none(elem_xml, db_obj, col_name, attr_name=None): # create attribute", "form_vars.getval('dbobj_xml') dbobj = caller.data_objects['dbobj'] await dbobj.delete_all() for seq, obj_xml in enumerate(dbobj_xml): # init_vals", "sql = ( \"SELECT col_name, short_descr FROM {}.db_columns \" \"WHERE table_id = {}", "async def load_methods(caller, xml): # called from setup_form_methods 'on_start_frame' form_vars = caller.data_objects['form_vars'] method", "= {} for col in input_cols: init_vals[col] = await inputs.get_val_from_xml(col, input_xml.get(col)) init_vals['seq'] =", "table_id = {} \" \"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') \" \"ORDER", "db_table.short_descr}) await obj_names.save() for seq, col_defn in enumerate(db_table.col_list): await col_names.init(init_vals={'name': col_defn.col_name, 'descr': col_defn.short_descr,", "elem_xml.get(fld.col_name)) if val is not None: init_vals[fld.col_name] = val await body.init(display=False, init_vals=init_vals) await", "init_vals = {} for col in memobj_cols: init_vals[col] = await memobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq']", "\"SELECT col_name, short_descr FROM {}.db_columns \" \"WHERE table_id = {} \" \"AND col_name", "only use it to store sub_elements # P.S. 
it is ok to store", "await form_vars.getval('memobj_xml') orig_memobj = set((memobj.get('name') for memobj in memobj_xml)) obj_names = caller.data_objects['obj_names'] col_names", "schema try: etree.fromstring(etree.tostring(form_xml), parser=xsd_parser) except (etree.XMLSyntaxError, ValueError, TypeError) as e: raise AibError(head='XmlError', body=e.args[0])", "frame_xml = await inline_vars.getval('frame_xml') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] =", "'descr': memcol.get('short_descr')}) await col_names.save() \"\"\" body_xml = await form_vars.getval('body_xml') if body_xml is None:", "form_defn.setval('form_xml', form_xml) await load_form_xml(caller, xml) #----------------------------------------------------------------------------- # form_funcs #----------------------------------------------------------------------------- async def load_form_xml(caller, xml):", "obj_names.save() sql = ( \"SELECT col_name, short_descr FROM {}.db_columns \" \"WHERE table_id =", "async def before_start_body(caller, xml): # called from setup_form_body 'before_start_form' # parent = caller.parent", "schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')), attribute_defaults=True, remove_comments=True, remove_blank_text=True) from common import AibError from common import log,", "'name': memcol.get('col_name'), 'descr': memcol.get('short_descr')}) await col_names.save() \"\"\" body_xml = await form_vars.getval('body_xml') if body_xml", "'col_head', 'key_field', 'data_source', 'condition', 'allow_null', 'allow_amend', 'max_len', 'db_scale', 'scale_ptr', 'dflt_val', 'dflt_rule', 'col_checks', 'fkey',", "init_vals = {} init_vals['elem'] = elem_xml init_vals['type'] = elem_xml.tag init_vals['seq'] = seq for", "memobj.getval('descr'), # 'seq': await memobj.getval('seq'), # seq is db_obj then mem_obj, so n/a", "await body.setval('form_name', await grid_vars.getval('form_name')) 
# await body.setval('auto_start', await grid_vars.getval('auto_start')) # await body.setval('auto_startrow', await", "init_vals['combo_type'] = await body.getval('combo_type') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row')", "col_names = caller.data_objects['col_names'] all_obj = obj_names.select_many(where=[], order=[]) async for _ in all_obj: print(obj_names)", "await form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template'))) await form_vars.setval('tb_title', await form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title'))) await form_vars.save() tool = caller.data_objects['tool']", "= (await treeframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await treeframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await treeframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] =", "dump_form_xml(caller, xml): # called from setup_form 'before_save' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars']", "anything left has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('memobj_xml', memobjs_xml)", "form_defn = caller.data_objects['form'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) etree.SubElement(form_xml, 'db_objects') etree.SubElement(form_xml, 'mem_objects')", "etree.SubElement(frame_xml, 'frame_methods') await frame_vars.init(init_vals=init_vals) async def dump_inline(caller, xml): # called from setup_form_inlline grid_frame", "load_methods(caller, xml): # called from setup_form_methods 'on_start_frame' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method']", "not None: init_vals[fld.col_name] = val await body.init(display=False, init_vals=init_vals) await body.save(from_upd_on_save=True) # a trick", 
"elem_xml.find('toolbar')[:] = (await treeframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await treeframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await treeframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:]", "await memobj.getval('descr'), # 'seq': await memobj.getval('seq'), # seq is db_obj then mem_obj, so", "= etree.SubElement(frame_xml, 'button_row') init_vals['methods_xml'] = etree.SubElement(frame_xml, 'frame_methods') await frame_vars.init(init_vals=init_vals) async def dump_inline(caller, xml):", "elem_xml.find('toolbar')[:] = (await gridframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await gridframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await gridframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:]", "await inline_vars.init(init_vals=init_vals) await inline_vars.save() async def dump_form_xml(caller, xml): # called from setup_form 'before_save'", "caller.data_objects['inline_vars'] await inline_vars.delete_all() form_xml = await form_defn.getval('form_xml') if form_xml is None: await form_vars.init()", "await inputs.get_val_from_xml(col, input_xml.get(col)) init_vals['seq'] = seq await inputs.init(display=False, init_vals=init_vals) await inputs.save() outputs =", "xml_val is not None: if attr_name is None: # if not specified, use", "init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await treeframe_vars.init(init_vals=init_vals) elif elem_type == 'subtype_frame': subtype_vars", "OD from lxml import etree # parser = etree.XMLParser(remove_blank_text=True) import db.objects import db.api", "xml): # called from setup_form 'before_save' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars", "an alias, without gui link (cleaner?) 
async def dump_body(caller, xml): # called from", "tool_cols: init_vals[col] = await tool.get_val_from_xml(col, tool_xml.get(col)) init_vals['seq'] = seq await tool.init(display=False, init_vals=init_vals) await", "await memcol.save() await memobj.init() await memobj.init() async def dump_mem_obj(caller, xml): # called from", "etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) form_xml.set('title', await form_defn.getval('title')) await set_if_not_none(form_xml, form_vars, 'before_start_form') await set_if_not_none(form_xml,", "init_vals['methods_xml'] = elem_xml.find('frame_methods') await gridframe_vars.init(init_vals=init_vals) elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] init_vals={}", "init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await gridframe_vars.init(init_vals=init_vals) elif elem_type", "'input_param') for col in input_cols: await set_if_not_none(input_xml, inputs, col) await form_vars.setval('inputs_xml', inputs_xml) outputs_xml", "= parent.data_objects['obj_names'] # caller.data_objects['col_names'] = parent.data_objects['col_names'] # break # parent = parent.parent pass", "in body.sub_types['type'][elem_xml.tag]: val = await body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name)) if val is not None: init_vals[fld.col_name]", "on xml element, but only if not None or default xml_val = await", "etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await gridframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:]", "else: frame_xml = etree.Element('frame') init_vals={} init_vals['toolbar_xml'] = etree.SubElement(frame_xml, 'toolbar') init_vals['body_xml'] = etree.SubElement(frame_xml, 'body')", "log, debug async def 
init_xml(caller, xml): # called from setup_form after form_name if", "= inputs.select_many(where=[], order=[('seq', False)]) async for _ in all_inputs: input_xml = etree.SubElement(inputs_xml, 'input_param')", "xml) #----------------------------------------------------------------------------- # form_funcs #----------------------------------------------------------------------------- async def load_form_xml(caller, xml): # called from setup_form", "enumerate(methods_xml): # init_vals = {col: method.get_val_from_xml(col, method_xml.get(col)) # for col in method_cols} init_vals", "await method.init(display=False, init_vals=init_vals) await method.save() async def dump_methods(caller, xml): # called from setup_form_methods", "fld in body.sub_types['type'][elem_xml.tag]: val = await body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name)) if val is not None:", "= await memcol.get_val_from_xml(col, memcol_xml.get(col)) init_vals['seq'] = seq await memcol.init(display=False, init_vals=init_vals) await memcol.save() await", "etree.SubElement(frame, 'frame_methods') await form_defn.setval('form_xml', form_xml) await load_form_xml(caller, xml) #----------------------------------------------------------------------------- # form_funcs #----------------------------------------------------------------------------- async", "# this cannot be serialised to JSON, so the save fails # the", "seq, obj_xml in enumerate(memobj_xml): # init_vals = {col: memobj.get_val_from_xml(col, obj_xml.get(col)) # for col", "setup_form_frame.toolbar 'before_save' form_vars = caller.data_objects['form_vars'] tool = caller.data_objects['tool'] toolbar_xml = etree.Element('toolbar') await set_if_not_none(toolbar_xml,", "in dbobj_cols: await set_if_not_none(dbobj_xml, dbobj, col) obj_name = await dbobj.getval('name') if obj_name in", "obj_xml in enumerate(dbobj_xml): # init_vals = {col: await dbobj.get_val_from_xml(col, obj_xml.get(col)) # for col", "await grid_vars.getval('auto_start')) # await 
body.setval('auto_startrow', await grid_vars.getval('auto_startrow')) if elem_xml is None: elem_xml =", "caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] inline_vars = caller.data_objects['inline_vars'] await inline_vars.delete_all() form_xml", "etree.Element('body') await form_vars.setval('body_xml', body_xml) await body.delete_all(from_upd_on_save=True) # a trick to prevent running 'on_clean'", "# update form_definition with new form_xml await form_defn.setval('form_xml', form_xml) \"\"\" # the next", "dbobj.getval('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={", "orig_dbobj: # anything left has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await", "'toolbar', 'combo_type', 'group_name', 'member_name', 'pyfunc', 'prev', 'align', 'src', 'op', 'tgt') async def before_start_body(caller,", "in dbobj_xml.iter('db_obj'): async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql = (", "memobj, col) all_memcol = memcol.select_many(where=[], order=[('seq', False)]) async for _ in all_memcol: memcol_xml", "= etree.XMLParser(remove_blank_text=True) import db.objects import db.api db_session = db.api.start_db_session() # need independent connection", "input_xml in enumerate(inputs_xml): # init_vals = {col: inputs.get_val_from_xml(col, input_xml.get(col)) # for col in", "= elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await gridframe_vars.init(init_vals=init_vals) elif elem_type == 'tree_frame': treeframe_vars =", "grid_vars.getval('auto_startrow')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'cur_columns') etree.SubElement(elem_xml,", "memobj_xml = await form_vars.getval('memobj_xml') orig_memobj = set((memobj.get('name') for memobj in memobj_xml)) 
obj_names =", "for seq, button_xml in enumerate(buttonrow_xml): # init_vals = {col: button.get_val_from_xml(col, button_xml.get(col)) # for", "'descr': await memobj.getval('descr'), # 'seq': await memobj.getval('seq'), # seq is db_obj then mem_obj,", "'pwd', 'readonly', 'choice', 'lookup', 'radio', 'before', 'form_dflt', 'validation', 'after', 'btn_id', 'btn_label', 'btn_enabled', 'btn_validate',", "methods_xml) #----------------------------------------------------------------------------- # body #----------------------------------------------------------------------------- body_cols = ('main_object', 'obj_descr', 'rowspan', 'colspan', 'value', 'obj_name',", "{} for col_defn in form_vars.db_table.col_list[1:]: # exclude 'row_id' col_name = col_defn.col_name init_vals[col_name] =", "- use sub_type columns instead # only use it to store sub_elements #", "seq, col_defn in enumerate(db_table.col_list): await col_names.init(init_vals={'name': col_defn.col_name, 'descr': col_defn.short_descr, 'seq': seq}) await col_names.save()", "if obj_name in orig_dbobj: orig_dbobj.remove(obj_name) else: \"\"\" async with db_session.get_connection() as db_mem_conn: conn", "'cur_filter') etree.SubElement(elem_xml, 'cur_sequence') etree.SubElement(elem_xml, 'grid_methods') elem_xml.find('toolbar')[:] = (await grid_vars.getval('toolbar_xml'))[:] elem_xml.find('cur_columns')[:] = (await grid_vars.getval('columns_xml'))[:]", "= obj_name_fld.foreign_key['tgt_field'].db_obj await obj_names.delete_all() col_name_fld = await body.getfld('col_name') col_names = col_name_fld.foreign_key['tgt_field'].db_obj await col_names.delete_all()", "db.api db_session = db.api.start_db_session() # need independent connection for reading import os.path import", "deleted_obj in orig_dbobj: # anything left has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await", "= memcol.select_many(where=[], order=[('seq', False)]) async for _ in all_memcol: memcol_xml = etree.SubElement(memobj_xml, 
'mem_col')", "in separate mem_objects, # get the values from 'body' on loading, and replace", "JSON, so the save fails # the trick is as follows - #", "body=e.args[0]) # update form_definition with new form_xml await form_defn.setval('form_xml', form_xml) \"\"\" # the", "'list' which includes etree Elements # this cannot be serialised to JSON, so", "= {} for col_defn in form_vars.db_table.col_list[1:]: # exclude 'row_id' col_name = col_defn.col_name init_vals[col_name]", "await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) inline_vars", "form_vars.get_val_from_xml( 'after_start_form', form_xml.get('after_start_form')) init_vals['on_close_form'] = await form_vars.get_val_from_xml( 'on_close_form', form_xml.get('on_close_form')) await form_vars.init(init_vals=init_vals) obj_names =", "# body_elem #----------------------------------------------------------------------------- async def load_body_elem(caller, xml): # called from setup_form_body.grid_frame 'on_start_frame' body", "await col_names.init(init_vals={ 'name': await memcol.getval('col_name'), 'descr': await memcol.getval('short_descr'), 'seq': await memcol.getval('seq')}) await col_names.save()", "\"\"\" async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql = ( \"SELECT", "await form_vars.save() button = caller.data_objects['button'] await button.delete_all() for seq, button_xml in enumerate(buttonrow_xml): #", "in enumerate(body_xml): init_vals = {} init_vals['elem'] = elem_xml init_vals['type'] = elem_xml.tag init_vals['seq'] =", "store copies of attributes in separate mem_objects, # get the values from 'body'", "init_vals['seq'] = seq await memcol.init(display=False, init_vals=init_vals) await memcol.save() await memobj.init() await memobj.init() 
async", "await obj_names.delete() await form_vars.setval('memobj_xml', memobjs_xml) #----------------------------------------------------------------------------- # io_parms #----------------------------------------------------------------------------- input_cols = ('name', 'type',", "etree.Element('input_params') inputs = caller.data_objects['inputs'] all_inputs = inputs.select_many(where=[], order=[('seq', False)]) async for _ in", "orig_memobj.remove(obj_name) else: await obj_names.init(init_vals={ 'name': obj_name, 'descr': await memobj.getval('descr'), # 'seq': await memobj.getval('seq'),", "form_vars.getval('buttonrow_xml') if buttonrow_xml is None: buttonrow_xml = etree.Element('button_row') await form_vars.setval('buttonrow_xml', buttonrow_xml) await form_vars.setval('btn_template',", "await form_defn.getval('form_name')) etree.SubElement(form_xml, 'db_objects') etree.SubElement(form_xml, 'mem_objects') etree.SubElement(form_xml, 'input_params') etree.SubElement(form_xml, 'output_params') frame = etree.SubElement(form_xml,", "\"\"\" memobj_xml = await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={ 'name': memobj.get('name'),", "= await form_vars.getval('outputs_xml') inputs = caller.data_objects['inputs'] await inputs.delete_all() for seq, input_xml in enumerate(inputs_xml):", "called from setup_form after form_name if form does not exist form_defn = caller.data_objects['form']", "all_tools = tool.select_many(where=[], order=[('seq', False)]) async for _ in all_tools: tool_xml = etree.SubElement(toolbar_xml,", "frame_vars = caller.data_objects['frame_vars'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) form_xml.set('title', await form_defn.getval('title')) await", "= elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await treeframe_vars.init(init_vals=init_vals) elif elem_type == 'subtype_frame': subtype_vars =", 
"'title': inline_xml.get('title'), 'frame_xml': inline_xml.find('frame'), } await inline_vars.init(init_vals=init_vals) await inline_vars.save() async def dump_form_xml(caller, xml):", "caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] inline_vars = caller.data_objects['inline_vars'] await inline_vars.delete_all() form_xml = await form_defn.getval('form_xml')", "values # in 'body' on dumping elem_type = await body.getval('type') elem_xml = await", "'grid': grid_vars = caller.data_objects['grid_vars'] init_vals={} init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['columns_xml'] = elem_xml.find('cur_columns') init_vals['filter_xml'] =", "await dbobj.getval('name') if obj_name in orig_dbobj: orig_dbobj.remove(obj_name) else: \"\"\" async with db_session.get_connection() as", "'col_name', 'lng', 'height', 'pwd', 'readonly', 'choice', 'lookup', 'radio', 'before', 'form_dflt', 'validation', 'after', 'btn_id',", "want to 'save' form_vars, to trigger on_clean() # however, inline_xml is a 'list'", "# get the values from 'body' on loading, and replace the values #", "'op', 'tgt') async def before_start_body(caller, xml): # called from setup_form_body 'before_start_form' # parent", "outputs.delete_all() for seq, output_xml in enumerate(outputs_xml): # init_vals = {col: outputs.get_val_from_xml(col, output_xml.get(col)) #", "{} AND col_type != 'virt' \" \"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id')", "inline forms #----------------------------------------------------------------------------- async def load_inline(caller, xml): # called from setup_form_inline grid_frame 'on_start_frame'", "= caller.data_objects['outputs'] all_outputs = outputs.select_many(where=[], order=[('seq', False)]) async for _ in all_outputs: output_xml", "order=[('seq', False)]) async for _ in all_outputs: output_xml = etree.SubElement(outputs_xml, 'output_param') for col", "obj_names.delete_all() col_names = caller.data_objects['col_names'] await 
col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj_elem in", "'frame_methods') await form_defn.setval('form_xml', form_xml) await load_form_xml(caller, xml) #----------------------------------------------------------------------------- # form_funcs #----------------------------------------------------------------------------- async def", "= db.api.start_db_session() # need independent connection for reading import os.path import __main__ schema_path", "= etree.Element('input_params') inputs = caller.data_objects['inputs'] all_inputs = inputs.select_many(where=[], order=[('seq', False)]) async for _", "is as follows - # save all values in init_vals # call form_vars.restore(),", "\"\"\" # \"\"\" obj_name = dbobj_elem.get('name') table_name = dbobj_elem.get('table_name') db_table = await db.objects.get_db_table(", "set_if_not_none(memcol_xml, memcol, col) obj_name = await memobj.getval('name') if obj_name in orig_memobj: await obj_names.init(init_vals={'name':", "'subtype_id': subtype.get('subtype_id'), 'body_xml': subtype, }) await subtypes.save() async def dump_body_elem(caller, xml): # called", "await cur.__anext__() await obj_names.init(init_vals={ 'name': obj_name, 'descr': descr}) await obj_names.save() sql = (", "memcol_xml.get(col)) # for col in memcol_cols} init_vals = {} for col in memcol_cols:", "= etree.SubElement(memobjs_xml, 'mem_obj') for col in memobj_cols: await set_if_not_none(memobj_xml, memobj, col) all_memcol =", "def dump_buttonrow(caller, xml): # called from setup_form_buttonrow 'before_save' form_vars = caller.data_objects['form_vars'] button =", "init_vals['inputs_xml'] = form_xml.find('input_params') init_vals['outputs_xml'] = form_xml.find('output_params') init_vals['before_start_form'] = await form_vars.get_val_from_xml( 'before_start_form', form_xml.get('before_start_form')) init_vals['after_start_form']", "#'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() memobj_xml = await 
form_vars.getval('memobj_xml') for", "#----------------------------------------------------------------------------- memobj_cols = ('name', 'descr', 'parent', 'sequence', 'sub_types', 'tree_params', 'actions', 'clone_from') memcol_cols =", "setup_form_buttonrow 'before_save' form_vars = caller.data_objects['form_vars'] button = caller.data_objects['button'] buttonrow_xml = etree.Element('button_row') await set_if_not_none(buttonrow_xml,", "obj_names.save() obj_row_id = await obj_names.getval('row_id') for memcol in memobj.iter('mem_col'): await col_names.init(init_vals={'obj_id': obj_row_id, 'name':", "memobj.select_many(where=[], order=[('seq', False)]) async for _ in all_memobj: memobj_xml = etree.SubElement(memobjs_xml, 'mem_obj') for", "form_vars.getval('outputs_xml')) frame_xml = etree.SubElement(form_xml, 'frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr')", "await button.init(display=False, init_vals=init_vals) await button.save() async def dump_buttonrow(caller, xml): # called from setup_form_buttonrow", "async def dump_body(caller, xml): # called from setup_form_body 'before_save' body = caller.data_objects['body'] body_xml", "in memobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] memobj = caller.data_objects['memobj'] memcol =", "= parent.parent pass async def load_toolbar(caller, xml): # called from setup_form_frame.toolbar 'on_start_frame' form_vars", "init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') else: frame_xml = etree.Element('frame')", "await outputs.get_val_from_xml(col, output_xml.get(col)) init_vals['seq'] = seq await outputs.init(display=False, init_vals=init_vals) await outputs.save() async def", "if toolbar_xml is None: toolbar_xml = etree.Element('toolbar') await form_vars.setval('toolbar_xml', 
toolbar_xml) await form_vars.setval('tb_template', await", "#'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() \"\"\" # \"\"\" obj_name =", "'name': col_name, 'descr': descr}) await col_names.save() memobj_xml = await form_vars.getval('memobj_xml') for memobj in", "gridframe_vars.getval('obj_descr')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml,", "init_vals[col_name] = await form_vars.getval(col_name) await form_vars.restore() await form_vars.init(init_vals=init_vals, display=False) form_vars.init_vals = {} \"\"\"", "init_vals['on_close_form'] = await form_vars.get_val_from_xml( 'on_close_form', form_xml.get('on_close_form')) await form_vars.init(init_vals=init_vals) obj_names = caller.data_objects['obj_names'] await obj_names.delete_all()", "elem_xml.find('button_row')[:] = (await gridframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await gridframe_vars.getval('methods_xml'))[:] elif elem_type == 'tree_frame': treeframe_vars", "frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') await frame_vars.init(init_vals=init_vals) for", "= await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={ 'name': memobj.get('name'), 'descr': memobj.get('descr')})", "button_xml in enumerate(buttonrow_xml): # init_vals = {col: button.get_val_from_xml(col, button_xml.get(col)) # for col in", "= etree.Element('frame_methods') await set_if_not_none(methods_xml, form_vars, 'method_template', 'template') all_methods = method.select_many(where=[], order=[('seq', False)]) async", "grid_vars.getval('num_grid_rows')) # await body.setval('cursor_name', await grid_vars.getval('cursor_name')) # await body.setval('form_name', await 
grid_vars.getval('form_name')) # await", "elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await body.setval('subtype_obj', await subtype_vars.getval('subtype_obj')) await body.setval('subtype_col',", "for col in output_cols} init_vals = {} for col in output_cols: init_vals[col] =", "or default xml_val = await db_obj.get_val_for_xml(col_name) # returns None if None or equal", "'pyfunc', 'prev', 'align', 'src', 'op', 'tgt') async def before_start_body(caller, xml): # called from", "col_name, short_descr FROM {}.db_columns \" \"WHERE table_id = {} AND col_type != 'virt'", "= await cur.__anext__() await obj_names.init(init_vals={ 'name': obj_name, 'descr': descr}) await obj_names.save() sql =", "obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() memobj_xml = await form_vars.getval('memobj_xml') for memobj", "for seq, output_xml in enumerate(outputs_xml): # init_vals = {col: outputs.get_val_from_xml(col, output_xml.get(col)) # for", "obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for col_defn in db_table.col_list: await col_names.init(init_vals={", "for seq, memcol in enumerate(memobj.iter('mem_col')): await col_names.init(init_vals={'name': memcol.get('col_name'), 'descr': memcol.get('short_descr'), 'seq': seq}) await", "'do_save' form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') orig_dbobj = set((dbobj.get('name') for dbobj", "col_names.init(init_vals={'name': col_defn.col_name, 'descr': col_defn.short_descr, 'seq': seq}) await col_names.save() # \"\"\" memobj_xml = await", "caller.data_objects['form_vars'] method = caller.data_objects['method'] methods_xml = etree.Element('frame_methods') await set_if_not_none(methods_xml, form_vars, 'method_template', 'template') all_methods", "await col_names.init(init_vals={'obj_id': obj_row_id, 'name': memcol.get('col_name'), 'descr': memcol.get('short_descr')}) await 
col_names.save() \"\"\" body_xml = await", "= memcol.select_many(where=[], order=[]) async for _ in all_cols: await col_names.init(init_vals={ 'name': await memcol.getval('col_name'),", "the values from 'body' on loading, and replace the values # in 'body'", "seq, obj_xml in enumerate(dbobj_xml): # init_vals = {col: await dbobj.get_val_from_xml(col, obj_xml.get(col)) # for", "frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) inline_vars = caller.data_objects['inline_vars'] all_inline = inline_vars.select_many(where=[], order=[]) async", "await form_vars.get_val_from_xml( 'on_close_form', form_xml.get('on_close_form')) await form_vars.init(init_vals=init_vals) obj_names = caller.data_objects['obj_names'] await obj_names.delete_all() col_names =", "set_if_not_none(form_xml, form_vars, 'on_close_form') form_xml.append(await form_vars.getval('dbobj_xml')) form_xml.append(await form_vars.getval('memobj_xml')) form_xml.append(await form_vars.getval('inputs_xml')) form_xml.append(await form_vars.getval('outputs_xml')) frame_xml =", "in memobj_cols: await set_if_not_none(memobj_xml, memobj, col) all_memcol = memcol.select_many(where=[], order=[('seq', False)]) async for", "etree.Element('button_row') await set_if_not_none(buttonrow_xml, form_vars, 'btn_template', 'template') all_buttons = button.select_many(where=[], order=[('seq', False)]) async for", "(await grid_vars.getval('toolbar_xml'))[:] elem_xml.find('cur_columns')[:] = (await grid_vars.getval('columns_xml'))[:] elem_xml.find('cur_filter')[:] = (await grid_vars.getval('filter_xml'))[:] elem_xml.find('cur_sequence')[:] = (await", "dbobj_elem.get('name') table_name = dbobj_elem.get('table_name') db_table = await db.objects.get_db_table( form_defn.context, caller.company, table_name) await obj_names.init(init_vals={", "= await memobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await 
memobj.init(display=False, init_vals=init_vals) await memobj.save() #set", "buttonrow_xml.get('template'))) await form_vars.save() button = caller.data_objects['button'] await button.delete_all() for seq, button_xml in enumerate(buttonrow_xml):", "memobj.getval('name') if obj_name in orig_memobj: await obj_names.init(init_vals={'name': obj_name}) orig_memobj.remove(obj_name) else: await obj_names.init(init_vals={ 'name':", "connection for reading import os.path import __main__ schema_path = os.path.join(os.path.dirname(__main__.__file__), 'schemas') xsd_parser =", "#----------------------------------------------------------------------------- # io_parms #----------------------------------------------------------------------------- input_cols = ('name', 'type', 'target', 'required') output_cols = ('name',", "memobj_xml = await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={ 'name': memobj.get('name'), 'descr':", ".format(caller.company, dbobj.get('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await", "enumerate(body_xml): init_vals = {} init_vals['elem'] = elem_xml init_vals['type'] = elem_xml.tag init_vals['seq'] = seq", "gridframe_vars.getval('methods_xml'))[:] elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] await body.setval('main_object', await treeframe_vars.getval('main_object')) await", "await form_vars.getval('dbobj_xml') orig_dbobj = set((dbobj.get('name') for dbobj in dbobj_xml)) obj_names = caller.data_objects['obj_names'] col_names", "# the next bit is a trick # we want to 'save' form_vars,", "method = caller.data_objects['method'] methods_xml = etree.Element('frame_methods') await set_if_not_none(methods_xml, form_vars, 'method_template', 'template') all_methods =", "'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await 
frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) await inline_vars.setval('frame_xml', frame_xml) #-----------------------------------------------------------------------------", "fails # the trick is as follows - # save all values in", "# called from setup_form_ioparams 'do_save' form_vars = caller.data_objects['form_vars'] inputs_xml = etree.Element('input_params') inputs =", "= caller.data_objects['frame_vars'] frame_xml = etree.Element('frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr')", "#set up memcols for this memobj for seq, memcol_xml in enumerate(obj_xml.iter('mem_col')): # init_vals", "obj_name, 'descr': descr}) await obj_names.save() sql = ( \"SELECT col_name, short_descr FROM {}.db_columns", "caller.data_objects['subtype_vars'] await subtype_vars.init(init_vals={ 'subtype_obj': await body.getval('subtype_obj'), 'subtype_col': await body.getval('subtype_col'), 'lng': await body.getval('lng'), })", "inline_xml.append(frame_xml) # validate result using schema try: etree.fromstring(etree.tostring(form_xml), parser=xsd_parser) except (etree.XMLSyntaxError, ValueError, TypeError)", "grid_vars.getval('methods_xml'))[:] elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] # await body.setval('main_object', await gridframe_vars.getval('main_object'))", "col) await form_vars.setval('inputs_xml', inputs_xml) outputs_xml = etree.Element('output_params') outputs = caller.data_objects['outputs'] all_outputs = outputs.select_many(where=[],", "for _ in all_outputs: output_xml = etree.SubElement(outputs_xml, 'output_param') for col in output_cols: await", "frame_xml = etree.Element('frame') init_vals={} init_vals['toolbar_xml'] = etree.SubElement(frame_xml, 'toolbar') init_vals['body_xml'] = etree.SubElement(frame_xml, 'body') init_vals['buttonrow_xml']", "= (await gridframe_vars.getval('methods_xml'))[:] elif 
elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] await body.setval('main_object', await", "a 'list' which includes etree Elements # this cannot be serialised to JSON,", "etree.Element('frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml'))", "body = caller.data_objects['body'] \"\"\" obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] all_obj = obj_names.select_many(where=[],", "for memobj in memobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] memobj = caller.data_objects['memobj']", "exist form_defn = caller.data_objects['form'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) etree.SubElement(form_xml, 'db_objects') etree.SubElement(form_xml,", "await dbobj.init(display=False, init_vals=init_vals) await dbobj.save() await dbobj.init() async def dump_db_obj(caller, xml): # called", "# in 'body' on dumping elem_type = await body.getval('type') elem_xml = await body.getval('elem')", "'template') all_methods = method.select_many(where=[], order=[('seq', False)]) async for _ in all_methods: method_xml =", "= caller.data_objects['form_vars'] buttonrow_xml = await form_vars.getval('buttonrow_xml') if buttonrow_xml is None: buttonrow_xml = etree.Element('button_row')", "= await memobj.getval('name') if obj_name in orig_memobj: await obj_names.init(init_vals={'name': obj_name}) orig_memobj.remove(obj_name) else: await", "init_vals = {} for col in tool_cols: init_vals[col] = await tool.get_val_from_xml(col, tool_xml.get(col)) init_vals['seq']", "in init_vals # call form_vars.restore(), which triggers on_clean() # call form_vars.init() with init_vals,", "dump_inline(caller, xml): # called from setup_form_inlline grid_frame 
'before_save' inline_vars = caller.data_objects['inline_vars'] frame_vars =", "async def dump_db_obj(caller, xml): # called from setup_form_dbobj 'do_save' form_vars = caller.data_objects['form_vars'] dbobj_xml", "memobj.get('name'), 'descr': memobj.get('descr')}) await obj_names.save() obj_row_id = await obj_names.getval('row_id') for memcol in memobj.iter('mem_col'):", "inputs.save() outputs = caller.data_objects['outputs'] await outputs.delete_all() for seq, output_xml in enumerate(outputs_xml): # init_vals", "def init_xml(caller, xml): # called from setup_form after form_name if form does not", "await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) await", "toolbar_xml.get('template'))) await form_vars.setval('tb_title', await form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title'))) await form_vars.save() tool = caller.data_objects['tool'] await tool.delete_all()", "#----------------------------------------------------------------------------- # inline forms #----------------------------------------------------------------------------- async def load_inline(caller, xml): # called from setup_form_inline", "before_start_toolbar(caller, xml): # called from setup_form_toolbar 'before_start_form' # parent = caller.parent # while", "'body' on loading, and replace the values # in 'body' on dumping elem_type", "prevent running 'on_clean' # could make an alias, without gui link (cleaner?) 
async", "etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await gridframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await gridframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:]", "init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods')", "caller.data_objects['subtype_vars'] await body.setval('subtype_obj', await subtype_vars.getval('subtype_obj')) await body.setval('subtype_col', await subtype_vars.getval('subtype_col')) await body.setval('lng', await subtype_vars.getval('lng'))", "in await conn.exec_sql(sql): await col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save()", "seq for fld in body.sub_types['type'][elem_xml.tag]: val = await body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name)) if val is", "col in dbobj_cols: init_vals[col] = await dbobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await dbobj.init(display=False,", "obj_name, 'descr': db_table.short_descr}) await obj_names.save() for col_defn in db_table.col_list: await col_names.init(init_vals={ 'name': col_defn.col_name,", "col_names.save() for deleted_obj in orig_memobj: # anything left has been deleted await obj_names.init(init_vals={'name':", "tool_cols} init_vals = {} for col in tool_cols: init_vals[col] = await tool.get_val_from_xml(col, tool_xml.get(col))", "col in memobj_cols} init_vals = {} for col in memobj_cols: init_vals[col] = await", "'auto_startrow', 'toolbar', 'combo_type', 'group_name', 'member_name', 'pyfunc', 'prev', 'align', 'src', 'op', 'tgt') async def", "cannot be serialised to JSON, so the save fails # the trick is", "dbobj.get('name'), 'descr': descr}) await obj_names.save() sql = ( \"SELECT col_name, short_descr FROM 
{}.db_columns", "memobj.init(display=False, init_vals=init_vals) await memobj.save() #set up memcols for this memobj for seq, memcol_xml", "# init_vals = {col: outputs.get_val_from_xml(col, output_xml.get(col)) # for col in output_cols} init_vals =", "# 'seq': await memobj.getval('seq'), # seq is db_obj then mem_obj, so n/a })", "methods_xml is None: methods_xml = etree.Element('frame_methods') await form_vars.setval('methods_xml', methods_xml) await form_vars.setval('method_template', await form_vars.get_val_from_xml('method_template',", "etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await gridframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await", "= seq await memobj.init(display=False, init_vals=init_vals) await memobj.save() #set up memcols for this memobj", "cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj.get('name'), 'descr': descr}) await obj_names.save() sql = ( \"SELECT", "await obj_names.save() obj_row_id = await obj_names.getval('row_id') for seq, memcol in enumerate(memobj.iter('mem_col')): await col_names.init(init_vals={'name':", "inline_vars.getval('name')) inline_xml.set('title', await inline_vars.getval('title')) inline_xml.append(await inline_vars.getval('frame_xml')) # inline_params = await form_vars.getval('inline_xml') # for", "async def dump_toolbar(caller, xml): # called from setup_form_frame.toolbar 'before_save' form_vars = caller.data_objects['form_vars'] tool", "= await body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name)) if val is not None: init_vals[fld.col_name] = val await", "= caller.data_objects['inputs'] all_inputs = inputs.select_many(where=[], order=[('seq', False)]) async for _ in all_inputs: input_xml", "setup_form_frame.toolbar 'on_start_frame' form_vars = caller.data_objects['form_vars'] toolbar_xml = await form_vars.getval('toolbar_xml') if toolbar_xml is None:", "obj_name = 
await memobj.getval('name') if obj_name in orig_memobj: await obj_names.init(init_vals={'name': obj_name}) orig_memobj.remove(obj_name) else:", "obj_xml.get(col)) # for col in memobj_cols} init_vals = {} for col in memobj_cols:", "seq await button.init(display=False, init_vals=init_vals) await button.save() async def dump_buttonrow(caller, xml): # called from", "grid_vars.init(init_vals=init_vals) elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object')", "= elem_xml.find('cur_sequence') init_vals['methods_xml'] = elem_xml.find('grid_methods') await grid_vars.init(init_vals=init_vals) elif elem_type == 'grid_frame': gridframe_vars =", "from setup_form_methods 'before_save' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] methods_xml = etree.Element('frame_methods') await", "IN ('row_id', 'created_id', 'deleted_id') \" .format(caller.company, table_id) ) async for col_name, descr in", "in all_inputs: input_xml = etree.SubElement(inputs_xml, 'input_param') for col in input_cols: await set_if_not_none(input_xml, inputs,", "frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') await frame_vars.init(init_vals=init_vals) for inline_xml in form_xml.iterchildren('inline_form'): # do not", "AibError from common import log, debug async def init_xml(caller, xml): # called from", "= etree.Element('frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await", "called from setup_form_buttonrow 'before_save' form_vars = caller.data_objects['form_vars'] button = caller.data_objects['button'] buttonrow_xml = etree.Element('button_row')", "await col_names.save() \"\"\" # \"\"\" table_name = await dbobj.getval('table_name') db_table = await db.objects.get_db_table(", "'toolbar') 
etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await gridframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] =", "caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] dbobj = caller.data_objects['dbobj'] dbobjs_xml = etree.Element('db_objects') all_dbobj = dbobj.select_many(where=[],", "'on_start_frame' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] if inline_vars.exists: frame_xml = await inline_vars.getval('frame_xml')", "= form_xml.find('db_objects') init_vals['memobj_xml'] = form_xml.find('mem_objects') init_vals['inputs_xml'] = form_xml.find('input_params') init_vals['outputs_xml'] = form_xml.find('output_params') init_vals['before_start_form'] =", "#----------------------------------------------------------------------------- # buttonrow #----------------------------------------------------------------------------- button_cols = ('btn_id', 'btn_label', 'lng', 'btn_default', 'btn_enabled', 'btn_validate', 'action',", "# inline_xml = etree.SubElement(form_xml, 'inline_form') # inline_xml.set('name', name) # inline_xml.append(frame_xml) # validate result", "gridframe_vars.init(init_vals=init_vals) elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object')", "with init_vals, which puts back the values init_vals = {} for col_defn in", "= caller.data_objects['body'] elem_type = await body.getval('type') elem_xml = await body.getval('elem') if elem_type ==", "tool.save() async def dump_toolbar(caller, xml): # called from setup_form_frame.toolbar 'before_save' form_vars = caller.data_objects['form_vars']", "'cursor', 'is_formview_obj') async def load_db_obj(caller, xml): # called from setup_form_dbobj 'on_start_frame' form_vars =", "await body.setval('data_object', await 
grid_vars.getval('data_object')) # await body.setval('obj_descr', await grid_vars.getval('obj_descr')) # await body.setval('growable', await", "forms #----------------------------------------------------------------------------- async def load_inline(caller, xml): # called from setup_form_inline grid_frame 'on_start_frame' inline_vars", "from setup_form_toolbar 'before_start_form' # parent = caller.parent # while True: # if 'obj_names'", "= (await grid_vars.getval('toolbar_xml'))[:] elem_xml.find('cur_columns')[:] = (await grid_vars.getval('columns_xml'))[:] elem_xml.find('cur_filter')[:] = (await grid_vars.getval('filter_xml'))[:] elem_xml.find('cur_sequence')[:] =", "await obj_names.init(init_vals={ 'name': obj_name, 'descr': descr}) await obj_names.save() sql = ( \"SELECT col_name,", "{col: button.get_val_from_xml(col, button_xml.get(col)) # for col in button_cols} init_vals = {} for col", "{}.db_tables WHERE table_name = '{}'\" .format(caller.company, await dbobj.getval('table_name')) ) cur = await conn.exec_sql(sql)", "parent.data_objects['col_names'] # break # parent = parent.parent pass async def load_body(caller, xml): #", "= db_mem_conn.db sql = ( \"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name =", "form_defn.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for seq,", "seq, method_xml in enumerate(methods_xml): # init_vals = {col: method.get_val_from_xml(col, method_xml.get(col)) # for col", "obj_names.init(init_vals={ 'name': obj_name, 'descr': await memobj.getval('descr'), # 'seq': await memobj.getval('seq'), # seq is", "called from setup_form_ioparams 'on_start_frame' form_vars = caller.data_objects['form_vars'] inputs_xml = await form_vars.getval('inputs_xml') outputs_xml =", "next bit is a trick # we want to 'save' form_vars, to trigger", "method_xml.get(col)) # for col in method_cols} init_vals = {} for col in method_cols:", "# for col in 
memobj_cols} init_vals = {} for col in memobj_cols: init_vals[col]", "= (await treeframe_vars.getval('methods_xml'))[:] elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await body.setval('subtype_obj', await", "setup_form_toolbar 'before_start_form' # parent = caller.parent # while True: # if 'obj_names' in", "from setup_form_ioparams 'on_start_frame' form_vars = caller.data_objects['form_vars'] inputs_xml = await form_vars.getval('inputs_xml') outputs_xml = await", "body_xml is None: body_xml = etree.Element('body') await form_vars.setval('body_xml', body_xml) await body.delete_all(from_upd_on_save=True) # a", "body_xml = await form_vars.getval('body_xml') if body_xml is None: body_xml = etree.Element('body') await form_vars.setval('body_xml',", "await set_if_not_none(method_xml, method, col) await form_vars.setval('methods_xml', methods_xml) #----------------------------------------------------------------------------- # body #----------------------------------------------------------------------------- body_cols =", "for this memobj for seq, memcol_xml in enumerate(obj_xml.iter('mem_col')): # init_vals = {col: memcol.get_val_from_xml(col,", "'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr')", "col_defn in enumerate(db_table.col_list): await col_names.init(init_vals={'name': col_defn.col_name, 'descr': col_defn.short_descr, 'seq': seq}) await col_names.save() #", "= await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj_elem.get('name'), 'descr':", "form_vars.init(init_vals=init_vals, display=False) form_vars.init_vals = {} \"\"\" #----------------------------------------------------------------------------- # db_obj #----------------------------------------------------------------------------- dbobj_cols = ('name',", "= 
(await body.getval('elem'))[:] form_vars = caller.data_objects['form_vars'] await form_vars.setval('body_xml', body_xml) #----------------------------------------------------------------------------- # body_elem #-----------------------------------------------------------------------------", "# save all values in init_vals # call form_vars.restore(), which triggers on_clean() #", "etree.Element('subtypes_temp') all_subtypes = subtypes.select_many(where=[], order=[('subtype_id', False)]) async for _ in all_subtypes: subtype_xml =", "async def load_body_elem(caller, xml): # called from setup_form_body.grid_frame 'on_start_frame' body = caller.data_objects['body'] #", "'on_start_frame' form_vars = caller.data_objects['form_vars'] toolbar_xml = await form_vars.getval('toolbar_xml') if toolbar_xml is None: toolbar_xml", "subtype_xml[:] = (await subtypes.getval('body_xml'))[:] elem_xml[:] = subtypes_xml[:] elif elem_xml is None: elem_xml =", "await set_if_not_none(memcol_xml, memcol, col) obj_name = await memobj.getval('name') if obj_name in orig_memobj: await", "= caller.data_objects['form_vars'] body = caller.data_objects['body'] \"\"\" obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] all_obj", "{col: inputs.get_val_from_xml(col, input_xml.get(col)) # for col in input_cols} init_vals = {} for col", "= await form_vars.getval('memobj_xml') memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] await memcol.delete_all() await memobj.delete_all()", "form_vars.setval('outputs_xml', outputs_xml) #----------------------------------------------------------------------------- # inline forms #----------------------------------------------------------------------------- async def load_inline(caller, xml): # called", "AibError(head='XmlError', body=e.args[0]) # update form_definition with new form_xml await form_defn.setval('form_xml', form_xml) \"\"\" #", "await obj_names.save() all_cols = 
memcol.select_many(where=[], order=[]) async for _ in all_cols: await col_names.init(init_vals={", "table_name = dbobj_elem.get('table_name') db_table = await db.objects.get_db_table( form_defn.context, caller.company, table_name) await obj_names.init(init_vals={ 'name':", "= caller.data_objects['button'] await button.delete_all() for seq, button_xml in enumerate(buttonrow_xml): # init_vals = {col:", "== 'grid': grid_vars = caller.data_objects['grid_vars'] init_vals={} init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['columns_xml'] = elem_xml.find('cur_columns') init_vals['filter_xml']", "obj_names.init(init_vals={'name': obj_name}) orig_memobj.remove(obj_name) else: await obj_names.init(init_vals={ 'name': obj_name, 'descr': await memobj.getval('descr'), # 'seq':", "NOT IN ('row_id', 'created_id', 'deleted_id') \" .format(caller.company, table_id) ) async for col_name, descr", "# the trick is as follows - # save all values in init_vals", "await set_if_not_none(input_xml, inputs, col) await form_vars.setval('inputs_xml', inputs_xml) outputs_xml = etree.Element('output_params') outputs = caller.data_objects['outputs']", "body.sub_types['type'][elem_xml.tag]: val = await body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name)) if val is not None: init_vals[fld.col_name] =", "_ in all_memcol: memcol_xml = etree.SubElement(memobj_xml, 'mem_col') for col in memcol_cols: await set_if_not_none(memcol_xml,", "xml): # called from setup_form_toolbar 'before_start_form' # parent = caller.parent # while True:", "subtype in elem_xml.iter('subtype_body'): await subtypes.init(init_vals={ 'subtype_id': subtype.get('subtype_id'), 'body_xml': subtype, }) await subtypes.save() async", "(await treeframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await treeframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await treeframe_vars.getval('methods_xml'))[:] elif elem_type ==", "'frame_xml': inline_xml.find('frame'), } await 
inline_vars.init(init_vals=init_vals) await inline_vars.save() async def dump_form_xml(caller, xml): # called", "await set_if_not_none(dbobj_xml, dbobj, col) obj_name = await dbobj.getval('name') if obj_name in orig_dbobj: orig_dbobj.remove(obj_name)", "= subtypes_xml[:] elif elem_xml is None: elem_xml = etree.Element(elem_type) await body.setval('elem', elem_xml) async", "init_vals = {col: memobj.get_val_from_xml(col, obj_xml.get(col)) # for col in memobj_cols} init_vals = {}", "'form_name', 'auto_start', 'auto_startrow', 'toolbar', 'combo_type', 'group_name', 'member_name', 'pyfunc', 'prev', 'align', 'src', 'op', 'tgt')", "input_xml.get(col)) init_vals['seq'] = seq await inputs.init(display=False, init_vals=init_vals) await inputs.save() outputs = caller.data_objects['outputs'] await", "col in output_cols} init_vals = {} for col in output_cols: init_vals[col] = await", "= ('name', 'obj_name', 'action') async def load_methods(caller, xml): # called from setup_form_methods 'on_start_frame'", "(await gridframe_vars.getval('methods_xml'))[:] elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] await body.setval('main_object', await treeframe_vars.getval('main_object'))", "body.setval('lng', await subtype_vars.getval('lng')) if elem_xml is None: elem_xml = etree.Element(elem_type) subtypes = caller.data_objects['subtypes']", "in all_outputs: output_xml = etree.SubElement(outputs_xml, 'output_param') for col in output_cols: await set_if_not_none(output_xml, outputs,", "= caller.data_objects['form_vars'] tool = caller.data_objects['tool'] toolbar_xml = etree.Element('toolbar') await set_if_not_none(toolbar_xml, form_vars, 'tb_template', 'template')", "# parser = etree.XMLParser(remove_blank_text=True) import db.objects import db.api db_session = db.api.start_db_session() # need", "async def load_buttonrow(caller, xml): # called from setup_form_buttonrow 'on_start_frame' form_vars = caller.data_objects['form_vars'] buttonrow_xml", 
"all_dbobj = dbobj.select_many(where=[], order=[('seq', False)]) async for _ in all_dbobj: dbobj_xml = etree.SubElement(dbobjs_xml,", "inline_xml.append(await inline_vars.getval('frame_xml')) # inline_params = await form_vars.getval('inline_xml') # for name, frame_xml in inline_params:", "caller.data_objects['col_names'] memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] memobjs_xml = etree.Element('mem_objects') all_memobj = memobj.select_many(where=[],", "'prev', 'align', 'src', 'op', 'tgt') async def before_start_body(caller, xml): # called from setup_form_body", "this cannot be serialised to JSON, so the save fails # the trick", "= (await grid_vars.getval('columns_xml'))[:] elem_xml.find('cur_filter')[:] = (await grid_vars.getval('filter_xml'))[:] elem_xml.find('cur_sequence')[:] = (await grid_vars.getval('sequence_xml'))[:] elem_xml.find('grid_methods')[:] =", "_ in all_cols: await col_names.init(init_vals={ 'name': await memcol.getval('col_name'), 'descr': await memcol.getval('short_descr'), 'seq': await", "\"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') \" \"ORDER BY col_type, seq\" .format(caller.company,", "def load_mem_obj(caller, xml): # called from setup_form_memobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] memobj_xml =", "dbobj_elem.get('table_name') db_table = await db.objects.get_db_table( form_defn.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr':", "def dump_toolbar(caller, xml): # called from setup_form_frame.toolbar 'before_save' form_vars = caller.data_objects['form_vars'] tool =", "inputs_xml) outputs_xml = etree.Element('output_params') outputs = caller.data_objects['outputs'] all_outputs = outputs.select_many(where=[], order=[('seq', False)]) async", "seq}) await col_names.save() # \"\"\" memobj_xml = await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'):", "init_vals['seq'] = seq await method.init(display=False, 
init_vals=init_vals) await method.save() async def dump_methods(caller, xml): #", "memcol.getval('seq')}) await col_names.save() for deleted_obj in orig_memobj: # anything left has been deleted", "in body_cols: await set_if_not_none(elem_xml, body, col) elem_xml[:] = (await body.getval('elem'))[:] form_vars = caller.data_objects['form_vars']", "conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj_elem.get('name'), 'descr': descr}) await", "form_vars.setval('memobj_xml', memobjs_xml) #----------------------------------------------------------------------------- # io_parms #----------------------------------------------------------------------------- input_cols = ('name', 'type', 'target', 'required') output_cols", "form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title'))) await form_vars.save() tool = caller.data_objects['tool'] await tool.delete_all() for seq, tool_xml in", "in input_cols} init_vals = {} for col in input_cols: init_vals[col] = await inputs.get_val_from_xml(col,", "etree.SubElement(frame_xml, 'button_row') init_vals['methods_xml'] = etree.SubElement(frame_xml, 'frame_methods') await frame_vars.init(init_vals=init_vals) async def dump_inline(caller, xml): #", "await form_vars.setval('outputs_xml', outputs_xml) #----------------------------------------------------------------------------- # inline forms #----------------------------------------------------------------------------- async def load_inline(caller, xml): #", "'body') etree.SubElement(frame, 'button_row') etree.SubElement(frame, 'frame_methods') await form_defn.setval('form_xml', form_xml) await load_form_xml(caller, xml) #----------------------------------------------------------------------------- #", "called from setup_form_body.grid_frame 'on_start_frame' body = caller.data_objects['body'] # N.B. 
do not use this", "set((dbobj.get('name') for dbobj in dbobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] dbobj =", "None or default xml_val = await db_obj.get_val_for_xml(col_name) # returns None if None or", "'btn_default', 'btn_enabled', 'btn_validate', 'action', 'validation', 'help_msg') async def load_buttonrow(caller, xml): # called from", "form_defn.setval('form_xml', form_xml) \"\"\" # the next bit is a trick # we want", "= elem_xml.tag init_vals['seq'] = seq for fld in body.sub_types['type'][elem_xml.tag]: val = await body.get_val_from_xml(fld.col_name,", "returns None if None or equal to default if xml_val is not None:", "dump_buttonrow(caller, xml): # called from setup_form_buttonrow 'before_save' form_vars = caller.data_objects['form_vars'] button = caller.data_objects['button']", "= caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] inline_vars = caller.data_objects['inline_vars'] await inline_vars.delete_all()", "await obj_names.save() obj_row_id = await obj_names.getval('row_id') for memcol in memobj.iter('mem_col'): await col_names.init(init_vals={'obj_id': obj_row_id,", "in all_subtypes: subtype_xml = etree.SubElement(subtypes_xml, 'subtype_body') await set_if_not_none(subtype_xml, subtypes, 'subtype_id') subtype_xml[:] = (await", "val await body.init(display=False, init_vals=init_vals) await body.save(from_upd_on_save=True) # a trick to prevent running 'on_clean'", "left has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('dbobj_xml', dbobjs_xml) #-----------------------------------------------------------------------------", "is None: elem_xml = etree.Element(elem_type) subtypes = caller.data_objects['subtypes'] subtypes_xml = etree.Element('subtypes_temp') all_subtypes =", "method.get_val_from_xml(col, method_xml.get(col)) # for col in method_cols} init_vals = 
{} for col in", "form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] inline_vars = caller.data_objects['inline_vars'] await inline_vars.delete_all() form_xml =", "frame_vars.init(init_vals=init_vals) async def dump_inline(caller, xml): # called from setup_form_inlline grid_frame 'before_save' inline_vars =", "_ in all_memobj: memobj_xml = etree.SubElement(memobjs_xml, 'mem_obj') for col in memobj_cols: await set_if_not_none(memobj_xml,", "= await inputs.get_val_from_xml(col, input_xml.get(col)) init_vals['seq'] = seq await inputs.init(display=False, init_vals=init_vals) await inputs.save() outputs", "body_xml) #----------------------------------------------------------------------------- # body_elem #----------------------------------------------------------------------------- async def load_body_elem(caller, xml): # called from setup_form_body.grid_frame", "await form_vars.setval('tb_title', await form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title'))) await form_vars.save() tool = caller.data_objects['tool'] await tool.delete_all() for", "grid_frame 'on_start_frame' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] if inline_vars.exists: frame_xml = await", "#----------------------------------------------------------------------------- body_cols = ('main_object', 'obj_descr', 'rowspan', 'colspan', 'value', 'obj_name', 'col_name', 'lng', 'height', 'pwd',", "elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await treeframe_vars.init(init_vals=init_vals) elif elem_type == 'subtype_frame':", "table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for col_defn in db_table.col_list:", "in dbobj_xml.iter('db_obj'): \"\"\" async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql =", "await 
form_vars.init(init_vals=init_vals, display=False) form_vars.init_vals = {} \"\"\" #----------------------------------------------------------------------------- # db_obj #----------------------------------------------------------------------------- dbobj_cols =", "body.save(from_upd_on_save=True) # a trick to prevent running 'on_clean' # could make an alias,", "'fkey', 'choices', 'sql') async def load_mem_obj(caller, xml): # called from setup_form_memobj 'on_start_frame' form_vars", "await inputs.delete_all() for seq, input_xml in enumerate(inputs_xml): # init_vals = {col: inputs.get_val_from_xml(col, input_xml.get(col))", "await subtype_vars.getval('subtype_obj')) await body.setval('subtype_col', await subtype_vars.getval('subtype_col')) await body.setval('lng', await subtype_vars.getval('lng')) if elem_xml is", "reading import os.path import __main__ schema_path = os.path.join(os.path.dirname(__main__.__file__), 'schemas') xsd_parser = etree.XMLParser( schema=etree.XMLSchema(file=os.path.join(schema_path,", "obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] dbobj = caller.data_objects['dbobj'] dbobjs_xml = etree.Element('db_objects') all_dbobj", "= {col: inputs.get_val_from_xml(col, input_xml.get(col)) # for col in input_cols} init_vals = {} for", "if None or equal to default if xml_val is not None: if attr_name", "form_xml.get('before_start_form')) init_vals['after_start_form'] = await form_vars.get_val_from_xml( 'after_start_form', form_xml.get('after_start_form')) init_vals['on_close_form'] = await form_vars.get_val_from_xml( 'on_close_form', form_xml.get('on_close_form'))", "button_xml.get(col)) init_vals['seq'] = seq await button.init(display=False, init_vals=init_vals) await button.save() async def dump_buttonrow(caller, xml):", "set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await 
frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) await inline_vars.setval('frame_xml',", "#----------------------------------------------------------------------------- # methods #----------------------------------------------------------------------------- method_cols = ('name', 'obj_name', 'action') async def load_methods(caller, xml):", "= await obj_names.getval('row_id') for memcol in memobj.iter('mem_col'): await col_names.init(init_vals={'obj_id': obj_row_id, 'name': memcol.get('col_name'), 'descr':", "{}.db_tables WHERE table_name = '{}'\" .format(caller.company, dbobj_elem.get('table_name')) ) cur = await conn.exec_sql(sql) table_id,", "# for name, frame_xml in inline_params: # inline_xml = etree.SubElement(form_xml, 'inline_form') # inline_xml.set('name',", "# for col in dbobj_cols} init_vals = {} for col in dbobj_cols: init_vals[col]", "'on_start_frame' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] inline_vars = caller.data_objects['inline_vars']", "False)]) async for _ in all_buttons: button_xml = etree.SubElement(buttonrow_xml, 'button') for col in", "set_if_not_none(toolbar_xml, form_vars, 'tb_title', 'title') all_tools = tool.select_many(where=[], order=[('seq', False)]) async for _ in", "'before_save' body = caller.data_objects['body'] elem_type = await body.getval('type') elem_xml = await body.getval('elem') if", "# await body.setval('cursor_name', await grid_vars.getval('cursor_name')) # await body.setval('form_name', await grid_vars.getval('form_name')) # await body.setval('auto_start',", "input_cols: init_vals[col] = await inputs.get_val_from_xml(col, input_xml.get(col)) init_vals['seq'] = seq await inputs.init(display=False, init_vals=init_vals) await", "NOT IN ('row_id', 'created_id', 'deleted_id') \" \"ORDER BY col_type, seq\" .format(caller.company, table_id) )", "body.init(display=False, init_vals=init_vals) await 
body.save(from_upd_on_save=True) # a trick to prevent running 'on_clean' # could", "elem_xml.find('frame_methods') await treeframe_vars.init(init_vals=init_vals) elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await subtype_vars.init(init_vals={ 'subtype_obj':", "all_buttons: button_xml = etree.SubElement(buttonrow_xml, 'button') for col in button_cols: await set_if_not_none(button_xml, button, col)", "col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() \"\"\" # \"\"\" obj_name", "if val is not None: init_vals[fld.col_name] = val await body.init(display=False, init_vals=init_vals) await body.save(from_upd_on_save=True)", "obj_names.save() for col_defn in db_table.col_list: await col_names.init(init_vals={ 'name': col_defn.col_name, 'descr': col_defn.short_descr}) await col_names.save()", "for col in input_cols} init_vals = {} for col in input_cols: init_vals[col] =", "col in memcol_cols} init_vals = {} for col in memcol_cols: init_vals[col] = await", "= etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'cur_columns') etree.SubElement(elem_xml, 'cur_filter') etree.SubElement(elem_xml, 'cur_sequence') etree.SubElement(elem_xml, 'grid_methods') elem_xml.find('toolbar')[:]", "in parent.data_objects: # caller.data_objects['obj_names'] = parent.data_objects['obj_names'] # caller.data_objects['col_names'] = parent.data_objects['col_names'] # break #", "etree.SubElement(frame, 'toolbar') etree.SubElement(frame, 'body') etree.SubElement(frame, 'button_row') etree.SubElement(frame, 'frame_methods') await form_defn.setval('form_xml', form_xml) await load_form_xml(caller,", "setup_form 'before_save' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] form_xml =", "AND col_type != 'virt' \" \"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') \"", 
"obj_xml.get(col)) init_vals['seq'] = seq await dbobj.init(display=False, init_vals=init_vals) await dbobj.save() await dbobj.init() async def", "== 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await", "subtype_vars = caller.data_objects['subtype_vars'] await body.setval('subtype_obj', await subtype_vars.getval('subtype_obj')) await body.setval('subtype_col', await subtype_vars.getval('subtype_col')) await body.setval('lng',", "'before_save' form_vars = caller.data_objects['form_vars'] tool = caller.data_objects['tool'] toolbar_xml = etree.Element('toolbar') await set_if_not_none(toolbar_xml, form_vars,", "body.getval('obj_descr') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods')", "await dbobj.save() await dbobj.init() async def dump_db_obj(caller, xml): # called from setup_form_dbobj 'do_save'", "for seq, obj_xml in enumerate(memobj_xml): # init_vals = {col: memobj.get_val_from_xml(col, obj_xml.get(col)) # for", "= (await treeframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await treeframe_vars.getval('methods_xml'))[:] elif elem_type == 'subtype_frame': subtype_vars =", "= frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') else: frame_xml", "elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] =", "form_vars.get_val_from_xml('method_template', methods_xml.get('template'))) await form_vars.save() for seq, method_xml in enumerate(methods_xml): # init_vals = {col:", "in all_body: elem_xml = 
etree.SubElement(body_xml, await body.getval('type')) for col in body_cols: await set_if_not_none(elem_xml,", "db_obj.get_val_for_xml(col_name) # returns None if None or equal to default if xml_val is", "db.objects import db.api db_session = db.api.start_db_session() # need independent connection for reading import", "await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj in dbobj_xml.iter('db_obj'): async with db_session.get_connection()", "enumerate(dbobj_xml): # init_vals = {col: await dbobj.get_val_from_xml(col, obj_xml.get(col)) # for col in dbobj_cols}", "await body.setval('auto_startrow', await grid_vars.getval('auto_startrow')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar')", "( \"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, await dbobj.getval('table_name'))", "= {} for col in output_cols: init_vals[col] = await outputs.get_val_from_xml(col, output_xml.get(col)) init_vals['seq'] =", "# parent = parent.parent pass async def load_body(caller, xml): # called from setup_form_body", "obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('dbobj_xml', dbobjs_xml) #----------------------------------------------------------------------------- # mem_obj #----------------------------------------------------------------------------- memobj_cols =", "col in method_cols: await set_if_not_none(method_xml, method, col) await form_vars.setval('methods_xml', methods_xml) #----------------------------------------------------------------------------- # body", "separate mem_objects, # get the values from 'body' on loading, and replace the", "init_vals['seq'] = seq await tool.init(display=False, init_vals=init_vals) await tool.save() async def dump_toolbar(caller, xml): #", "(await grid_vars.getval('filter_xml'))[:] elem_xml.find('cur_sequence')[:] = (await grid_vars.getval('sequence_xml'))[:] 
elem_xml.find('grid_methods')[:] = (await grid_vars.getval('methods_xml'))[:] elif elem_type ==", "etree.Element('mem_objects') all_memobj = memobj.select_many(where=[], order=[('seq', False)]) async for _ in all_memobj: memobj_xml =", "body = caller.data_objects['body'] elem_type = await body.getval('type') elem_xml = await body.getval('elem') if elem_type", "init_vals[col] = await memcol.get_val_from_xml(col, memcol_xml.get(col)) init_vals['seq'] = seq await memcol.init(display=False, init_vals=init_vals) await memcol.save()", "memobj_cols: init_vals[col] = await memobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await memobj.init(display=False, init_vals=init_vals) await", "body #----------------------------------------------------------------------------- body_cols = ('main_object', 'obj_descr', 'rowspan', 'colspan', 'value', 'obj_name', 'col_name', 'lng', 'height',", "xml): # called from setup_form_body 'on_start_frame' form_vars = caller.data_objects['form_vars'] body = caller.data_objects['body'] \"\"\"", "called from setup_form_inline grid_frame 'on_start_frame' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] if inline_vars.exists:", "to JSON, so the save fails # the trick is as follows -", "form does not exist form_defn = caller.data_objects['form'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name'))", "called from setup_form_dbobj 'do_save' form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') orig_dbobj =", "in all_tools: tool_xml = etree.SubElement(toolbar_xml, 'tool') for col in tool_cols: await set_if_not_none(tool_xml, tool,", "caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) form_xml.set('title', await form_defn.getval('title'))", "row_id, short_descr FROM {}.db_tables WHERE table_name = 
'{}'\" .format(caller.company, dbobj.get('table_name')) ) cur =", "await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['combo_type'] = await body.getval('combo_type') init_vals['toolbar_xml'] = elem_xml.find('toolbar')", "grid_vars.getval('filter_xml'))[:] elem_xml.find('cur_sequence')[:] = (await grid_vars.getval('sequence_xml'))[:] elem_xml.find('grid_methods')[:] = (await grid_vars.getval('methods_xml'))[:] elif elem_type == 'grid_frame':", "col in tool_cols: init_vals[col] = await tool.get_val_from_xml(col, tool_xml.get(col)) init_vals['seq'] = seq await tool.init(display=False,", "col_defn in db_table.col_list: await col_names.init(init_vals={ 'name': col_defn.col_name, 'descr': col_defn.short_descr}) await col_names.save() # \"\"\"", "inline_xml.set('name', await inline_vars.getval('name')) inline_xml.set('title', await inline_vars.getval('title')) inline_xml.append(await inline_vars.getval('frame_xml')) # inline_params = await form_vars.getval('inline_xml')", "to trigger on_clean() # however, inline_xml is a 'list' which includes etree Elements", "= await body.getval('combo_type') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml']", "load_mem_obj(caller, xml): # called from setup_form_memobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] memobj_xml = await", "init_vals={} init_vals['dbobj_xml'] = form_xml.find('db_objects') init_vals['memobj_xml'] = form_xml.find('mem_objects') init_vals['inputs_xml'] = form_xml.find('input_params') init_vals['outputs_xml'] = form_xml.find('output_params')", "await dbobj.getval('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await", "caller.data_objects['tool'] toolbar_xml = etree.Element('toolbar') await set_if_not_none(toolbar_xml, form_vars, 'tb_template', 'template') await 
set_if_not_none(toolbar_xml, form_vars, 'tb_title',", "buttonrow_xml is None: buttonrow_xml = etree.Element('button_row') await form_vars.setval('buttonrow_xml', buttonrow_xml) await form_vars.setval('btn_template', await form_vars.get_val_from_xml('btn_template',", "set_if_not_none(buttonrow_xml, form_vars, 'btn_template', 'template') all_buttons = button.select_many(where=[], order=[('seq', False)]) async for _ in", "await obj_names.init(init_vals={'name': obj_name}) orig_memobj.remove(obj_name) else: await obj_names.init(init_vals={ 'name': obj_name, 'descr': await memobj.getval('descr'), #", "= elem_xml.find('frame_methods') await gridframe_vars.init(init_vals=init_vals) elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] init_vals={} init_vals['main_object']", "orig_dbobj.remove(obj_name) else: \"\"\" async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql =", "cur.__anext__() await obj_names.init(init_vals={ 'name': obj_name, 'descr': descr}) await obj_names.save() sql = ( \"SELECT", "# called from setup_form_buttonrow 'before_save' form_vars = caller.data_objects['form_vars'] button = caller.data_objects['button'] buttonrow_xml =", "async def dump_body_elem(caller, xml): # called from setup_form_body.grid_frame 'before_save' body = caller.data_objects['body'] elem_type", "caller.data_objects['inputs'] await inputs.delete_all() for seq, input_xml in enumerate(inputs_xml): # init_vals = {col: inputs.get_val_from_xml(col,", "frame_xml in inline_params: # inline_xml = etree.SubElement(form_xml, 'inline_form') # inline_xml.set('name', name) # inline_xml.append(frame_xml)", "= etree.SubElement(frame_xml, 'body') init_vals['buttonrow_xml'] = etree.SubElement(frame_xml, 'button_row') init_vals['methods_xml'] = etree.SubElement(frame_xml, 'frame_methods') await frame_vars.init(init_vals=init_vals)", "elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] init_vals={} 
init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr']", "collections import OrderedDict as OD from lxml import etree # parser = etree.XMLParser(remove_blank_text=True)", "inputs.init(display=False, init_vals=init_vals) await inputs.save() outputs = caller.data_objects['outputs'] await outputs.delete_all() for seq, output_xml in", "in enumerate(memobj_xml): # init_vals = {col: memobj.get_val_from_xml(col, obj_xml.get(col)) # for col in memobj_cols}", "memcol.select_many(where=[], order=[('seq', False)]) async for _ in all_memcol: memcol_xml = etree.SubElement(memobj_xml, 'mem_col') for", "as db_mem_conn: conn = db_mem_conn.db sql = ( \"SELECT row_id, short_descr FROM {}.db_tables", "obj_names = obj_name_fld.foreign_key['tgt_field'].db_obj await obj_names.delete_all() col_name_fld = await body.getfld('col_name') col_names = col_name_fld.foreign_key['tgt_field'].db_obj await", "'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] await body.setval('main_object', await treeframe_vars.getval('main_object')) await body.setval('obj_descr', await treeframe_vars.getval('obj_descr')) await", "inline_xml.set('name', name) # inline_xml.append(frame_xml) # validate result using schema try: etree.fromstring(etree.tostring(form_xml), parser=xsd_parser) except", "N.B. 
do not use this to store attributes - use sub_type columns instead", "dump_db_obj(caller, xml): # called from setup_form_dbobj 'do_save' form_vars = caller.data_objects['form_vars'] dbobj_xml = await", "async def load_body(caller, xml): # called from setup_form_body 'on_start_frame' form_vars = caller.data_objects['form_vars'] body", "parent = parent.parent pass async def load_toolbar(caller, xml): # called from setup_form_frame.toolbar 'on_start_frame'", "form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template'))) await form_vars.setval('tb_title', await form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title'))) await form_vars.save() tool = caller.data_objects['tool'] await", "'mem_obj') for col in memobj_cols: await set_if_not_none(memobj_xml, memobj, col) all_memcol = memcol.select_many(where=[], order=[('seq',", "memcol_cols = ('col_name', 'col_type', 'data_type', 'short_descr', 'long_descr', 'col_head', 'key_field', 'data_source', 'condition', 'allow_null', 'allow_amend',", "= await form_vars.getval('buttonrow_xml') if buttonrow_xml is None: buttonrow_xml = etree.Element('button_row') await form_vars.setval('buttonrow_xml', buttonrow_xml)", "enumerate(buttonrow_xml): # init_vals = {col: button.get_val_from_xml(col, button_xml.get(col)) # for col in button_cols} init_vals", "caller.data_objects['col_names'] all_obj = obj_names.select_many(where=[], order=[]) async for _ in all_obj: print(obj_names) all_col =", "xml): # called from setup_form_inlline grid_frame 'before_save' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars']", "load_body(caller, xml): # called from setup_form_body 'on_start_frame' form_vars = caller.data_objects['form_vars'] body = caller.data_objects['body']", "None: elem_xml = etree.Element(elem_type) subtypes = caller.data_objects['subtypes'] subtypes_xml = etree.Element('subtypes_temp') all_subtypes = subtypes.select_many(where=[],", "db_session = db.api.start_db_session() # 
need independent connection for reading import os.path import __main__", "frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) inline_vars = caller.data_objects['inline_vars']", "input_cols = ('name', 'type', 'target', 'required') output_cols = ('name', 'type', 'source') async def", "await body.setval('subtype_obj', await subtype_vars.getval('subtype_obj')) await body.setval('subtype_col', await subtype_vars.getval('subtype_col')) await body.setval('lng', await subtype_vars.getval('lng')) if", "descr}) await col_names.save() \"\"\" # \"\"\" obj_name = dbobj_elem.get('name') table_name = dbobj_elem.get('table_name') db_table", "'actions', 'clone_from') memcol_cols = ('col_name', 'col_type', 'data_type', 'short_descr', 'long_descr', 'col_head', 'key_field', 'data_source', 'condition',", "== 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await body.setval('subtype_obj', await subtype_vars.getval('subtype_obj')) await body.setval('subtype_col', await subtype_vars.getval('subtype_col'))", "etree.SubElement(elem_xml, 'cur_columns') etree.SubElement(elem_xml, 'cur_filter') etree.SubElement(elem_xml, 'cur_sequence') etree.SubElement(elem_xml, 'grid_methods') elem_xml.find('toolbar')[:] = (await grid_vars.getval('toolbar_xml'))[:] elem_xml.find('cur_columns')[:]", "if methods_xml is None: methods_xml = etree.Element('frame_methods') await form_vars.setval('methods_xml', methods_xml) await form_vars.setval('method_template', await", "await set_if_not_none(subtype_xml, subtypes, 'subtype_id') subtype_xml[:] = (await subtypes.getval('body_xml'))[:] elem_xml[:] = subtypes_xml[:] elif elem_xml", "'name': dbobj.get('name'), 'descr': descr}) await obj_names.save() sql = ( \"SELECT col_name, short_descr FROM", "await set_if_not_none(memobj_xml, memobj, col) 
all_memcol = memcol.select_many(where=[], order=[('seq', False)]) async for _ in", "# call form_vars.init() with init_vals, which puts back the values init_vals = {}", "await form_vars.getval('dbobj_xml') for dbobj_elem in dbobj_xml.iter('db_obj'): \"\"\" async with db_session.get_connection() as db_mem_conn: conn", "inputs.get_val_from_xml(col, input_xml.get(col)) init_vals['seq'] = seq await inputs.init(display=False, init_vals=init_vals) await inputs.save() outputs = caller.data_objects['outputs']", "setup_form after form_name if form does not exist form_defn = caller.data_objects['form'] form_xml =", "form_vars.save() for seq, method_xml in enumerate(methods_xml): # init_vals = {col: method.get_val_from_xml(col, method_xml.get(col)) #", "col in dbobj_cols: await set_if_not_none(dbobj_xml, dbobj, col) obj_name = await dbobj.getval('name') if obj_name", "body.getval('elem') if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] # await body.setval('data_object', await grid_vars.getval('data_object'))", "methods_xml = await form_vars.getval('methods_xml') if methods_xml is None: methods_xml = etree.Element('frame_methods') await form_vars.setval('methods_xml',", "False)]) async for _ in all_methods: method_xml = etree.SubElement(methods_xml, 'method') for col in", "the next bit is a trick # we want to 'save' form_vars, to", "dbobj = caller.data_objects['dbobj'] await dbobj.delete_all() for seq, obj_xml in enumerate(dbobj_xml): # init_vals =", "without gui link (cleaner?) 
async def dump_body(caller, xml): # called from setup_form_body 'before_save'", "etree.Element('toolbar') await form_vars.setval('toolbar_xml', toolbar_xml) await form_vars.setval('tb_template', await form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template'))) await form_vars.setval('tb_title', await form_vars.get_val_from_xml('tb_title',", "load_ioparms(caller, xml): # called from setup_form_ioparams 'on_start_frame' form_vars = caller.data_objects['form_vars'] inputs_xml = await", "'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await body.setval('subtype_obj', await subtype_vars.getval('subtype_obj')) await body.setval('subtype_col', await subtype_vars.getval('subtype_col')) await", "form_vars.getval('outputs_xml') inputs = caller.data_objects['inputs'] await inputs.delete_all() for seq, input_xml in enumerate(inputs_xml): # init_vals", "buttonrow_xml = etree.Element('button_row') await form_vars.setval('buttonrow_xml', buttonrow_xml) await form_vars.setval('btn_template', await form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template'))) await form_vars.save()", "form_vars.getval('dbobj_xml') for dbobj_elem in dbobj_xml.iter('db_obj'): \"\"\" async with db_session.get_connection() as db_mem_conn: conn =", ".format(caller.company, table_id) ) async for col_name, descr in await conn.exec_sql(sql): await col_names.init(init_vals={ #'obj_id':", "'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await gridframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await gridframe_vars.getval('body_xml'))[:]", "default xml_val = await db_obj.get_val_for_xml(col_name) # returns None if None or equal to", "memobj for seq, memcol_xml in enumerate(obj_xml.iter('mem_col')): # init_vals = {col: memcol.get_val_from_xml(col, memcol_xml.get(col)) #", "mem_obj, so n/a }) await obj_names.save() all_cols = memcol.select_many(where=[], order=[]) async for 
_", "= etree.Element(elem_type) subtypes = caller.data_objects['subtypes'] subtypes_xml = etree.Element('subtypes_temp') all_subtypes = subtypes.select_many(where=[], order=[('subtype_id', False)])", "col in method_cols} init_vals = {} for col in method_cols: init_vals[col] = await", "'combo_type', 'group_name', 'member_name', 'pyfunc', 'prev', 'align', 'src', 'op', 'tgt') async def before_start_body(caller, xml):", "= await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body')", "caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') orig_dbobj = set((dbobj.get('name') for dbobj in dbobj_xml)) obj_names", "caller.data_objects['grid_vars'] # await body.setval('data_object', await grid_vars.getval('data_object')) # await body.setval('obj_descr', await grid_vars.getval('obj_descr')) # await", "not use this to store attributes - use sub_type columns instead # only", "gridframe_vars = caller.data_objects['gridframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['toolbar_xml']", "elem_xml.iter('subtype_body'): await subtypes.init(init_vals={ 'subtype_id': subtype.get('subtype_id'), 'body_xml': subtype, }) await subtypes.save() async def dump_body_elem(caller,", "('main_object', 'obj_descr', 'rowspan', 'colspan', 'value', 'obj_name', 'col_name', 'lng', 'height', 'pwd', 'readonly', 'choice', 'lookup',", "for col in tool_cols: init_vals[col] = await tool.get_val_from_xml(col, tool_xml.get(col)) init_vals['seq'] = seq await", "in all_dbobj: dbobj_xml = etree.SubElement(dbobjs_xml, 'db_obj') for col in dbobj_cols: await set_if_not_none(dbobj_xml, dbobj,", "= etree.Element(elem_type) await body.setval('elem', elem_xml) async def set_if_not_none(elem_xml, db_obj, col_name, attr_name=None): # create", "= await 
method.get_val_from_xml(col, method_xml.get(col)) init_vals['seq'] = seq await method.init(display=False, init_vals=init_vals) await method.save() async", "await memobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await memobj.init(display=False, init_vals=init_vals) await memobj.save() #set up", "# do not descend init_vals = { 'name': inline_xml.get('name'), 'title': inline_xml.get('title'), 'frame_xml': inline_xml.find('frame'),", "= caller.data_objects['subtype_vars'] await body.setval('subtype_obj', await subtype_vars.getval('subtype_obj')) await body.setval('subtype_col', await subtype_vars.getval('subtype_col')) await body.setval('lng', await", "await body.setval('auto_start', await grid_vars.getval('auto_start')) # await body.setval('auto_startrow', await grid_vars.getval('auto_startrow')) if elem_xml is None:", "= ( \"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, dbobj_elem.get('table_name'))", "def load_inline(caller, xml): # called from setup_form_inline grid_frame 'on_start_frame' inline_vars = caller.data_objects['inline_vars'] frame_vars", "set((memobj.get('name') for memobj in memobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] memobj =", "dbobj in dbobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] dbobj = caller.data_objects['dbobj'] dbobjs_xml", "frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') else: frame_xml =", "tool.get_val_from_xml(col, tool_xml.get(col)) init_vals['seq'] = seq await tool.init(display=False, init_vals=init_vals) await tool.save() async def dump_toolbar(caller,", "conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': obj_name, 'descr': descr}) await", "= await 
dbobj.getval('table_name') db_table = await db.objects.get_db_table( form_vars.context, caller.company, table_name) await obj_names.init(init_vals={ 'name':", "await grid_vars.getval('cursor_name')) # await body.setval('form_name', await grid_vars.getval('form_name')) # await body.setval('auto_start', await grid_vars.getval('auto_start')) #", "import etree # parser = etree.XMLParser(remove_blank_text=True) import db.objects import db.api db_session = db.api.start_db_session()", "descr in await conn.exec_sql(sql): await col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await", "element, but only if not None or default xml_val = await db_obj.get_val_for_xml(col_name) #", "all_memobj: memobj_xml = etree.SubElement(memobjs_xml, 'mem_obj') for col in memobj_cols: await set_if_not_none(memobj_xml, memobj, col)", "setup_form_body 'before_start_form' # parent = caller.parent # while True: # if 'obj_names' in", "obj_name in orig_dbobj: orig_dbobj.remove(obj_name) else: \"\"\" async with db_session.get_connection() as db_mem_conn: conn =", "#----------------------------------------------------------------------------- input_cols = ('name', 'type', 'target', 'required') output_cols = ('name', 'type', 'source') async", "init_vals = {col: outputs.get_val_from_xml(col, output_xml.get(col)) # for col in output_cols} init_vals = {}", "frame_vars = caller.data_objects['frame_vars'] inline_vars = caller.data_objects['inline_vars'] await inline_vars.delete_all() form_xml = await form_defn.getval('form_xml') if", "memcol.get('col_name'), 'descr': memcol.get('short_descr'), 'seq': seq}) await col_names.save() frame_xml = form_xml.find('frame') init_vals={} init_vals['toolbar_xml'] =", "method_xml = etree.SubElement(methods_xml, 'method') for col in method_cols: await set_if_not_none(method_xml, method, col) await", "# a trick to prevent running 'on_clean' for seq, elem_xml in enumerate(body_xml): init_vals", "form_xml) \"\"\" # the next bit is a trick 
# we want to", "order=[('seq', False)]) async for _ in all_tools: tool_xml = etree.SubElement(toolbar_xml, 'tool') for col", "called from setup_form_body.grid_frame 'before_save' body = caller.data_objects['body'] elem_type = await body.getval('type') elem_xml =", "col_name, descr in await conn.exec_sql(sql): await col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr': descr})", "'frame_methods') elem_xml.find('toolbar')[:] = (await treeframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await treeframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await treeframe_vars.getval('buttonrow_xml'))[:]", "serialised to JSON, so the save fails # the trick is as follows", "if obj_name in orig_memobj: await obj_names.init(init_vals={'name': obj_name}) orig_memobj.remove(obj_name) else: await obj_names.init(init_vals={ 'name': obj_name,", "await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={ 'name': memobj.get('name'), 'descr': memobj.get('descr')}) await", "= await form_vars.getval('dbobj_xml') dbobj = caller.data_objects['dbobj'] await dbobj.delete_all() for seq, obj_xml in enumerate(dbobj_xml):", "as OD from lxml import etree # parser = etree.XMLParser(remove_blank_text=True) import db.objects import", "= frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] =", "= {} for col in tool_cols: init_vals[col] = await tool.get_val_from_xml(col, tool_xml.get(col)) init_vals['seq'] =", "memcol_xml = etree.SubElement(memobj_xml, 'mem_col') for col in memcol_cols: await set_if_not_none(memcol_xml, memcol, col) obj_name", "'body') init_vals['buttonrow_xml'] = etree.SubElement(frame_xml, 'button_row') init_vals['methods_xml'] = etree.SubElement(frame_xml, 'frame_methods') await frame_vars.init(init_vals=init_vals) 
async def", "( \"SELECT col_name, short_descr FROM {}.db_columns \" \"WHERE table_id = {} AND col_type", "= caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] frame_xml = etree.Element('frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await", "etree.SubElement(elem_xml, 'cur_filter') etree.SubElement(elem_xml, 'cur_sequence') etree.SubElement(elem_xml, 'grid_methods') elem_xml.find('toolbar')[:] = (await grid_vars.getval('toolbar_xml'))[:] elem_xml.find('cur_columns')[:] = (await", "in dbobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] dbobj = caller.data_objects['dbobj'] dbobjs_xml =", "all_obj: print(obj_names) all_col = col_names.select_many(where=[], order=[]) async for _ in all_col: print(col_names) print()", "await tool.init(display=False, init_vals=init_vals) await tool.save() async def dump_toolbar(caller, xml): # called from setup_form_frame.toolbar", "'button_row') init_vals['methods_xml'] = etree.SubElement(frame_xml, 'frame_methods') await frame_vars.init(init_vals=init_vals) async def dump_inline(caller, xml): # called", "'help_msg', 'nb_label', 'subtype_obj', 'subtype_col', 'data_object', 'growable', 'num_grid_rows', 'cursor_name', 'form_name', 'auto_start', 'auto_startrow', 'toolbar', 'combo_type',", "obj_names.init(init_vals={ 'name': memobj.get('name'), 'descr': memobj.get('descr')}) await obj_names.save() obj_row_id = await obj_names.getval('row_id') for seq,", "in form_vars.db_table.col_list[1:]: # exclude 'row_id' col_name = col_defn.col_name init_vals[col_name] = await form_vars.getval(col_name) await", "for col in tool_cols} init_vals = {} for col in tool_cols: init_vals[col] =", "elem_xml.find('grid_methods') await grid_vars.init(init_vals=init_vals) elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] init_vals={} init_vals['main_object'] =", "treeframe_vars.getval('body_xml'))[:] 
elem_xml.find('button_row')[:] = (await treeframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await treeframe_vars.getval('methods_xml'))[:] elif elem_type == 'subtype_frame':", "#----------------------------------------------------------------------------- dbobj_cols = ('name', 'table_name', 'parent', 'fkey', 'cursor', 'is_formview_obj') async def load_db_obj(caller, xml):", "async def load_toolbar(caller, xml): # called from setup_form_frame.toolbar 'on_start_frame' form_vars = caller.data_objects['form_vars'] toolbar_xml", "dump_mem_obj(caller, xml): # called from setup_form_memobj 'before_save' form_vars = caller.data_objects['form_vars'] memobj_xml = await", "methods_xml = etree.Element('frame_methods') await form_vars.setval('methods_xml', methods_xml) await form_vars.setval('method_template', await form_vars.get_val_from_xml('method_template', methods_xml.get('template'))) await form_vars.save()", "obj_name = dbobj_elem.get('name') table_name = dbobj_elem.get('table_name') db_table = await db.objects.get_db_table( form_defn.context, caller.company, table_name)", "etree.Element(elem_type) subtypes = caller.data_objects['subtypes'] subtypes_xml = etree.Element('subtypes_temp') all_subtypes = subtypes.select_many(where=[], order=[('subtype_id', False)]) async", "for dbobj in dbobj_xml.iter('db_obj'): async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql", "table_id = {} AND col_type != 'virt' \" \"AND col_name NOT IN ('row_id',", "elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] =", "('name', 'type', 'source') async def load_ioparms(caller, xml): # called from setup_form_ioparams 'on_start_frame' form_vars", "from setup_form 'before_save' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] form_xml", 
"init_vals = {} for col in dbobj_cols: init_vals[col] = await dbobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq']", "None if None or equal to default if xml_val is not None: if", "as follows - # save all values in init_vals # call form_vars.restore(), which", "'save' form_vars, to trigger on_clean() # however, inline_xml is a 'list' which includes", "init_vals['dbobj_xml'] = form_xml.find('db_objects') init_vals['memobj_xml'] = form_xml.find('mem_objects') init_vals['inputs_xml'] = form_xml.find('input_params') init_vals['outputs_xml'] = form_xml.find('output_params') init_vals['before_start_form']", "before_start_body(caller, xml): # called from setup_form_body 'before_start_form' # parent = caller.parent # while", "dbobj.save() await dbobj.init() async def dump_db_obj(caller, xml): # called from setup_form_dbobj 'do_save' form_vars", "it is ok to store copies of attributes in separate mem_objects, # get", "= os.path.join(os.path.dirname(__main__.__file__), 'schemas') xsd_parser = etree.XMLParser( schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')), attribute_defaults=True, remove_comments=True, remove_blank_text=True) from common", "from setup_form_body 'on_start_frame' form_vars = caller.data_objects['form_vars'] body = caller.data_objects['body'] \"\"\" obj_names = caller.data_objects['obj_names']", "deleted_obj}) await obj_names.delete() await form_vars.setval('dbobj_xml', dbobjs_xml) #----------------------------------------------------------------------------- # mem_obj #----------------------------------------------------------------------------- memobj_cols = ('name',", "outputs.select_many(where=[], order=[('seq', False)]) async for _ in all_outputs: output_xml = etree.SubElement(outputs_xml, 'output_param') for", "init_vals['filter_xml'] = elem_xml.find('cur_filter') init_vals['sequence_xml'] = elem_xml.find('cur_sequence') init_vals['methods_xml'] = elem_xml.find('grid_methods') await grid_vars.init(init_vals=init_vals) elif 
elem_type", "await set_if_not_none(tool_xml, tool, col) await form_vars.setval('toolbar_xml', toolbar_xml) #----------------------------------------------------------------------------- # buttonrow #----------------------------------------------------------------------------- button_cols =", "# await body.setval('auto_startrow', await grid_vars.getval('auto_startrow')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml,", "body.getval('lng'), }) subtypes = caller.data_objects['subtypes'] await subtypes.delete_all() for subtype in elem_xml.iter('subtype_body'): await subtypes.init(init_vals={", "('row_id', 'created_id', 'deleted_id') \" .format(caller.company, table_id) ) async for col_name, descr in await", "remove_blank_text=True) from common import AibError from common import log, debug async def init_xml(caller,", "= frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') await frame_vars.init(init_vals=init_vals) for inline_xml in form_xml.iterchildren('inline_form'): # do", "= seq for fld in body.sub_types['type'][elem_xml.tag]: val = await body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name)) if val", "(etree.XMLSyntaxError, ValueError, TypeError) as e: raise AibError(head='XmlError', body=e.args[0]) # update form_definition with new", "for col in tool_cols: await set_if_not_none(tool_xml, tool, col) await form_vars.setval('toolbar_xml', toolbar_xml) #----------------------------------------------------------------------------- #", "is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods')", "'template') all_buttons = button.select_many(where=[], order=[('seq', False)]) async for _ in all_buttons: button_xml =", "await set_if_not_none(button_xml, button, col) await form_vars.setval('buttonrow_xml', buttonrow_xml) 
#----------------------------------------------------------------------------- # methods #----------------------------------------------------------------------------- method_cols =", "to store attributes - use sub_type columns instead # only use it to", "elem_xml.find('button_row')[:] = (await treeframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await treeframe_vars.getval('methods_xml'))[:] elif elem_type == 'subtype_frame': subtype_vars", "subtype_vars.getval('subtype_obj')) await body.setval('subtype_col', await subtype_vars.getval('subtype_col')) await body.setval('lng', await subtype_vars.getval('lng')) if elem_xml is None:", "memcol = caller.data_objects['memcol'] await memcol.delete_all() await memobj.delete_all() for seq, obj_xml in enumerate(memobj_xml): #", "form_vars, 'method_template', 'template') all_methods = method.select_many(where=[], order=[('seq', False)]) async for _ in all_methods:", "def dump_body(caller, xml): # called from setup_form_body 'before_save' body = caller.data_objects['body'] body_xml =", "inputs.delete_all() for seq, input_xml in enumerate(inputs_xml): # init_vals = {col: inputs.get_val_from_xml(col, input_xml.get(col)) #", "from setup_form_body.grid_frame 'on_start_frame' body = caller.data_objects['body'] # N.B. 
do not use this to", "= await form_defn.getval('form_xml') if form_xml is None: await form_vars.init() await frame_vars.init() return init_vals={}", "to prevent running 'on_clean' for seq, elem_xml in enumerate(body_xml): init_vals = {} init_vals['elem']", "dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj_elem in dbobj_xml.iter('db_obj'): \"\"\" async with db_session.get_connection() as", "'before_save' form_vars = caller.data_objects['form_vars'] button = caller.data_objects['button'] buttonrow_xml = etree.Element('button_row') await set_if_not_none(buttonrow_xml, form_vars,", "grid_vars.getval('growable')) # await body.setval('num_grid_rows', await grid_vars.getval('num_grid_rows')) # await body.setval('cursor_name', await grid_vars.getval('cursor_name')) # await", "init_vals['buttonrow_xml'] = frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') else:", "# a trick to prevent running 'on_clean' # could make an alias, without", "orig_dbobj = set((dbobj.get('name') for dbobj in dbobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names']", "_ in all_methods: method_xml = etree.SubElement(methods_xml, 'method') for col in method_cols: await set_if_not_none(method_xml,", "form_vars.getval('dbobj_xml') orig_dbobj = set((dbobj.get('name') for dbobj in dbobj_xml)) obj_names = caller.data_objects['obj_names'] col_names =", "etree.SubElement(elem_xml, 'cur_sequence') etree.SubElement(elem_xml, 'grid_methods') elem_xml.find('toolbar')[:] = (await grid_vars.getval('toolbar_xml'))[:] elem_xml.find('cur_columns')[:] = (await grid_vars.getval('columns_xml'))[:] elem_xml.find('cur_filter')[:]", "obj_name in orig_memobj: await obj_names.init(init_vals={'name': obj_name}) orig_memobj.remove(obj_name) else: await obj_names.init(init_vals={ 'name': obj_name, 'descr':", "for col in 
method_cols: init_vals[col] = await method.get_val_from_xml(col, method_xml.get(col)) init_vals['seq'] = seq await", "'body' on dumping elem_type = await body.getval('type') elem_xml = await body.getval('elem') if elem_type", "'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for seq, col_defn in enumerate(db_table.col_list): await col_names.init(init_vals={'name':", "not exist form_defn = caller.data_objects['form'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) etree.SubElement(form_xml, 'db_objects')", "form_vars.restore() await form_vars.init(init_vals=init_vals, display=False) form_vars.init_vals = {} \"\"\" #----------------------------------------------------------------------------- # db_obj #----------------------------------------------------------------------------- dbobj_cols", "button.delete_all() for seq, button_xml in enumerate(buttonrow_xml): # init_vals = {col: button.get_val_from_xml(col, button_xml.get(col)) #", "await set_if_not_none(methods_xml, form_vars, 'method_template', 'template') all_methods = method.select_many(where=[], order=[('seq', False)]) async for _", "link (cleaner?) 
async def dump_body(caller, xml): # called from setup_form_body 'before_save' body =", "\" \"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') \" \"ORDER BY col_type, seq\"", "for col in memcol_cols: init_vals[col] = await memcol.get_val_from_xml(col, memcol_xml.get(col)) init_vals['seq'] = seq await", "seq await memobj.init(display=False, init_vals=init_vals) await memobj.save() #set up memcols for this memobj for", "# init_vals = {col: tool.get_val_from_xml(col, tool_xml.get(col)) # for col in tool_cols} init_vals =", "obj_row_id, 'name': memcol.get('col_name'), 'descr': memcol.get('short_descr')}) await col_names.save() \"\"\" body_xml = await form_vars.getval('body_xml') if", "# toolbar #----------------------------------------------------------------------------- tool_cols = ('type', 'label', 'tip', 'lng', 'name', 'obj_name', 'col_name', 'shortcut',", "await obj_names.init(init_vals={ 'name': memobj.get('name'), 'descr': memobj.get('descr')}) await obj_names.save() obj_row_id = await obj_names.getval('row_id') for", "obj_row_id = await obj_names.getval('row_id') for seq, memcol in enumerate(memobj.iter('mem_col')): await col_names.init(init_vals={'name': memcol.get('col_name'), 'descr':", "xml): # called from setup_form_methods 'before_save' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] methods_xml", "'growable', 'num_grid_rows', 'cursor_name', 'form_name', 'auto_start', 'auto_startrow', 'toolbar', 'combo_type', 'group_name', 'member_name', 'pyfunc', 'prev', 'align',", "form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) form_xml.set('title', await form_defn.getval('title')) await set_if_not_none(form_xml, form_vars, 'before_start_form')", "def load_db_obj(caller, xml): # called from setup_form_dbobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] dbobj_xml =", "body_xml) await body.delete_all(from_upd_on_save=True) # a trick to prevent running 'on_clean' for 
seq, elem_xml", "parent.parent pass async def load_toolbar(caller, xml): # called from setup_form_frame.toolbar 'on_start_frame' form_vars =", "form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={ 'name': memobj.get('name'), 'descr': memobj.get('descr')}) await obj_names.save()", "await body.setval('main_object', await treeframe_vars.getval('main_object')) await body.setval('obj_descr', await treeframe_vars.getval('obj_descr')) await body.setval('combo_type', await treeframe_vars.getval('combo_type')) if", "seq await method.init(display=False, init_vals=init_vals) await method.save() async def dump_methods(caller, xml): # called from", "= etree.SubElement(form_xml, 'inline_form') inline_xml.set('name', await inline_vars.getval('name')) inline_xml.set('title', await inline_vars.getval('title')) inline_xml.append(await inline_vars.getval('frame_xml')) # inline_params", "init_vals['methods_xml'] = elem_xml.find('grid_methods') await grid_vars.init(init_vals=init_vals) elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] init_vals={}", "all_dbobj: dbobj_xml = etree.SubElement(dbobjs_xml, 'db_obj') for col in dbobj_cols: await set_if_not_none(dbobj_xml, dbobj, col)", "form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template'))) await form_vars.save() button = caller.data_objects['button'] await button.delete_all() for seq, button_xml in", "{} \" \"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') \" \"ORDER BY col_type,", "= etree.Element('button_row') await set_if_not_none(buttonrow_xml, form_vars, 'btn_template', 'template') all_buttons = button.select_many(where=[], order=[('seq', False)]) async", "'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr')", "frame_vars = caller.data_objects['frame_vars'] if 
inline_vars.exists: frame_xml = await inline_vars.getval('frame_xml') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar')", "a trick to prevent running 'on_clean' for seq, elem_xml in enumerate(body_xml): init_vals =", "= etree.Element('subtypes_temp') all_subtypes = subtypes.select_many(where=[], order=[('subtype_id', False)]) async for _ in all_subtypes: subtype_xml", "await form_vars.setval('btn_template', await form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template'))) await form_vars.save() button = caller.data_objects['button'] await button.delete_all() for", "# called from setup_form_buttonrow 'on_start_frame' form_vars = caller.data_objects['form_vars'] buttonrow_xml = await form_vars.getval('buttonrow_xml') if", "'seq': seq}) await col_names.save() frame_xml = form_xml.find('frame') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] =", "form_defn.getval('title')) await set_if_not_none(form_xml, form_vars, 'before_start_form') await set_if_not_none(form_xml, form_vars, 'after_start_form') await set_if_not_none(form_xml, form_vars, 'on_close_form')", "toolbar_xml = await form_vars.getval('toolbar_xml') if toolbar_xml is None: toolbar_xml = etree.Element('toolbar') await form_vars.setval('toolbar_xml',", "'seq': seq}) await col_names.save() # \"\"\" memobj_xml = await form_vars.getval('memobj_xml') for memobj in", "async for _ in all_inputs: input_xml = etree.SubElement(inputs_xml, 'input_param') for col in input_cols:", "'descr': await memcol.getval('short_descr'), 'seq': await memcol.getval('seq')}) await col_names.save() for deleted_obj in orig_memobj: #", "memcol.get_val_from_xml(col, memcol_xml.get(col)) # for col in memcol_cols} init_vals = {} for col in", "table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for seq, col_defn in", "from setup_form_memobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] 
memobj_xml = await form_vars.getval('memobj_xml') memobj = caller.data_objects['memobj']", "async for _ in all_inline: inline_xml = etree.SubElement(form_xml, 'inline_form') inline_xml.set('name', await inline_vars.getval('name')) inline_xml.set('title',", "await grid_vars.getval('obj_descr')) # await body.setval('growable', await grid_vars.getval('growable')) # await body.setval('num_grid_rows', await grid_vars.getval('num_grid_rows')) #", "{col: tool.get_val_from_xml(col, tool_xml.get(col)) # for col in tool_cols} init_vals = {} for col", "async def dump_mem_obj(caller, xml): # called from setup_form_memobj 'before_save' form_vars = caller.data_objects['form_vars'] memobj_xml", "memobj_xml = await form_vars.getval('memobj_xml') memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] await memcol.delete_all() await", "# await body.setval('main_object', await gridframe_vars.getval('main_object')) # await body.setval('obj_descr', await gridframe_vars.getval('obj_descr')) if elem_xml is", "\"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, dbobj.get('table_name')) ) cur", "'output_param') for col in output_cols: await set_if_not_none(output_xml, outputs, col) await form_vars.setval('outputs_xml', outputs_xml) #-----------------------------------------------------------------------------", "default if xml_val is not None: if attr_name is None: # if not", "await frame_vars.init(init_vals=init_vals) for inline_xml in form_xml.iterchildren('inline_form'): # do not descend init_vals = {", "frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await", "for fld in body.sub_types['type'][elem_xml.tag]: val = await body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name)) if val is not", 
"= button.select_many(where=[], order=[('seq', False)]) async for _ in all_buttons: button_xml = etree.SubElement(buttonrow_xml, 'button')", "await inline_vars.getval('name')) inline_xml.set('title', await inline_vars.getval('title')) inline_xml.append(await inline_vars.getval('frame_xml')) # inline_params = await form_vars.getval('inline_xml') #", "await body.setval('num_grid_rows', await grid_vars.getval('num_grid_rows')) # await body.setval('cursor_name', await grid_vars.getval('cursor_name')) # await body.setval('form_name', await", "(await gridframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await gridframe_vars.getval('methods_xml'))[:] elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars']", "been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('dbobj_xml', dbobjs_xml) #----------------------------------------------------------------------------- # mem_obj", "col in input_cols} init_vals = {} for col in input_cols: init_vals[col] = await", "is None: # if not specified, use col_name attr_name = col_name elem_xml.set(attr_name, xml_val)", "has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('dbobj_xml', dbobjs_xml) #----------------------------------------------------------------------------- #", "name) # inline_xml.append(frame_xml) # validate result using schema try: etree.fromstring(etree.tostring(form_xml), parser=xsd_parser) except (etree.XMLSyntaxError,", "for col in memobj_cols: init_vals[col] = await memobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await", "'mem_col') for col in memcol_cols: await set_if_not_none(memcol_xml, memcol, col) obj_name = await memobj.getval('name')", "= etree.SubElement(inputs_xml, 'input_param') for col in input_cols: await set_if_not_none(input_xml, inputs, col) await form_vars.setval('inputs_xml',", "}) subtypes = 
caller.data_objects['subtypes'] await subtypes.delete_all() for subtype in elem_xml.iter('subtype_body'): await subtypes.init(init_vals={ 'subtype_id':", "memobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol']", "elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await gridframe_vars.init(init_vals=init_vals) elif elem_type == 'tree_frame':", "await form_vars.setval('memobj_xml', memobjs_xml) #----------------------------------------------------------------------------- # io_parms #----------------------------------------------------------------------------- input_cols = ('name', 'type', 'target', 'required')", "await form_defn.getval('form_xml') if form_xml is None: await form_vars.init() await frame_vars.init() return init_vals={} init_vals['dbobj_xml']", "def dump_form_xml(caller, xml): # called from setup_form 'before_save' form_defn = caller.data_objects['form'] form_vars =", "obj_name_fld = await body.getfld('obj_name') obj_names = obj_name_fld.foreign_key['tgt_field'].db_obj await obj_names.delete_all() col_name_fld = await body.getfld('col_name')", "seq\" .format(caller.company, table_id) ) async for col_name, descr in await conn.exec_sql(sql): await col_names.init(init_vals={", "await form_vars.getval('methods_xml') if methods_xml is None: methods_xml = etree.Element('frame_methods') await form_vars.setval('methods_xml', methods_xml) await", "col in method_cols: init_vals[col] = await method.get_val_from_xml(col, method_xml.get(col)) init_vals['seq'] = seq await method.init(display=False,", "setup_form_inline grid_frame 'on_start_frame' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] if inline_vars.exists: frame_xml =", "for seq, tool_xml in enumerate(toolbar_xml): # init_vals = {col: 
tool.get_val_from_xml(col, tool_xml.get(col)) # for", "button.get_val_from_xml(col, button_xml.get(col)) # for col in button_cols} init_vals = {} for col in", "short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, dbobj.get('table_name')) ) cur = await", "on dumping elem_type = await body.getval('type') elem_xml = await body.getval('elem') if elem_type ==", "frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') else: frame_xml = etree.Element('frame') init_vals={} init_vals['toolbar_xml']", "caller.data_objects['col_names'] dbobj = caller.data_objects['dbobj'] dbobjs_xml = etree.Element('db_objects') all_dbobj = dbobj.select_many(where=[], order=[('seq', False)]) async", "toolbar_xml) await form_vars.setval('tb_template', await form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template'))) await form_vars.setval('tb_title', await form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title'))) await form_vars.save()", "= frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] =", "all values in init_vals # call form_vars.restore(), which triggers on_clean() # call form_vars.init()", "await body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name)) if val is not None: init_vals[fld.col_name] = val await body.init(display=False,", "caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] all_obj = obj_names.select_many(where=[], order=[]) async for _ in all_obj:", "await body.getfld('obj_name') obj_names = obj_name_fld.foreign_key['tgt_field'].db_obj await obj_names.delete_all() col_name_fld = await body.getfld('col_name') col_names =", "for col in output_cols: init_vals[col] = await outputs.get_val_from_xml(col, output_xml.get(col)) init_vals['seq'] = seq await", 
"body_xml = etree.Element('body') await form_vars.setval('body_xml', body_xml) await body.delete_all(from_upd_on_save=True) # a trick to prevent", "async def init_xml(caller, xml): # called from setup_form after form_name if form does", "etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await gridframe_vars.getval('toolbar_xml'))[:]", "load_toolbar(caller, xml): # called from setup_form_frame.toolbar 'on_start_frame' form_vars = caller.data_objects['form_vars'] toolbar_xml = await", "await button.delete_all() for seq, button_xml in enumerate(buttonrow_xml): # init_vals = {col: button.get_val_from_xml(col, button_xml.get(col))", "form_xml.get('on_close_form')) await form_vars.init(init_vals=init_vals) obj_names = caller.data_objects['obj_names'] await obj_names.delete_all() col_names = caller.data_objects['col_names'] await col_names.delete_all()", "order=[]) async for _ in all_inline: inline_xml = etree.SubElement(form_xml, 'inline_form') inline_xml.set('name', await inline_vars.getval('name'))", "if body_xml is None: body_xml = etree.Element('body') await form_vars.setval('body_xml', body_xml) await body.delete_all(from_upd_on_save=True) #", "need independent connection for reading import os.path import __main__ schema_path = os.path.join(os.path.dirname(__main__.__file__), 'schemas')", "db_obj, col_name, attr_name=None): # create attribute on xml element, but only if not", "dbobj_cols = ('name', 'table_name', 'parent', 'fkey', 'cursor', 'is_formview_obj') async def load_db_obj(caller, xml): #", "in dbobj_cols: init_vals[col] = await dbobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await dbobj.init(display=False, init_vals=init_vals)", "col_names.save() memobj_xml = await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={ 'name': 
memobj.get('name'),", "table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj.get('name'), 'descr': descr}) await obj_names.save()", "'grid': grid_vars = caller.data_objects['grid_vars'] # await body.setval('data_object', await grid_vars.getval('data_object')) # await body.setval('obj_descr', await", "col) await form_vars.setval('outputs_xml', outputs_xml) #----------------------------------------------------------------------------- # inline forms #----------------------------------------------------------------------------- async def load_inline(caller, xml):", "memobj.init() await memobj.init() async def dump_mem_obj(caller, xml): # called from setup_form_memobj 'before_save' form_vars", "\"\"\" obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] all_obj = obj_names.select_many(where=[], order=[]) async for", "break # parent = parent.parent pass async def load_toolbar(caller, xml): # called from", "in tool_cols} init_vals = {} for col in tool_cols: init_vals[col] = await tool.get_val_from_xml(col,", "init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml']", "#'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() \"\"\" # \"\"\" table_name =", "await form_vars.setval('toolbar_xml', toolbar_xml) await form_vars.setval('tb_template', await form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template'))) await form_vars.setval('tb_title', await form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title')))", "form_vars.setval('body_xml', body_xml) await body.delete_all(from_upd_on_save=True) # a trick to prevent running 'on_clean' for seq,", "async for _ in all_memcol: memcol_xml = etree.SubElement(memobj_xml, 'mem_col') for col in memcol_cols:", "= memobj.select_many(where=[], order=[('seq', False)]) async for _ in 
all_memobj: memobj_xml = etree.SubElement(memobjs_xml, 'mem_obj')", "col_name, 'descr': descr}) await col_names.save() \"\"\" # \"\"\" obj_name = dbobj_elem.get('name') table_name =", "all_buttons = button.select_many(where=[], order=[('seq', False)]) async for _ in all_buttons: button_xml = etree.SubElement(buttonrow_xml,", "#----------------------------------------------------------------------------- # body #----------------------------------------------------------------------------- body_cols = ('main_object', 'obj_descr', 'rowspan', 'colspan', 'value', 'obj_name', 'col_name',", "init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') else: frame_xml = etree.Element('frame') init_vals={} init_vals['toolbar_xml'] =", "# called from setup_form 'on_start_frame' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars =", "def dump_db_obj(caller, xml): # called from setup_form_dbobj 'do_save' form_vars = caller.data_objects['form_vars'] dbobj_xml =", "db_obj #----------------------------------------------------------------------------- dbobj_cols = ('name', 'table_name', 'parent', 'fkey', 'cursor', 'is_formview_obj') async def load_db_obj(caller,", "init_vals[col] = await button.get_val_from_xml(col, button_xml.get(col)) init_vals['seq'] = seq await button.init(display=False, init_vals=init_vals) await button.save()", "_ in all_tools: tool_xml = etree.SubElement(toolbar_xml, 'tool') for col in tool_cols: await set_if_not_none(tool_xml,", "'name': memobj.get('name'), 'descr': memobj.get('descr')}) await obj_names.save() obj_row_id = await obj_names.getval('row_id') for memcol in", "'obj_names' in parent.data_objects: # caller.data_objects['obj_names'] = parent.data_objects['obj_names'] # caller.data_objects['col_names'] = parent.data_objects['col_names'] # break", "{col: memobj.get_val_from_xml(col, obj_xml.get(col)) # for col in memobj_cols} init_vals = {} for col", 
"form_vars = caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') orig_memobj = set((memobj.get('name') for memobj in", "'source') async def load_ioparms(caller, xml): # called from setup_form_ioparams 'on_start_frame' form_vars = caller.data_objects['form_vars']", "trick is as follows - # save all values in init_vals # call", "dump_body(caller, xml): # called from setup_form_body 'before_save' body = caller.data_objects['body'] body_xml = etree.Element('body')", "await grid_vars.getval('data_object')) # await body.setval('obj_descr', await grid_vars.getval('obj_descr')) # await body.setval('growable', await grid_vars.getval('growable')) #", "subtypes.select_many(where=[], order=[('subtype_id', False)]) async for _ in all_subtypes: subtype_xml = etree.SubElement(subtypes_xml, 'subtype_body') await", "'created_id', 'deleted_id') \" .format(caller.company, table_id) ) async for col_name, descr in await conn.exec_sql(sql):", "await body.setval('obj_descr', await grid_vars.getval('obj_descr')) # await body.setval('growable', await grid_vars.getval('growable')) # await body.setval('num_grid_rows', await", "load_form_xml(caller, xml): # called from setup_form 'on_start_frame' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars']", "# inline forms #----------------------------------------------------------------------------- async def load_inline(caller, xml): # called from setup_form_inline grid_frame", "'descr': memobj.get('descr')}) await obj_names.save() obj_row_id = await obj_names.getval('row_id') for seq, memcol in enumerate(memobj.iter('mem_col')):", "etree.SubElement(memobjs_xml, 'mem_obj') for col in memobj_cols: await set_if_not_none(memobj_xml, memobj, col) all_memcol = memcol.select_many(where=[],", "setup_form_body 'before_save' body = caller.data_objects['body'] body_xml = etree.Element('body') all_body = body.select_many(where=[], order=[('seq', False)])", "memcol_xml in 
enumerate(obj_xml.iter('mem_col')): # init_vals = {col: memcol.get_val_from_xml(col, memcol_xml.get(col)) # for col in", "buttonrow #----------------------------------------------------------------------------- button_cols = ('btn_id', 'btn_label', 'lng', 'btn_default', 'btn_enabled', 'btn_validate', 'action', 'validation', 'help_msg')", "elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await treeframe_vars.init(init_vals=init_vals) elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars']", "'condition', 'allow_null', 'allow_amend', 'max_len', 'db_scale', 'scale_ptr', 'dflt_val', 'dflt_rule', 'col_checks', 'fkey', 'choices', 'sql') async", "subtypes, 'subtype_id') subtype_xml[:] = (await subtypes.getval('body_xml'))[:] elem_xml[:] = subtypes_xml[:] elif elem_xml is None:", "called from setup_form_body 'before_start_form' # parent = caller.parent # while True: # if", "xml): # called from setup_form_body.grid_frame 'on_start_frame' body = caller.data_objects['body'] # N.B. do not", "caller.data_objects['subtypes'] subtypes_xml = etree.Element('subtypes_temp') all_subtypes = subtypes.select_many(where=[], order=[('subtype_id', False)]) async for _ in", "gridframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await gridframe_vars.getval('methods_xml'))[:] elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] await", "= caller.data_objects['body'] body_xml = etree.Element('body') all_body = body.select_many(where=[], order=[('seq', False)]) async for _", "button = caller.data_objects['button'] buttonrow_xml = etree.Element('button_row') await set_if_not_none(buttonrow_xml, form_vars, 'btn_template', 'template') all_buttons =", "async def dump_buttonrow(caller, xml): # called from setup_form_buttonrow 'before_save' form_vars = caller.data_objects['form_vars'] button", "P.S. 
it is ok to store copies of attributes in separate mem_objects, #", "memobj.delete_all() for seq, obj_xml in enumerate(memobj_xml): # init_vals = {col: memobj.get_val_from_xml(col, obj_xml.get(col)) #", "form_vars.setval('tb_template', await form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template'))) await form_vars.setval('tb_title', await form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title'))) await form_vars.save() tool =", "mem_objects, # get the values from 'body' on loading, and replace the values", "await set_if_not_none(elem_xml, body, col) elem_xml[:] = (await body.getval('elem'))[:] form_vars = caller.data_objects['form_vars'] await form_vars.setval('body_xml',", "= caller.data_objects['subtype_vars'] await subtype_vars.init(init_vals={ 'subtype_obj': await body.getval('subtype_obj'), 'subtype_col': await body.getval('subtype_col'), 'lng': await body.getval('lng'),", "outputs.get_val_from_xml(col, output_xml.get(col)) init_vals['seq'] = seq await outputs.init(display=False, init_vals=init_vals) await outputs.save() async def dump_ioparms(caller,", "etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await treeframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await treeframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:]", "await obj_names.delete() await form_vars.setval('dbobj_xml', dbobjs_xml) #----------------------------------------------------------------------------- # mem_obj #----------------------------------------------------------------------------- memobj_cols = ('name', 'descr',", "def load_body(caller, xml): # called from setup_form_body 'on_start_frame' form_vars = caller.data_objects['form_vars'] body =", "Elements # this cannot be serialised to JSON, so the save fails #", "copies of attributes in separate mem_objects, # get the values from 'body' on", "# await body.setval('obj_descr', await grid_vars.getval('obj_descr')) # await 
body.setval('growable', await grid_vars.getval('growable')) # await body.setval('num_grid_rows',", "= ('name', 'descr', 'parent', 'sequence', 'sub_types', 'tree_params', 'actions', 'clone_from') memcol_cols = ('col_name', 'col_type',", "'on_start_frame' form_vars = caller.data_objects['form_vars'] body = caller.data_objects['body'] \"\"\" obj_names = caller.data_objects['obj_names'] col_names =", "of attributes in separate mem_objects, # get the values from 'body' on loading,", "init_vals['seq'] = seq await inputs.init(display=False, init_vals=init_vals) await inputs.save() outputs = caller.data_objects['outputs'] await outputs.delete_all()", "caller.data_objects['body'] # N.B. do not use this to store attributes - use sub_type", "await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': obj_name, 'descr': descr})", "descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj.get('name'), 'descr': descr}) await obj_names.save() sql", "col in dbobj_cols} init_vals = {} for col in dbobj_cols: init_vals[col] = await", "called from setup_form 'on_start_frame' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars']", "= caller.data_objects['method'] await method.delete_all() methods_xml = await form_vars.getval('methods_xml') if methods_xml is None: methods_xml", "if form does not exist form_defn = caller.data_objects['form'] form_xml = etree.Element('form') form_xml.set('name', await", "(cleaner?) 
async def dump_body(caller, xml): # called from setup_form_body 'before_save' body = caller.data_objects['body']", "= dbobj_elem.get('name') table_name = dbobj_elem.get('table_name') db_table = await db.objects.get_db_table( form_defn.context, caller.company, table_name) await", "= caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] form_xml = etree.Element('form') form_xml.set('name', await", "{col: method.get_val_from_xml(col, method_xml.get(col)) # for col in method_cols} init_vals = {} for col", "or equal to default if xml_val is not None: if attr_name is None:", "body.getval('subtype_col'), 'lng': await body.getval('lng'), }) subtypes = caller.data_objects['subtypes'] await subtypes.delete_all() for subtype in", "'action') async def before_start_toolbar(caller, xml): # called from setup_form_toolbar 'before_start_form' # parent =", "= elem_xml.find('toolbar') init_vals['columns_xml'] = elem_xml.find('cur_columns') init_vals['filter_xml'] = elem_xml.find('cur_filter') init_vals['sequence_xml'] = elem_xml.find('cur_sequence') init_vals['methods_xml'] =", "inputs = caller.data_objects['inputs'] await inputs.delete_all() for seq, input_xml in enumerate(inputs_xml): # init_vals =", "= form_xml.find('mem_objects') init_vals['inputs_xml'] = form_xml.find('input_params') init_vals['outputs_xml'] = form_xml.find('output_params') init_vals['before_start_form'] = await form_vars.get_val_from_xml( 'before_start_form',", "col_name = col_defn.col_name init_vals[col_name] = await form_vars.getval(col_name) await form_vars.restore() await form_vars.init(init_vals=init_vals, display=False) form_vars.init_vals", "all_tools: tool_xml = etree.SubElement(toolbar_xml, 'tool') for col in tool_cols: await set_if_not_none(tool_xml, tool, col)", "called from setup_form_body 'on_start_frame' form_vars = caller.data_objects['form_vars'] body = caller.data_objects['body'] \"\"\" obj_names =", "= etree.XMLParser( 
schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')), attribute_defaults=True, remove_comments=True, remove_blank_text=True) from common import AibError from common", "# form_funcs #----------------------------------------------------------------------------- async def load_form_xml(caller, xml): # called from setup_form 'on_start_frame' form_defn", "init_vals=init_vals) await dbobj.save() await dbobj.init() async def dump_db_obj(caller, xml): # called from setup_form_dbobj", "init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['combo_type'] = await body.getval('combo_type')", "'subtype_id') subtype_xml[:] = (await subtypes.getval('body_xml'))[:] elem_xml[:] = subtypes_xml[:] elif elem_xml is None: elem_xml", "'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await treeframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await treeframe_vars.getval('body_xml'))[:]", "caller.data_objects['treeframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['combo_type'] = await", "store sub_elements # P.S. 
it is ok to store copies of attributes in", "elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await body.setval('subtype_obj', await subtype_vars.getval('subtype_obj')) await body.setval('subtype_col', await", "# caller.data_objects['col_names'] = parent.data_objects['col_names'] # break # parent = parent.parent pass async def", "= await body.getval('elem') if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] init_vals={} init_vals['toolbar_xml'] =", "# inline_xml.append(frame_xml) # validate result using schema try: etree.fromstring(etree.tostring(form_xml), parser=xsd_parser) except (etree.XMLSyntaxError, ValueError,", "# methods #----------------------------------------------------------------------------- method_cols = ('name', 'obj_name', 'action') async def load_methods(caller, xml): #", "button_cols} init_vals = {} for col in button_cols: init_vals[col] = await button.get_val_from_xml(col, button_xml.get(col))", "in all_memcol: memcol_xml = etree.SubElement(memobj_xml, 'mem_col') for col in memcol_cols: await set_if_not_none(memcol_xml, memcol,", "if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row')", "{col: await dbobj.get_val_from_xml(col, obj_xml.get(col)) # for col in dbobj_cols} init_vals = {} for", "'col_name', 'shortcut', 'action') async def before_start_toolbar(caller, xml): # called from setup_form_toolbar 'before_start_form' #", "await form_vars.getval('dbobj_xml') dbobj = caller.data_objects['dbobj'] await dbobj.delete_all() for seq, obj_xml in enumerate(dbobj_xml): #", "caller.data_objects['inputs'] all_inputs = inputs.select_many(where=[], order=[('seq', False)]) async for _ in all_inputs: input_xml =", "outputs_xml = await form_vars.getval('outputs_xml') inputs = caller.data_objects['inputs'] await inputs.delete_all() for seq, input_xml in", "= seq await inputs.init(display=False, 
init_vals=init_vals) await inputs.save() outputs = caller.data_objects['outputs'] await outputs.delete_all() for", "= caller.data_objects['treeframe_vars'] await body.setval('main_object', await treeframe_vars.getval('main_object')) await body.setval('obj_descr', await treeframe_vars.getval('obj_descr')) await body.setval('combo_type', await", "subtypes_xml[:] elif elem_xml is None: elem_xml = etree.Element(elem_type) await body.setval('elem', elem_xml) async def", "'schemas') xsd_parser = etree.XMLParser( schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')), attribute_defaults=True, remove_comments=True, remove_blank_text=True) from common import AibError", "buttonrow_xml = etree.Element('button_row') await set_if_not_none(buttonrow_xml, form_vars, 'btn_template', 'template') all_buttons = button.select_many(where=[], order=[('seq', False)])", "await body.setval('obj_descr', await treeframe_vars.getval('obj_descr')) await body.setval('combo_type', await treeframe_vars.getval('combo_type')) if elem_xml is None: elem_xml", "init_vals['sequence_xml'] = elem_xml.find('cur_sequence') init_vals['methods_xml'] = elem_xml.find('grid_methods') await grid_vars.init(init_vals=init_vals) elif elem_type == 'grid_frame': gridframe_vars", "form_xml is None: await form_vars.init() await frame_vars.init() return init_vals={} init_vals['dbobj_xml'] = form_xml.find('db_objects') init_vals['memobj_xml']", "frame_vars.getval('methods_xml')) await inline_vars.setval('frame_xml', frame_xml) #----------------------------------------------------------------------------- # toolbar #----------------------------------------------------------------------------- tool_cols = ('type', 'label', 'tip',", "dbobj_cols} init_vals = {} for col in dbobj_cols: init_vals[col] = await dbobj.get_val_from_xml(col, obj_xml.get(col))", "col_name, 'descr': descr}) await col_names.save() \"\"\" # \"\"\" table_name = await dbobj.getval('table_name') db_table", "'name', 'obj_name', 'col_name', 
'shortcut', 'action') async def before_start_toolbar(caller, xml): # called from setup_form_toolbar", "= etree.SubElement(subtypes_xml, 'subtype_body') await set_if_not_none(subtype_xml, subtypes, 'subtype_id') subtype_xml[:] = (await subtypes.getval('body_xml'))[:] elem_xml[:] =", "FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, dbobj.get('table_name')) ) cur = await conn.exec_sql(sql)", "= caller.data_objects['inputs'] await inputs.delete_all() for seq, input_xml in enumerate(inputs_xml): # init_vals = {col:", "memcol.get('col_name'), 'descr': memcol.get('short_descr')}) await col_names.save() \"\"\" body_xml = await form_vars.getval('body_xml') if body_xml is", "'colspan', 'value', 'obj_name', 'col_name', 'lng', 'height', 'pwd', 'readonly', 'choice', 'lookup', 'radio', 'before', 'form_dflt',", "import __main__ schema_path = os.path.join(os.path.dirname(__main__.__file__), 'schemas') xsd_parser = etree.XMLParser( schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')), attribute_defaults=True, remove_comments=True,", "in db_table.col_list: await col_names.init(init_vals={ 'name': col_defn.col_name, 'descr': col_defn.short_descr}) await col_names.save() # \"\"\" for", "body_cols: await set_if_not_none(elem_xml, body, col) elem_xml[:] = (await body.getval('elem'))[:] form_vars = caller.data_objects['form_vars'] await", "('name', 'table_name', 'parent', 'fkey', 'cursor', 'is_formview_obj') async def load_db_obj(caller, xml): # called from", "# P.S. 
it is ok to store copies of attributes in separate mem_objects,", "await subtype_vars.getval('subtype_col')) await body.setval('lng', await subtype_vars.getval('lng')) if elem_xml is None: elem_xml = etree.Element(elem_type)", "async for _ in all_tools: tool_xml = etree.SubElement(toolbar_xml, 'tool') for col in tool_cols:", "pass async def load_toolbar(caller, xml): # called from setup_form_frame.toolbar 'on_start_frame' form_vars = caller.data_objects['form_vars']", "button = caller.data_objects['button'] await button.delete_all() for seq, button_xml in enumerate(buttonrow_xml): # init_vals =", "init_vals['after_start_form'] = await form_vars.get_val_from_xml( 'after_start_form', form_xml.get('after_start_form')) init_vals['on_close_form'] = await form_vars.get_val_from_xml( 'on_close_form', form_xml.get('on_close_form')) await", "form_vars.getval('dbobj_xml') for dbobj in dbobj_xml.iter('db_obj'): async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db", "short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, dbobj_elem.get('table_name')) ) cur = await", "left has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('memobj_xml', memobjs_xml) #-----------------------------------------------------------------------------", "\"ORDER BY col_type, seq\" .format(caller.company, table_id) ) async for col_name, descr in await", "= seq await button.init(display=False, init_vals=init_vals) await button.save() async def dump_buttonrow(caller, xml): # called", "= {col: button.get_val_from_xml(col, button_xml.get(col)) # for col in button_cols} init_vals = {} for", "for reading import os.path import __main__ schema_path = os.path.join(os.path.dirname(__main__.__file__), 'schemas') xsd_parser = etree.XMLParser(", "inline_vars.exists: frame_xml = await inline_vars.getval('frame_xml') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] 
= frame_xml.find('body') init_vals['buttonrow_xml']", "display=False) form_vars.init_vals = {} \"\"\" #----------------------------------------------------------------------------- # db_obj #----------------------------------------------------------------------------- dbobj_cols = ('name', 'table_name',", "caller.data_objects['frame_vars'] frame_xml = etree.Element('frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await", "= caller.data_objects['button'] buttonrow_xml = etree.Element('button_row') await set_if_not_none(buttonrow_xml, form_vars, 'btn_template', 'template') all_buttons = button.select_many(where=[],", "async for _ in all_buttons: button_xml = etree.SubElement(buttonrow_xml, 'button') for col in button_cols:", "'{}'\" .format(caller.company, dbobj.get('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__()", "'descr': descr}) await col_names.save() memobj_xml = await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'): await", "== 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] await body.setval('main_object', await treeframe_vars.getval('main_object')) await body.setval('obj_descr', await treeframe_vars.getval('obj_descr'))", "await form_vars.getval('memobj_xml') memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] await memcol.delete_all() await memobj.delete_all() for", "'col_type', 'data_type', 'short_descr', 'long_descr', 'col_head', 'key_field', 'data_source', 'condition', 'allow_null', 'allow_amend', 'max_len', 'db_scale', 'scale_ptr',", "import OrderedDict as OD from lxml import etree # parser = etree.XMLParser(remove_blank_text=True) import", "await inline_vars.delete_all() form_xml = await form_defn.getval('form_xml') if form_xml is None: await form_vars.init() await", "deleted_obj in orig_memobj: # anything left has been deleted await 
obj_names.init(init_vals={'name': deleted_obj}) await", "'template') await set_if_not_none(toolbar_xml, form_vars, 'tb_title', 'title') all_tools = tool.select_many(where=[], order=[('seq', False)]) async for", "method_cols: await set_if_not_none(method_xml, method, col) await form_vars.setval('methods_xml', methods_xml) #----------------------------------------------------------------------------- # body #----------------------------------------------------------------------------- body_cols", "= '{}'\" .format(caller.company, dbobj.get('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr = await", "parent.parent pass async def load_body(caller, xml): # called from setup_form_body 'on_start_frame' form_vars =", "body.delete_all(from_upd_on_save=True) # a trick to prevent running 'on_clean' for seq, elem_xml in enumerate(body_xml):", "None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:]", "body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name)) if val is not None: init_vals[fld.col_name] = val await body.init(display=False, init_vals=init_vals)", "in elem_xml.iter('subtype_body'): await subtypes.init(init_vals={ 'subtype_id': subtype.get('subtype_id'), 'body_xml': subtype, }) await subtypes.save() async def", "= dbobj.select_many(where=[], order=[('seq', False)]) async for _ in all_dbobj: dbobj_xml = etree.SubElement(dbobjs_xml, 'db_obj')", "= caller.data_objects['inline_vars'] await inline_vars.delete_all() form_xml = await form_defn.getval('form_xml') if form_xml is None: await", "else: \"\"\" async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql = (", "#----------------------------------------------------------------------------- tool_cols = ('type', 'label', 'tip', 'lng', 'name', 'obj_name', 'col_name', 'shortcut', 'action') async", "await 
body.getval('obj_descr') init_vals['combo_type'] = await body.getval('combo_type') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml']", "etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await treeframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:]", "body.setval('main_object', await gridframe_vars.getval('main_object')) # await body.setval('obj_descr', await gridframe_vars.getval('obj_descr')) if elem_xml is None: elem_xml", "for col in memobj_cols: await set_if_not_none(memobj_xml, memobj, col) all_memcol = memcol.select_many(where=[], order=[('seq', False)])", "# await body.setval('data_object', await grid_vars.getval('data_object')) # await body.setval('obj_descr', await grid_vars.getval('obj_descr')) # await body.setval('growable',", "form_vars, 'after_start_form') await set_if_not_none(form_xml, form_vars, 'on_close_form') form_xml.append(await form_vars.getval('dbobj_xml')) form_xml.append(await form_vars.getval('memobj_xml')) form_xml.append(await form_vars.getval('inputs_xml')) form_xml.append(await", "in button_cols: await set_if_not_none(button_xml, button, col) await form_vars.setval('buttonrow_xml', buttonrow_xml) #----------------------------------------------------------------------------- # methods #-----------------------------------------------------------------------------", "method_cols} init_vals = {} for col in method_cols: init_vals[col] = await method.get_val_from_xml(col, method_xml.get(col))", "seq, button_xml in enumerate(buttonrow_xml): # init_vals = {col: button.get_val_from_xml(col, button_xml.get(col)) # for col", "await form_vars.getval(col_name) await form_vars.restore() await form_vars.init(init_vals=init_vals, display=False) form_vars.init_vals = {} \"\"\" 
#----------------------------------------------------------------------------- #", "'method') for col in method_cols: await set_if_not_none(method_xml, method, col) await form_vars.setval('methods_xml', methods_xml) #-----------------------------------------------------------------------------", "form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) etree.SubElement(form_xml, 'db_objects') etree.SubElement(form_xml, 'mem_objects') etree.SubElement(form_xml, 'input_params') etree.SubElement(form_xml,", "async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql = ( \"SELECT row_id,", "etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'cur_columns') etree.SubElement(elem_xml, 'cur_filter') etree.SubElement(elem_xml, 'cur_sequence') etree.SubElement(elem_xml, 'grid_methods') elem_xml.find('toolbar')[:] =", "inputs = caller.data_objects['inputs'] all_inputs = inputs.select_many(where=[], order=[('seq', False)]) async for _ in all_inputs:", "# await body.setval('form_name', await grid_vars.getval('form_name')) # await body.setval('auto_start', await grid_vars.getval('auto_start')) # await body.setval('auto_startrow',", "'name': inline_xml.get('name'), 'title': inline_xml.get('title'), 'frame_xml': inline_xml.find('frame'), } await inline_vars.init(init_vals=init_vals) await inline_vars.save() async def", "ok to store copies of attributes in separate mem_objects, # get the values", "await col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() memobj_xml = await", "orig_memobj = set((memobj.get('name') for memobj in memobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names']", "memobj.getval('seq'), # seq is db_obj then mem_obj, so n/a }) await obj_names.save() all_cols", "print(col_names) print() \"\"\" \"\"\" obj_name_fld = await body.getfld('obj_name') obj_names = 
obj_name_fld.foreign_key['tgt_field'].db_obj await obj_names.delete_all()", "obj_names.init(init_vals={ 'name': memobj.get('name'), 'descr': memobj.get('descr')}) await obj_names.save() obj_row_id = await obj_names.getval('row_id') for memcol", "memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={ 'name': memobj.get('name'), 'descr': memobj.get('descr')}) await obj_names.save() obj_row_id = await obj_names.getval('row_id')", "= form_xml.find('frame') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row') init_vals['methods_xml']", "xml): # called from setup_form_body 'before_start_form' # parent = caller.parent # while True:", "= caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] memobjs_xml = etree.Element('mem_objects') all_memobj = memobj.select_many(where=[], order=[('seq', False)])", "await col_names.save() frame_xml = form_xml.find('frame') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml']", "for col_defn in db_table.col_list: await col_names.init(init_vals={ 'name': col_defn.col_name, 'descr': col_defn.short_descr}) await col_names.save() #", "try: etree.fromstring(etree.tostring(form_xml), parser=xsd_parser) except (etree.XMLSyntaxError, ValueError, TypeError) as e: raise AibError(head='XmlError', body=e.args[0]) #", "grid_vars = caller.data_objects['grid_vars'] # await body.setval('data_object', await grid_vars.getval('data_object')) # await body.setval('obj_descr', await grid_vars.getval('obj_descr'))", "frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) inline_vars = caller.data_objects['inline_vars'] all_inline = inline_vars.select_many(where=[], order=[])", "button_cols: init_vals[col] = await 
button.get_val_from_xml(col, button_xml.get(col)) init_vals['seq'] = seq await button.init(display=False, init_vals=init_vals) await", "caller.data_objects['outputs'] await outputs.delete_all() for seq, output_xml in enumerate(outputs_xml): # init_vals = {col: outputs.get_val_from_xml(col,", "await body.getval('obj_descr') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] =", "# called from setup_form_dbobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') dbobj", "= val await body.init(display=False, init_vals=init_vals) await body.save(from_upd_on_save=True) # a trick to prevent running", "frame_xml = form_xml.find('frame') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row')", "= await dbobj.getval('name') if obj_name in orig_dbobj: orig_dbobj.remove(obj_name) else: \"\"\" async with db_session.get_connection()", "setup_form_buttonrow 'on_start_frame' form_vars = caller.data_objects['form_vars'] buttonrow_xml = await form_vars.getval('buttonrow_xml') if buttonrow_xml is None:", "is None: methods_xml = etree.Element('frame_methods') await form_vars.setval('methods_xml', methods_xml) await form_vars.setval('method_template', await form_vars.get_val_from_xml('method_template', methods_xml.get('template')))", "# break # parent = parent.parent pass async def load_body(caller, xml): # called", "db.objects.get_db_table( form_defn.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for", "for col in method_cols: await set_if_not_none(method_xml, method, col) await form_vars.setval('methods_xml', methods_xml) 
#----------------------------------------------------------------------------- #", "'auto_start', 'auto_startrow', 'toolbar', 'combo_type', 'group_name', 'member_name', 'pyfunc', 'prev', 'align', 'src', 'op', 'tgt') async", "col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() memobj_xml = await form_vars.getval('memobj_xml')", "for seq, input_xml in enumerate(inputs_xml): # init_vals = {col: inputs.get_val_from_xml(col, input_xml.get(col)) # for", "#----------------------------------------------------------------------------- method_cols = ('name', 'obj_name', 'action') async def load_methods(caller, xml): # called from", "(await body.getval('elem'))[:] form_vars = caller.data_objects['form_vars'] await form_vars.setval('body_xml', body_xml) #----------------------------------------------------------------------------- # body_elem #----------------------------------------------------------------------------- async", "inline_vars.delete_all() form_xml = await form_defn.getval('form_xml') if form_xml is None: await form_vars.init() await frame_vars.init()", "col in input_cols: init_vals[col] = await inputs.get_val_from_xml(col, input_xml.get(col)) init_vals['seq'] = seq await inputs.init(display=False,", "= parent.parent pass async def load_body(caller, xml): # called from setup_form_body 'on_start_frame' form_vars", "body.setval('obj_descr', await gridframe_vars.getval('obj_descr')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml,", "xml_val = await db_obj.get_val_for_xml(col_name) # returns None if None or equal to default", "await memobj.init() await memobj.init() async def dump_mem_obj(caller, xml): # called from setup_form_memobj 'before_save'", "subtype, }) await subtypes.save() async def dump_body_elem(caller, xml): # called from setup_form_body.grid_frame 'before_save'", "'descr': db_table.short_descr}) await obj_names.save() for 
col_defn in db_table.col_list: await col_names.init(init_vals={ 'name': col_defn.col_name, 'descr':", "await form_defn.getval('title')) await set_if_not_none(form_xml, form_vars, 'before_start_form') await set_if_not_none(form_xml, form_vars, 'after_start_form') await set_if_not_none(form_xml, form_vars,", "output_xml in enumerate(outputs_xml): # init_vals = {col: outputs.get_val_from_xml(col, output_xml.get(col)) # for col in", "await body.getval('subtype_col'), 'lng': await body.getval('lng'), }) subtypes = caller.data_objects['subtypes'] await subtypes.delete_all() for subtype", "= outputs.select_many(where=[], order=[('seq', False)]) async for _ in all_outputs: output_xml = etree.SubElement(outputs_xml, 'output_param')", "seq, elem_xml in enumerate(body_xml): init_vals = {} init_vals['elem'] = elem_xml init_vals['type'] = elem_xml.tag", "form_vars.db_table.col_list[1:]: # exclude 'row_id' col_name = col_defn.col_name init_vals[col_name] = await form_vars.getval(col_name) await form_vars.restore()", "xml element, but only if not None or default xml_val = await db_obj.get_val_for_xml(col_name)", "caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] await memcol.delete_all() await memobj.delete_all() for seq, obj_xml in enumerate(memobj_xml):", "enumerate(outputs_xml): # init_vals = {col: outputs.get_val_from_xml(col, output_xml.get(col)) # for col in output_cols} init_vals", "= body.select_many(where=[], order=[('seq', False)]) async for _ in all_body: elem_xml = etree.SubElement(body_xml, await", "= {col: method.get_val_from_xml(col, method_xml.get(col)) # for col in method_cols} init_vals = {} for", "init_vals = {col: button.get_val_from_xml(col, button_xml.get(col)) # for col in button_cols} init_vals = {}", "memcol.init(display=False, init_vals=init_vals) await memcol.save() await memobj.init() await memobj.init() async def dump_mem_obj(caller, xml): #", "toolbar #----------------------------------------------------------------------------- 
tool_cols = ('type', 'label', 'tip', 'lng', 'name', 'obj_name', 'col_name', 'shortcut', 'action')", "def load_buttonrow(caller, xml): # called from setup_form_buttonrow 'on_start_frame' form_vars = caller.data_objects['form_vars'] buttonrow_xml =", "caller.data_objects['dbobj'] dbobjs_xml = etree.Element('db_objects') all_dbobj = dbobj.select_many(where=[], order=[('seq', False)]) async for _ in", "get the values from 'body' on loading, and replace the values # in", "= {} for col in memobj_cols: init_vals[col] = await memobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] =", "the values # in 'body' on dumping elem_type = await body.getval('type') elem_xml =", "await gridframe_vars.getval('obj_descr')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body')", "= etree.Element('frame_methods') await form_vars.setval('methods_xml', methods_xml) await form_vars.setval('method_template', await form_vars.get_val_from_xml('method_template', methods_xml.get('template'))) await form_vars.save() for", "# init_vals = {col: await dbobj.get_val_from_xml(col, obj_xml.get(col)) # for col in dbobj_cols} init_vals", "frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr')", "await form_vars.setval('dbobj_xml', dbobjs_xml) #----------------------------------------------------------------------------- # mem_obj #----------------------------------------------------------------------------- memobj_cols = ('name', 'descr', 'parent', 'sequence',", "= caller.data_objects['form_vars'] toolbar_xml = await form_vars.getval('toolbar_xml') if toolbar_xml is None: toolbar_xml = etree.Element('toolbar')", "frame = etree.SubElement(form_xml, 'frame') etree.SubElement(frame, 'toolbar') etree.SubElement(frame, 'body') 
etree.SubElement(frame, 'button_row') etree.SubElement(frame, 'frame_methods') await", "form_vars = caller.data_objects['form_vars'] body = caller.data_objects['body'] \"\"\" obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names']", "#----------------------------------------------------------------------------- # mem_obj #----------------------------------------------------------------------------- memobj_cols = ('name', 'descr', 'parent', 'sequence', 'sub_types', 'tree_params', 'actions',", "table_name = '{}'\" .format(caller.company, await dbobj.getval('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr", "common import log, debug async def init_xml(caller, xml): # called from setup_form after", "setup_form_ioparams 'do_save' form_vars = caller.data_objects['form_vars'] inputs_xml = etree.Element('input_params') inputs = caller.data_objects['inputs'] all_inputs =", "memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] await memcol.delete_all() await memobj.delete_all() for seq, obj_xml", "for col_name, descr in await conn.exec_sql(sql): await col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr':", "form_vars = caller.data_objects['form_vars'] inputs_xml = etree.Element('input_params') inputs = caller.data_objects['inputs'] all_inputs = inputs.select_many(where=[], order=[('seq',", "# we want to 'save' form_vars, to trigger on_clean() # however, inline_xml is", "order=[('seq', False)]) async for _ in all_methods: method_xml = etree.SubElement(methods_xml, 'method') for col", "= await dbobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await dbobj.init(display=False, init_vals=init_vals) await dbobj.save() await", "init_vals = {} for col in button_cols: init_vals[col] = await button.get_val_from_xml(col, button_xml.get(col)) init_vals['seq']", "dbobj.get('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await 
obj_names.init(init_vals={", "form_vars.setval('inputs_xml', inputs_xml) outputs_xml = etree.Element('output_params') outputs = caller.data_objects['outputs'] all_outputs = outputs.select_many(where=[], order=[('seq', False)])", "col_names.save() \"\"\" # \"\"\" obj_name = dbobj_elem.get('name') table_name = dbobj_elem.get('table_name') db_table = await", "dbobj_xml.iter('db_obj'): \"\"\" async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql = (", "inputs_xml = etree.Element('input_params') inputs = caller.data_objects['inputs'] all_inputs = inputs.select_many(where=[], order=[('seq', False)]) async for", "'member_name', 'pyfunc', 'prev', 'align', 'src', 'op', 'tgt') async def before_start_body(caller, xml): # called", "attr_name=None): # create attribute on xml element, but only if not None or", "load_form_xml(caller, xml) #----------------------------------------------------------------------------- # form_funcs #----------------------------------------------------------------------------- async def load_form_xml(caller, xml): # called from", "parser=xsd_parser) except (etree.XMLSyntaxError, ValueError, TypeError) as e: raise AibError(head='XmlError', body=e.args[0]) # update form_definition", "mem_obj #----------------------------------------------------------------------------- memobj_cols = ('name', 'descr', 'parent', 'sequence', 'sub_types', 'tree_params', 'actions', 'clone_from') memcol_cols", "methods #----------------------------------------------------------------------------- method_cols = ('name', 'obj_name', 'action') async def load_methods(caller, xml): # called", "('name', 'obj_name', 'action') async def load_methods(caller, xml): # called from setup_form_methods 'on_start_frame' form_vars", "'inline_form') # inline_xml.set('name', name) # inline_xml.append(frame_xml) # validate result using schema try: etree.fromstring(etree.tostring(form_xml),", "'action', 'validation', 'help_msg') async def load_buttonrow(caller, xml): # 
called from setup_form_buttonrow 'on_start_frame' form_vars", "body.setval('auto_start', await grid_vars.getval('auto_start')) # await body.setval('auto_startrow', await grid_vars.getval('auto_startrow')) if elem_xml is None: elem_xml", "'button') for col in button_cols: await set_if_not_none(button_xml, button, col) await form_vars.setval('buttonrow_xml', buttonrow_xml) #-----------------------------------------------------------------------------", "from setup_form_buttonrow 'before_save' form_vars = caller.data_objects['form_vars'] button = caller.data_objects['button'] buttonrow_xml = etree.Element('button_row') await", "elem_xml.find('cur_filter')[:] = (await grid_vars.getval('filter_xml'))[:] elem_xml.find('cur_sequence')[:] = (await grid_vars.getval('sequence_xml'))[:] elem_xml.find('grid_methods')[:] = (await grid_vars.getval('methods_xml'))[:] elif", "treeframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await treeframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await treeframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await treeframe_vars.getval('methods_xml'))[:]", "seq await dbobj.init(display=False, init_vals=init_vals) await dbobj.save() await dbobj.init() async def dump_db_obj(caller, xml): #", "\"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') \" .format(caller.company, table_id) ) async for", "set_if_not_none(form_xml, form_vars, 'after_start_form') await set_if_not_none(form_xml, form_vars, 'on_close_form') form_xml.append(await form_vars.getval('dbobj_xml')) form_xml.append(await form_vars.getval('memobj_xml')) form_xml.append(await form_vars.getval('inputs_xml'))", "await inline_vars.getval('frame_xml') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row') init_vals['methods_xml']", "called from setup_form_frame.toolbar 'before_save' form_vars = 
caller.data_objects['form_vars'] tool = caller.data_objects['tool'] toolbar_xml = etree.Element('toolbar')", "await dbobj.getval('table_name') db_table = await db.objects.get_db_table( form_vars.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name,", "etree.SubElement(elem_xml, 'grid_methods') elem_xml.find('toolbar')[:] = (await grid_vars.getval('toolbar_xml'))[:] elem_xml.find('cur_columns')[:] = (await grid_vars.getval('columns_xml'))[:] elem_xml.find('cur_filter')[:] = (await", "= await tool.get_val_from_xml(col, tool_xml.get(col)) init_vals['seq'] = seq await tool.init(display=False, init_vals=init_vals) await tool.save() async", "= seq await tool.init(display=False, init_vals=init_vals) await tool.save() async def dump_toolbar(caller, xml): # called", "False)]) async for _ in all_inputs: input_xml = etree.SubElement(inputs_xml, 'input_param') for col in", "caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for col_defn in", "caller.data_objects['body'] body_xml = etree.Element('body') all_body = body.select_many(where=[], order=[('seq', False)]) async for _ in", "trick to prevent running 'on_clean' # could make an alias, without gui link", "'frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml'))", "form_vars = caller.data_objects['form_vars'] tool = caller.data_objects['tool'] toolbar_xml = etree.Element('toolbar') await set_if_not_none(toolbar_xml, form_vars, 'tb_template',", "form_xml.set('name', await form_defn.getval('form_name')) etree.SubElement(form_xml, 'db_objects') etree.SubElement(form_xml, 'mem_objects') etree.SubElement(form_xml, 'input_params') etree.SubElement(form_xml, 'output_params') frame =", "# for col in output_cols} init_vals = {} for col in 
output_cols: init_vals[col]", "'btn_enabled', 'btn_validate', 'action', 'help_msg', 'nb_label', 'subtype_obj', 'subtype_col', 'data_object', 'growable', 'num_grid_rows', 'cursor_name', 'form_name', 'auto_start',", "= elem_xml.find('cur_filter') init_vals['sequence_xml'] = elem_xml.find('cur_sequence') init_vals['methods_xml'] = elem_xml.find('grid_methods') await grid_vars.init(init_vals=init_vals) elif elem_type ==", "in orig_memobj: await obj_names.init(init_vals={'name': obj_name}) orig_memobj.remove(obj_name) else: await obj_names.init(init_vals={ 'name': obj_name, 'descr': await", "obj_names.save() for seq, col_defn in enumerate(db_table.col_list): await col_names.init(init_vals={'name': col_defn.col_name, 'descr': col_defn.short_descr, 'seq': seq})", "inline_params: # inline_xml = etree.SubElement(form_xml, 'inline_form') # inline_xml.set('name', name) # inline_xml.append(frame_xml) # validate", "= caller.data_objects['dbobj'] dbobjs_xml = etree.Element('db_objects') all_dbobj = dbobj.select_many(where=[], order=[('seq', False)]) async for _", "is a trick # we want to 'save' form_vars, to trigger on_clean() #", "'subtype_col', 'data_object', 'growable', 'num_grid_rows', 'cursor_name', 'form_name', 'auto_start', 'auto_startrow', 'toolbar', 'combo_type', 'group_name', 'member_name', 'pyfunc',", "debug async def init_xml(caller, xml): # called from setup_form after form_name if form", "await inputs.init(display=False, init_vals=init_vals) await inputs.save() outputs = caller.data_objects['outputs'] await outputs.delete_all() for seq, output_xml", "async def dump_form_xml(caller, xml): # called from setup_form 'before_save' form_defn = caller.data_objects['form'] form_vars", "outputs.init(display=False, init_vals=init_vals) await outputs.save() async def dump_ioparms(caller, xml): # called from setup_form_ioparams 'do_save'", "init_vals = {} for col in input_cols: init_vals[col] = await inputs.get_val_from_xml(col, input_xml.get(col)) init_vals['seq']", "await 
col_names.save() memobj_xml = await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={ 'name':", "gridframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await gridframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await gridframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await gridframe_vars.getval('methods_xml'))[:]", "await form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template'))) await form_vars.save() button = caller.data_objects['button'] await button.delete_all() for seq, button_xml", "'target', 'required') output_cols = ('name', 'type', 'source') async def load_ioparms(caller, xml): # called", "to store sub_elements # P.S. it is ok to store copies of attributes", "init_vals['seq'] = seq await memobj.init(display=False, init_vals=init_vals) await memobj.save() #set up memcols for this", "frame_vars.init(init_vals=init_vals) for inline_xml in form_xml.iterchildren('inline_form'): # do not descend init_vals = { 'name':", "async def set_if_not_none(elem_xml, db_obj, col_name, attr_name=None): # create attribute on xml element, but", "'frame_methods') await frame_vars.init(init_vals=init_vals) async def dump_inline(caller, xml): # called from setup_form_inlline grid_frame 'before_save'", "await body.setval('growable', await grid_vars.getval('growable')) # await body.setval('num_grid_rows', await grid_vars.getval('num_grid_rows')) # await body.setval('cursor_name', await", "for col in memobj_cols} init_vals = {} for col in memobj_cols: init_vals[col] =", "# called from setup_form_ioparams 'on_start_frame' form_vars = caller.data_objects['form_vars'] inputs_xml = await form_vars.getval('inputs_xml') outputs_xml", "_ in all_body: elem_xml = etree.SubElement(body_xml, await body.getval('type')) for col in body_cols: await", "for _ in all_obj: print(obj_names) all_col = col_names.select_many(where=[], order=[]) async for _ in", 
"subtypes.save() async def dump_body_elem(caller, xml): # called from setup_form_body.grid_frame 'before_save' body = caller.data_objects['body']", "frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) inline_vars = caller.data_objects['inline_vars'] all_inline = inline_vars.select_many(where=[], order=[]) async for _", "'before_save' body = caller.data_objects['body'] body_xml = etree.Element('body') all_body = body.select_many(where=[], order=[('seq', False)]) async", "exclude 'row_id' col_name = col_defn.col_name init_vals[col_name] = await form_vars.getval(col_name) await form_vars.restore() await form_vars.init(init_vals=init_vals,", "await inline_vars.setval('frame_xml', frame_xml) #----------------------------------------------------------------------------- # toolbar #----------------------------------------------------------------------------- tool_cols = ('type', 'label', 'tip', 'lng',", "'tb_title', 'title') all_tools = tool.select_many(where=[], order=[('seq', False)]) async for _ in all_tools: tool_xml", "'btn_label', 'lng', 'btn_default', 'btn_enabled', 'btn_validate', 'action', 'validation', 'help_msg') async def load_buttonrow(caller, xml): #", "= caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') dbobj = caller.data_objects['dbobj'] await dbobj.delete_all() for seq,", "form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] methods_xml = etree.Element('frame_methods') await set_if_not_none(methods_xml, form_vars, 'method_template',", "order=[]) async for _ in all_col: print(col_names) print() \"\"\" \"\"\" obj_name_fld = await", "col_defn.short_descr, 'seq': seq}) await col_names.save() # \"\"\" memobj_xml = await form_vars.getval('memobj_xml') for memobj", "await obj_names.save() for seq, col_defn in enumerate(db_table.col_list): await col_names.init(init_vals={'name': col_defn.col_name, 'descr': col_defn.short_descr, 'seq':", "await 
body.setval('obj_descr', await gridframe_vars.getval('obj_descr')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar')", "await obj_names.delete_all() col_names = caller.data_objects['col_names'] await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj_elem", "await dbobj.init() async def dump_db_obj(caller, xml): # called from setup_form_dbobj 'do_save' form_vars =", "(await gridframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await gridframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await gridframe_vars.getval('methods_xml'))[:] elif elem_type ==", "'lng', 'name', 'obj_name', 'col_name', 'shortcut', 'action') async def before_start_toolbar(caller, xml): # called from", "await dbobj.delete_all() for seq, obj_xml in enumerate(dbobj_xml): # init_vals = {col: await dbobj.get_val_from_xml(col,", "# parent = caller.parent # while True: # if 'obj_names' in parent.data_objects: #", "# called from setup_form_frame.toolbar 'on_start_frame' form_vars = caller.data_objects['form_vars'] toolbar_xml = await form_vars.getval('toolbar_xml') if", "buttonrow_xml = await form_vars.getval('buttonrow_xml') if buttonrow_xml is None: buttonrow_xml = etree.Element('button_row') await form_vars.setval('buttonrow_xml',", "await grid_vars.getval('auto_startrow')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'cur_columns')", "= etree.Element('output_params') outputs = caller.data_objects['outputs'] all_outputs = outputs.select_many(where=[], order=[('seq', False)]) async for _", "await set_if_not_none(form_xml, form_vars, 'on_close_form') form_xml.append(await form_vars.getval('dbobj_xml')) form_xml.append(await form_vars.getval('memobj_xml')) form_xml.append(await form_vars.getval('inputs_xml')) form_xml.append(await form_vars.getval('outputs_xml')) frame_xml", "called from setup_form_memobj 
'before_save' form_vars = caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') orig_memobj =", "= ('col_name', 'col_type', 'data_type', 'short_descr', 'long_descr', 'col_head', 'key_field', 'data_source', 'condition', 'allow_null', 'allow_amend', 'max_len',", "async def dump_methods(caller, xml): # called from setup_form_methods 'before_save' form_vars = caller.data_objects['form_vars'] method", "= await form_vars.get_val_from_xml( 'before_start_form', form_xml.get('before_start_form')) init_vals['after_start_form'] = await form_vars.get_val_from_xml( 'after_start_form', form_xml.get('after_start_form')) init_vals['on_close_form'] =", "set_if_not_none(method_xml, method, col) await form_vars.setval('methods_xml', methods_xml) #----------------------------------------------------------------------------- # body #----------------------------------------------------------------------------- body_cols = ('main_object',", "body_elem #----------------------------------------------------------------------------- async def load_body_elem(caller, xml): # called from setup_form_body.grid_frame 'on_start_frame' body =", "button.get_val_from_xml(col, button_xml.get(col)) init_vals['seq'] = seq await button.init(display=False, init_vals=init_vals) await button.save() async def dump_buttonrow(caller,", "'scale_ptr', 'dflt_val', 'dflt_rule', 'col_checks', 'fkey', 'choices', 'sql') async def load_mem_obj(caller, xml): # called", "('type', 'label', 'tip', 'lng', 'name', 'obj_name', 'col_name', 'shortcut', 'action') async def before_start_toolbar(caller, xml):", "init_vals=init_vals) await memcol.save() await memobj.init() await memobj.init() async def dump_mem_obj(caller, xml): # called", "init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['combo_type'] = await body.getval('combo_type') init_vals['toolbar_xml']", "'validation', 'help_msg') async def load_buttonrow(caller, xml): 
# called from setup_form_buttonrow 'on_start_frame' form_vars =", "await grid_vars.getval('form_name')) # await body.setval('auto_start', await grid_vars.getval('auto_start')) # await body.setval('auto_startrow', await grid_vars.getval('auto_startrow')) if", "caller.data_objects['frame_vars'] if inline_vars.exists: frame_xml = await inline_vars.getval('frame_xml') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] =", "import os.path import __main__ schema_path = os.path.join(os.path.dirname(__main__.__file__), 'schemas') xsd_parser = etree.XMLParser( schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')),", "'button_row') etree.SubElement(frame, 'frame_methods') await form_defn.setval('form_xml', form_xml) await load_form_xml(caller, xml) #----------------------------------------------------------------------------- # form_funcs #-----------------------------------------------------------------------------", "caller.parent # while True: # if 'obj_names' in parent.data_objects: # caller.data_objects['obj_names'] = parent.data_objects['obj_names']", "None: init_vals[fld.col_name] = val await body.init(display=False, init_vals=init_vals) await body.save(from_upd_on_save=True) # a trick to", "col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj in dbobj_xml.iter('db_obj'): async with db_session.get_connection() as", "after form_name if form does not exist form_defn = caller.data_objects['form'] form_xml = etree.Element('form')", "= caller.data_objects['subtypes'] subtypes_xml = etree.Element('subtypes_temp') all_subtypes = subtypes.select_many(where=[], order=[('subtype_id', False)]) async for _", "called from setup_form_memobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') memobj =", "enumerate(toolbar_xml): # init_vals = {col: tool.get_val_from_xml(col, tool_xml.get(col)) # for col in tool_cols} init_vals", "def 
dump_mem_obj(caller, xml): # called from setup_form_memobj 'before_save' form_vars = caller.data_objects['form_vars'] memobj_xml =", "obj_names.save() obj_row_id = await obj_names.getval('row_id') for seq, memcol in enumerate(memobj.iter('mem_col')): await col_names.init(init_vals={'name': memcol.get('col_name'),", "async def load_inline(caller, xml): # called from setup_form_inline grid_frame 'on_start_frame' inline_vars = caller.data_objects['inline_vars']", "caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name'))", "tool_cols = ('type', 'label', 'tip', 'lng', 'name', 'obj_name', 'col_name', 'shortcut', 'action') async def", "= await form_vars.getval('dbobj_xml') for dbobj_elem in dbobj_xml.iter('db_obj'): \"\"\" async with db_session.get_connection() as db_mem_conn:", "memobj.get('name'), 'descr': memobj.get('descr')}) await obj_names.save() obj_row_id = await obj_names.getval('row_id') for seq, memcol in", "'name': obj_name, 'descr': await memobj.getval('descr'), # 'seq': await memobj.getval('seq'), # seq is db_obj", "method.save() async def dump_methods(caller, xml): # called from setup_form_methods 'before_save' form_vars = caller.data_objects['form_vars']", "await col_names.init(init_vals={'name': col_defn.col_name, 'descr': col_defn.short_descr, 'seq': seq}) await col_names.save() # \"\"\" memobj_xml =", "\" \"ORDER BY col_type, seq\" .format(caller.company, table_id) ) async for col_name, descr in", "dbobj.init(display=False, init_vals=init_vals) await dbobj.save() await dbobj.init() async def dump_db_obj(caller, xml): # called from", "'on_clean' # could make an alias, without gui link (cleaner?) 
async def dump_body(caller,", "dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj in dbobj_xml.iter('db_obj'): async with db_session.get_connection() as db_mem_conn:", "def dump_methods(caller, xml): # called from setup_form_methods 'before_save' form_vars = caller.data_objects['form_vars'] method =", "output_cols: await set_if_not_none(output_xml, outputs, col) await form_vars.setval('outputs_xml', outputs_xml) #----------------------------------------------------------------------------- # inline forms #-----------------------------------------------------------------------------", "in method_cols: init_vals[col] = await method.get_val_from_xml(col, method_xml.get(col)) init_vals['seq'] = seq await method.init(display=False, init_vals=init_vals)", "for dbobj_elem in dbobj_xml.iter('db_obj'): \"\"\" async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db", "await obj_names.init(init_vals={ 'name': obj_name, 'descr': await memobj.getval('descr'), # 'seq': await memobj.getval('seq'), # seq", "set_if_not_none(elem_xml, db_obj, col_name, attr_name=None): # create attribute on xml element, but only if", "await body.setval('cursor_name', await grid_vars.getval('cursor_name')) # await body.setval('form_name', await grid_vars.getval('form_name')) # await body.setval('auto_start', await", "elem_xml.find('frame_methods')[:] = (await treeframe_vars.getval('methods_xml'))[:] elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await body.setval('subtype_obj',", "async for _ in all_dbobj: dbobj_xml = etree.SubElement(dbobjs_xml, 'db_obj') for col in dbobj_cols:", "init_vals = {col: method.get_val_from_xml(col, method_xml.get(col)) # for col in method_cols} init_vals = {}", "e: raise AibError(head='XmlError', body=e.args[0]) # update form_definition with new form_xml await form_defn.setval('form_xml', form_xml)", "col in tool_cols} init_vals = {} for col in tool_cols: init_vals[col] = await", "if elem_xml is None: 
elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'cur_columns') etree.SubElement(elem_xml, 'cur_filter')", "if 'obj_names' in parent.data_objects: # caller.data_objects['obj_names'] = parent.data_objects['obj_names'] # caller.data_objects['col_names'] = parent.data_objects['col_names'] #", "'cursor_name', 'form_name', 'auto_start', 'auto_startrow', 'toolbar', 'combo_type', 'group_name', 'member_name', 'pyfunc', 'prev', 'align', 'src', 'op',", "'before_save' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] frame_xml = etree.Element('frame') await set_if_not_none(frame_xml, frame_vars,", "memobj.init() async def dump_mem_obj(caller, xml): # called from setup_form_memobj 'before_save' form_vars = caller.data_objects['form_vars']", "'deleted_id') \" \"ORDER BY col_type, seq\" .format(caller.company, table_id) ) async for col_name, descr", "form_vars, 'tb_title', 'title') all_tools = tool.select_many(where=[], order=[('seq', False)]) async for _ in all_tools:", "body.setval('cursor_name', await grid_vars.getval('cursor_name')) # await body.setval('form_name', await grid_vars.getval('form_name')) # await body.setval('auto_start', await grid_vars.getval('auto_start'))", "dbobj in dbobj_xml.iter('db_obj'): async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql =", "body.setval('obj_descr', await grid_vars.getval('obj_descr')) # await body.setval('growable', await grid_vars.getval('growable')) # await body.setval('num_grid_rows', await grid_vars.getval('num_grid_rows'))", "'tb_template', 'template') await set_if_not_none(toolbar_xml, form_vars, 'tb_title', 'title') all_tools = tool.select_many(where=[], order=[('seq', False)]) async", "in output_cols: init_vals[col] = await outputs.get_val_from_xml(col, output_xml.get(col)) init_vals['seq'] = seq await outputs.init(display=False, init_vals=init_vals)", "etree.Element('toolbar') await 
set_if_not_none(toolbar_xml, form_vars, 'tb_template', 'template') await set_if_not_none(toolbar_xml, form_vars, 'tb_title', 'title') all_tools =", "inline_xml in form_xml.iterchildren('inline_form'): # do not descend init_vals = { 'name': inline_xml.get('name'), 'title':", "= caller.data_objects['col_names'] dbobj = caller.data_objects['dbobj'] dbobjs_xml = etree.Element('db_objects') all_dbobj = dbobj.select_many(where=[], order=[('seq', False)])", "\"WHERE table_id = {} AND col_type != 'virt' \" \"AND col_name NOT IN", "init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') await frame_vars.init(init_vals=init_vals) for inline_xml", "tool_xml.get(col)) # for col in tool_cols} init_vals = {} for col in tool_cols:", "# \"\"\" table_name = await dbobj.getval('table_name') db_table = await db.objects.get_db_table( form_vars.context, caller.company, table_name)", "#----------------------------------------------------------------------------- # toolbar #----------------------------------------------------------------------------- tool_cols = ('type', 'label', 'tip', 'lng', 'name', 'obj_name', 'col_name',", "descr}) await col_names.save() memobj_xml = await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={", ") cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name':", "caller.data_objects['form_vars'] toolbar_xml = await form_vars.getval('toolbar_xml') if toolbar_xml is None: toolbar_xml = etree.Element('toolbar') await", "await memobj.init(display=False, init_vals=init_vals) await memobj.save() #set up memcols for this memobj for seq,", "'frame') etree.SubElement(frame, 'toolbar') etree.SubElement(frame, 'body') etree.SubElement(frame, 'button_row') etree.SubElement(frame, 'frame_methods') await form_defn.setval('form_xml', form_xml) await", 
"button.select_many(where=[], order=[('seq', False)]) async for _ in all_buttons: button_xml = etree.SubElement(buttonrow_xml, 'button') for", "make an alias, without gui link (cleaner?) async def dump_body(caller, xml): # called", "cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj.get('name'),", "not descend init_vals = { 'name': inline_xml.get('name'), 'title': inline_xml.get('title'), 'frame_xml': inline_xml.find('frame'), } await", "'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await gridframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await gridframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] =", "'db_objects') etree.SubElement(form_xml, 'mem_objects') etree.SubElement(form_xml, 'input_params') etree.SubElement(form_xml, 'output_params') frame = etree.SubElement(form_xml, 'frame') etree.SubElement(frame, 'toolbar')", "memobjs_xml = etree.Element('mem_objects') all_memobj = memobj.select_many(where=[], order=[('seq', False)]) async for _ in all_memobj:", "= caller.data_objects['gridframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['toolbar_xml'] =", "# await body.setval('growable', await grid_vars.getval('growable')) # await body.setval('num_grid_rows', await grid_vars.getval('num_grid_rows')) # await body.setval('cursor_name',", "async for col_name, descr in await conn.exec_sql(sql): await col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name,", "await body.setval('main_object', await gridframe_vars.getval('main_object')) # await body.setval('obj_descr', await gridframe_vars.getval('obj_descr')) if elem_xml is None:", "with new form_xml await form_defn.setval('form_xml', form_xml) \"\"\" # the next bit is a", "call form_vars.restore(), which triggers on_clean() # call form_vars.init() with init_vals, which 
puts back", "frame_xml = etree.Element('frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml'))", "init_vals = {col: tool.get_val_from_xml(col, tool_xml.get(col)) # for col in tool_cols} init_vals = {}", "init_vals['obj_descr'] = frame_xml.get('obj_descr') else: frame_xml = etree.Element('frame') init_vals={} init_vals['toolbar_xml'] = etree.SubElement(frame_xml, 'toolbar') init_vals['body_xml']", "button.init(display=False, init_vals=init_vals) await button.save() async def dump_buttonrow(caller, xml): # called from setup_form_buttonrow 'before_save'", "sub_elements # P.S. it is ok to store copies of attributes in separate", "await body.getval('elem') if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] init_vals={} init_vals['toolbar_xml'] = elem_xml.find('toolbar')", "= parent.data_objects['col_names'] # break # parent = parent.parent pass async def load_toolbar(caller, xml):", "os.path import __main__ schema_path = os.path.join(os.path.dirname(__main__.__file__), 'schemas') xsd_parser = etree.XMLParser( schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')), attribute_defaults=True,", "outputs = caller.data_objects['outputs'] all_outputs = outputs.select_many(where=[], order=[('seq', False)]) async for _ in all_outputs:", "= caller.data_objects['inline_vars'] all_inline = inline_vars.select_many(where=[], order=[]) async for _ in all_inline: inline_xml =", "memobj in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={ 'name': memobj.get('name'), 'descr': memobj.get('descr')}) await obj_names.save() obj_row_id =", "'nb_label', 'subtype_obj', 'subtype_col', 'data_object', 'growable', 'num_grid_rows', 'cursor_name', 'form_name', 'auto_start', 'auto_startrow', 'toolbar', 'combo_type', 'group_name',", "in enumerate(dbobj_xml): # init_vals = {col: await dbobj.get_val_from_xml(col, obj_xml.get(col)) # for 
col in", "await obj_names.init(init_vals={ 'name': dbobj_elem.get('name'), 'descr': descr}) await obj_names.save() sql = ( \"SELECT col_name,", "for _ in all_tools: tool_xml = etree.SubElement(toolbar_xml, 'tool') for col in tool_cols: await", "running 'on_clean' for seq, elem_xml in enumerate(body_xml): init_vals = {} init_vals['elem'] = elem_xml", "however, inline_xml is a 'list' which includes etree Elements # this cannot be", "await form_vars.setval('method_template', await form_vars.get_val_from_xml('method_template', methods_xml.get('template'))) await form_vars.save() for seq, method_xml in enumerate(methods_xml): #", "called from setup_form_inlline grid_frame 'before_save' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] frame_xml =", "form_vars = caller.data_objects['form_vars'] inputs_xml = await form_vars.getval('inputs_xml') outputs_xml = await form_vars.getval('outputs_xml') inputs =", "= caller.data_objects['memcol'] await memcol.delete_all() await memobj.delete_all() for seq, obj_xml in enumerate(memobj_xml): # init_vals", "elem_xml.find('toolbar')[:] = (await grid_vars.getval('toolbar_xml'))[:] elem_xml.find('cur_columns')[:] = (await grid_vars.getval('columns_xml'))[:] elem_xml.find('cur_filter')[:] = (await grid_vars.getval('filter_xml'))[:] elem_xml.find('cur_sequence')[:]", "for col in button_cols} init_vals = {} for col in button_cols: init_vals[col] =", "elem_xml init_vals['type'] = elem_xml.tag init_vals['seq'] = seq for fld in body.sub_types['type'][elem_xml.tag]: val =", "for col in memcol_cols: await set_if_not_none(memcol_xml, memcol, col) obj_name = await memobj.getval('name') if", "await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for seq, col_defn in enumerate(db_table.col_list):", "obj_name = await dbobj.getval('name') if obj_name in orig_dbobj: orig_dbobj.remove(obj_name) else: \"\"\" async with", "await conn.exec_sql(sql) table_id, 
descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj_elem.get('name'), 'descr': descr})", "await form_vars.setval('inputs_xml', inputs_xml) outputs_xml = etree.Element('output_params') outputs = caller.data_objects['outputs'] all_outputs = outputs.select_many(where=[], order=[('seq',", "init_vals[col] = await outputs.get_val_from_xml(col, output_xml.get(col)) init_vals['seq'] = seq await outputs.init(display=False, init_vals=init_vals) await outputs.save()", "dump_ioparms(caller, xml): # called from setup_form_ioparams 'do_save' form_vars = caller.data_objects['form_vars'] inputs_xml = etree.Element('input_params')", "load_db_obj(caller, xml): # called from setup_form_dbobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] dbobj_xml = await", "init_vals, which puts back the values init_vals = {} for col_defn in form_vars.db_table.col_list[1:]:", "body.getval('type')) for col in body_cols: await set_if_not_none(elem_xml, body, col) elem_xml[:] = (await body.getval('elem'))[:]", "OrderedDict as OD from lxml import etree # parser = etree.XMLParser(remove_blank_text=True) import db.objects", "output_cols} init_vals = {} for col in output_cols: init_vals[col] = await outputs.get_val_from_xml(col, output_xml.get(col))", "{} for col in memobj_cols: init_vals[col] = await memobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq", "form_vars.init() await frame_vars.init() return init_vals={} init_vals['dbobj_xml'] = form_xml.find('db_objects') init_vals['memobj_xml'] = form_xml.find('mem_objects') init_vals['inputs_xml'] =", "= caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] all_obj = obj_names.select_many(where=[], order=[]) async for _ in", "the save fails # the trick is as follows - # save all", "form_vars.init() with init_vals, which puts back the values init_vals = {} for col_defn", "output_xml.get(col)) init_vals['seq'] = seq await outputs.init(display=False, init_vals=init_vals) await 
outputs.save() async def dump_ioparms(caller, xml):", "prevent running 'on_clean' for seq, elem_xml in enumerate(body_xml): init_vals = {} init_vals['elem'] =", "col_name, short_descr FROM {}.db_columns \" \"WHERE table_id = {} \" \"AND col_name NOT", "common import AibError from common import log, debug async def init_xml(caller, xml): #", "= caller.data_objects['body'] # N.B. do not use this to store attributes - use", "cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj_elem.get('name'), 'descr': descr}) await obj_names.save() sql = ( \"SELECT", "await form_vars.getval('buttonrow_xml') if buttonrow_xml is None: buttonrow_xml = etree.Element('button_row') await form_vars.setval('buttonrow_xml', buttonrow_xml) await", "inputs.select_many(where=[], order=[('seq', False)]) async for _ in all_inputs: input_xml = etree.SubElement(inputs_xml, 'input_param') for", "'sub_types', 'tree_params', 'actions', 'clone_from') memcol_cols = ('col_name', 'col_type', 'data_type', 'short_descr', 'long_descr', 'col_head', 'key_field',", "body.getval('combo_type') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods')", "= ('btn_id', 'btn_label', 'lng', 'btn_default', 'btn_enabled', 'btn_validate', 'action', 'validation', 'help_msg') async def load_buttonrow(caller,", "inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] if inline_vars.exists: frame_xml = await inline_vars.getval('frame_xml') init_vals={}", "'btn_enabled', 'btn_validate', 'action', 'validation', 'help_msg') async def load_buttonrow(caller, xml): # called from setup_form_buttonrow", "in form_xml.iterchildren('inline_form'): # do not descend init_vals = { 'name': inline_xml.get('name'), 'title': inline_xml.get('title'),", "await inline_vars.save() async def dump_form_xml(caller, xml): # called from setup_form 
'before_save' form_defn =", "= await body.getfld('col_name') col_names = col_name_fld.foreign_key['tgt_field'].db_obj await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for", "= elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await treeframe_vars.init(init_vals=init_vals)", "await form_defn.setval('form_xml', form_xml) await load_form_xml(caller, xml) #----------------------------------------------------------------------------- # form_funcs #----------------------------------------------------------------------------- async def load_form_xml(caller,", "enumerate(memobj_xml): # init_vals = {col: memobj.get_val_from_xml(col, obj_xml.get(col)) # for col in memobj_cols} init_vals", "await form_defn.getval('form_name')) form_xml.set('title', await form_defn.getval('title')) await set_if_not_none(form_xml, form_vars, 'before_start_form') await set_if_not_none(form_xml, form_vars, 'after_start_form')", "= ('name', 'type', 'target', 'required') output_cols = ('name', 'type', 'source') async def load_ioparms(caller,", "col_name_fld = await body.getfld('col_name') col_names = col_name_fld.foreign_key['tgt_field'].db_obj await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml')", "# while True: # if 'obj_names' in parent.data_objects: # caller.data_objects['obj_names'] = parent.data_objects['obj_names'] #", "'descr': db_table.short_descr}) await obj_names.save() for seq, col_defn in enumerate(db_table.col_list): await col_names.init(init_vals={'name': col_defn.col_name, 'descr':", "init_vals['buttonrow_xml'] = etree.SubElement(frame_xml, 'button_row') init_vals['methods_xml'] = etree.SubElement(frame_xml, 'frame_methods') await frame_vars.init(init_vals=init_vals) async def dump_inline(caller,", "await memcol.getval('short_descr'), 'seq': await memcol.getval('seq')}) await col_names.save() for 
deleted_obj in orig_memobj: # anything", "await col_names.save() # \"\"\" memobj_xml = await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'): await", "# anything left has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('dbobj_xml',", "('name', 'type', 'target', 'required') output_cols = ('name', 'type', 'source') async def load_ioparms(caller, xml):", "table_name = await dbobj.getval('table_name') db_table = await db.objects.get_db_table( form_vars.context, caller.company, table_name) await obj_names.init(init_vals={", "all_memcol = memcol.select_many(where=[], order=[('seq', False)]) async for _ in all_memcol: memcol_xml = etree.SubElement(memobj_xml,", "#----------------------------------------------------------------------------- async def load_inline(caller, xml): # called from setup_form_inline grid_frame 'on_start_frame' inline_vars =", "'frame_methods') elem_xml.find('toolbar')[:] = (await gridframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await gridframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await gridframe_vars.getval('buttonrow_xml'))[:]", "elem_xml[:] = subtypes_xml[:] elif elem_xml is None: elem_xml = etree.Element(elem_type) await body.setval('elem', elem_xml)", "obj_names.init(init_vals={ 'name': dbobj_elem.get('name'), 'descr': descr}) await obj_names.save() sql = ( \"SELECT col_name, short_descr", "'shortcut', 'action') async def before_start_toolbar(caller, xml): # called from setup_form_toolbar 'before_start_form' # parent", "body.getval('subtype_obj'), 'subtype_col': await body.getval('subtype_col'), 'lng': await body.getval('lng'), }) subtypes = caller.data_objects['subtypes'] await subtypes.delete_all()", "xml): # called from setup_form_body.grid_frame 'before_save' body = caller.data_objects['body'] elem_type = await body.getval('type')", "form_vars.setval('method_template', await 
form_vars.get_val_from_xml('method_template', methods_xml.get('template'))) await form_vars.save() for seq, method_xml in enumerate(methods_xml): # init_vals", "'grid_methods') elem_xml.find('toolbar')[:] = (await grid_vars.getval('toolbar_xml'))[:] elem_xml.find('cur_columns')[:] = (await grid_vars.getval('columns_xml'))[:] elem_xml.find('cur_filter')[:] = (await grid_vars.getval('filter_xml'))[:]", "await button.get_val_from_xml(col, button_xml.get(col)) init_vals['seq'] = seq await button.init(display=False, init_vals=init_vals) await button.save() async def", "if xml_val is not None: if attr_name is None: # if not specified,", "io_parms #----------------------------------------------------------------------------- input_cols = ('name', 'type', 'target', 'required') output_cols = ('name', 'type', 'source')", "on_clean() # however, inline_xml is a 'list' which includes etree Elements # this", "etree.SubElement(memobj_xml, 'mem_col') for col in memcol_cols: await set_if_not_none(memcol_xml, memcol, col) obj_name = await", "'form.xsd')), attribute_defaults=True, remove_comments=True, remove_blank_text=True) from common import AibError from common import log, debug", "init_vals=init_vals) await outputs.save() async def dump_ioparms(caller, xml): # called from setup_form_ioparams 'do_save' form_vars", "= caller.data_objects['frame_vars'] if inline_vars.exists: frame_xml = await inline_vars.getval('frame_xml') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml']", "etree.SubElement(methods_xml, 'method') for col in method_cols: await set_if_not_none(method_xml, method, col) await form_vars.setval('methods_xml', methods_xml)", "elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] # await body.setval('main_object', await gridframe_vars.getval('main_object')) #", "await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj_elem.get('name'), 'descr': descr}) await obj_names.save() sql = (", 
"await set_if_not_none(toolbar_xml, form_vars, 'tb_title', 'title') all_tools = tool.select_many(where=[], order=[('seq', False)]) async for _", "obj_xml.get(col)) init_vals['seq'] = seq await memobj.init(display=False, init_vals=init_vals) await memobj.save() #set up memcols for", "= caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') orig_memobj = set((memobj.get('name') for memobj in memobj_xml))", "from common import log, debug async def init_xml(caller, xml): # called from setup_form", "await gridframe_vars.init(init_vals=init_vals) elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] init_vals={} init_vals['main_object'] = await", "body.getval('type') elem_xml = await body.getval('elem') if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] init_vals={}", "setup_form_memobj 'before_save' form_vars = caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') orig_memobj = set((memobj.get('name') for", "_ in all_inline: inline_xml = etree.SubElement(form_xml, 'inline_form') inline_xml.set('name', await inline_vars.getval('name')) inline_xml.set('title', await inline_vars.getval('title'))", "the trick is as follows - # save all values in init_vals #", "seq, memcol in enumerate(memobj.iter('mem_col')): await col_names.init(init_vals={'name': memcol.get('col_name'), 'descr': memcol.get('short_descr'), 'seq': seq}) await col_names.save()", "method_xml in enumerate(methods_xml): # init_vals = {col: method.get_val_from_xml(col, method_xml.get(col)) # for col in", "False)]) async for _ in all_memobj: memobj_xml = etree.SubElement(memobjs_xml, 'mem_obj') for col in", "= ('type', 'label', 'tip', 'lng', 'name', 'obj_name', 'col_name', 'shortcut', 'action') async def before_start_toolbar(caller,", "'descr', 'parent', 'sequence', 'sub_types', 'tree_params', 'actions', 'clone_from') memcol_cols = ('col_name', 'col_type', 'data_type', 'short_descr',", "= ( \"SELECT 
col_name, short_descr FROM {}.db_columns \" \"WHERE table_id = {} AND", "async def load_db_obj(caller, xml): # called from setup_form_dbobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] dbobj_xml", "caller.data_objects['form_vars'] inputs_xml = await form_vars.getval('inputs_xml') outputs_xml = await form_vars.getval('outputs_xml') inputs = caller.data_objects['inputs'] await", "inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] frame_xml = etree.Element('frame') await set_if_not_none(frame_xml, frame_vars, 'main_object')", "setup_form_ioparams 'on_start_frame' form_vars = caller.data_objects['form_vars'] inputs_xml = await form_vars.getval('inputs_xml') outputs_xml = await form_vars.getval('outputs_xml')", "col_type, seq\" .format(caller.company, table_id) ) async for col_name, descr in await conn.exec_sql(sql): await", "body.setval('data_object', await grid_vars.getval('data_object')) # await body.setval('obj_descr', await grid_vars.getval('obj_descr')) # await body.setval('growable', await grid_vars.getval('growable'))", "for deleted_obj in orig_memobj: # anything left has been deleted await obj_names.init(init_vals={'name': deleted_obj})", "etree.Element('db_objects') all_dbobj = dbobj.select_many(where=[], order=[('seq', False)]) async for _ in all_dbobj: dbobj_xml =", "outputs = caller.data_objects['outputs'] await outputs.delete_all() for seq, output_xml in enumerate(outputs_xml): # init_vals =", "= form_xml.find('output_params') init_vals['before_start_form'] = await form_vars.get_val_from_xml( 'before_start_form', form_xml.get('before_start_form')) init_vals['after_start_form'] = await form_vars.get_val_from_xml( 'after_start_form',", "form_vars.getval('memobj_xml') orig_memobj = set((memobj.get('name') for memobj in memobj_xml)) obj_names = caller.data_objects['obj_names'] col_names =", "in output_cols: await set_if_not_none(output_xml, outputs, col) await form_vars.setval('outputs_xml', 
outputs_xml) #----------------------------------------------------------------------------- # inline forms", "input_cols} init_vals = {} for col in input_cols: init_vals[col] = await inputs.get_val_from_xml(col, input_xml.get(col))", "= elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await treeframe_vars.init(init_vals=init_vals) elif elem_type ==", "outputs, col) await form_vars.setval('outputs_xml', outputs_xml) #----------------------------------------------------------------------------- # inline forms #----------------------------------------------------------------------------- async def load_inline(caller,", "caller.data_objects['form_vars'] buttonrow_xml = await form_vars.getval('buttonrow_xml') if buttonrow_xml is None: buttonrow_xml = etree.Element('button_row') await", "from setup_form_body 'before_start_form' # parent = caller.parent # while True: # if 'obj_names'", "= frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') await frame_vars.init(init_vals=init_vals)", "form_vars.getval('memobj_xml')) form_xml.append(await form_vars.getval('inputs_xml')) form_xml.append(await form_vars.getval('outputs_xml')) frame_xml = etree.SubElement(form_xml, 'frame') await set_if_not_none(frame_xml, frame_vars, 'main_object')", "= await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj_elem.get('name'), 'descr': descr}) await obj_names.save() sql =", "(await treeframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await treeframe_vars.getval('methods_xml'))[:] elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars']", "= ( \"SELECT col_name, short_descr FROM {}.db_columns \" \"WHERE table_id = {} \"", "treeframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] 
= (await treeframe_vars.getval('methods_xml'))[:] elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await", "memcol, col) obj_name = await memobj.getval('name') if obj_name in orig_memobj: await obj_names.init(init_vals={'name': obj_name})", "in inline_params: # inline_xml = etree.SubElement(form_xml, 'inline_form') # inline_xml.set('name', name) # inline_xml.append(frame_xml) #", "await form_vars.init(init_vals=init_vals) obj_names = caller.data_objects['obj_names'] await obj_names.delete_all() col_names = caller.data_objects['col_names'] await col_names.delete_all() dbobj_xml", "('name', 'descr', 'parent', 'sequence', 'sub_types', 'tree_params', 'actions', 'clone_from') memcol_cols = ('col_name', 'col_type', 'data_type',", "order=[('seq', False)]) async for _ in all_memobj: memobj_xml = etree.SubElement(memobjs_xml, 'mem_obj') for col", "dbobj.init() async def dump_db_obj(caller, xml): # called from setup_form_dbobj 'do_save' form_vars = caller.data_objects['form_vars']", "in orig_dbobj: # anything left has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete()", "'choices', 'sql') async def load_mem_obj(caller, xml): # called from setup_form_memobj 'on_start_frame' form_vars =", "'align', 'src', 'op', 'tgt') async def before_start_body(caller, xml): # called from setup_form_body 'before_start_form'", "is not None: if attr_name is None: # if not specified, use col_name", "dump_body_elem(caller, xml): # called from setup_form_body.grid_frame 'before_save' body = caller.data_objects['body'] elem_type = await", "be serialised to JSON, so the save fails # the trick is as", "await memcol.get_val_from_xml(col, memcol_xml.get(col)) init_vals['seq'] = seq await memcol.init(display=False, init_vals=init_vals) await memcol.save() await memobj.init()", "'before_start_form') await set_if_not_none(form_xml, form_vars, 'after_start_form') await set_if_not_none(form_xml, form_vars, 'on_close_form') 
form_xml.append(await form_vars.getval('dbobj_xml')) form_xml.append(await form_vars.getval('memobj_xml'))", "= await form_vars.getval(col_name) await form_vars.restore() await form_vars.init(init_vals=init_vals, display=False) form_vars.init_vals = {} \"\"\" #-----------------------------------------------------------------------------", "if form_xml is None: await form_vars.init() await frame_vars.init() return init_vals={} init_vals['dbobj_xml'] = form_xml.find('db_objects')", "orig_memobj: # anything left has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await", "subtype_vars.getval('lng')) if elem_xml is None: elem_xml = etree.Element(elem_type) subtypes = caller.data_objects['subtypes'] subtypes_xml =", "etree.SubElement(frame, 'body') etree.SubElement(frame, 'button_row') etree.SubElement(frame, 'frame_methods') await form_defn.setval('form_xml', form_xml) await load_form_xml(caller, xml) #-----------------------------------------------------------------------------", "False)]) async for _ in all_tools: tool_xml = etree.SubElement(toolbar_xml, 'tool') for col in", "body.getfld('obj_name') obj_names = obj_name_fld.foreign_key['tgt_field'].db_obj await obj_names.delete_all() col_name_fld = await body.getfld('col_name') col_names = col_name_fld.foreign_key['tgt_field'].db_obj", "'before_save' form_vars = caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') orig_memobj = set((memobj.get('name') for memobj", "None or equal to default if xml_val is not None: if attr_name is", "form_vars.getval('inputs_xml') outputs_xml = await form_vars.getval('outputs_xml') inputs = caller.data_objects['inputs'] await inputs.delete_all() for seq, input_xml", "col_name NOT IN ('row_id', 'created_id', 'deleted_id') \" \"ORDER BY col_type, seq\" .format(caller.company, table_id)", "body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['toolbar_xml'] = 
elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] =", "form_xml = await form_defn.getval('form_xml') if form_xml is None: await form_vars.init() await frame_vars.init() return", "col_defn.col_name init_vals[col_name] = await form_vars.getval(col_name) await form_vars.restore() await form_vars.init(init_vals=init_vals, display=False) form_vars.init_vals = {}", "n/a }) await obj_names.save() all_cols = memcol.select_many(where=[], order=[]) async for _ in all_cols:", "await obj_names.delete_all() col_name_fld = await body.getfld('col_name') col_names = col_name_fld.foreign_key['tgt_field'].db_obj await col_names.delete_all() dbobj_xml =", "all_obj = obj_names.select_many(where=[], order=[]) async for _ in all_obj: print(obj_names) all_col = col_names.select_many(where=[],", "'on_start_frame' form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') dbobj = caller.data_objects['dbobj'] await dbobj.delete_all()", "None: body_xml = etree.Element('body') await form_vars.setval('body_xml', body_xml) await body.delete_all(from_upd_on_save=True) # a trick to", "await body.init(display=False, init_vals=init_vals) await body.save(from_upd_on_save=True) # a trick to prevent running 'on_clean' #", "'name': memobj.get('name'), 'descr': memobj.get('descr')}) await obj_names.save() obj_row_id = await obj_names.getval('row_id') for seq, memcol", "'body_xml': subtype, }) await subtypes.save() async def dump_body_elem(caller, xml): # called from setup_form_body.grid_frame", "= await form_vars.getval('dbobj_xml') orig_dbobj = set((dbobj.get('name') for dbobj in dbobj_xml)) obj_names = caller.data_objects['obj_names']", "await body.save(from_upd_on_save=True) # a trick to prevent running 'on_clean' # could make an", "async for _ in all_body: elem_xml = etree.SubElement(body_xml, await body.getval('type')) for col in", "memobj_cols = ('name', 'descr', 'parent', 'sequence', 'sub_types', 'tree_params', 
'actions', 'clone_from') memcol_cols = ('col_name',", "set_if_not_none(toolbar_xml, form_vars, 'tb_template', 'template') await set_if_not_none(toolbar_xml, form_vars, 'tb_title', 'title') all_tools = tool.select_many(where=[], order=[('seq',", "subtype_vars.init(init_vals={ 'subtype_obj': await body.getval('subtype_obj'), 'subtype_col': await body.getval('subtype_col'), 'lng': await body.getval('lng'), }) subtypes =", "def before_start_toolbar(caller, xml): # called from setup_form_toolbar 'before_start_form' # parent = caller.parent #", "await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj.get('name'), 'descr': descr}) await obj_names.save() sql = (", "orig_memobj: await obj_names.init(init_vals={'name': obj_name}) orig_memobj.remove(obj_name) else: await obj_names.init(init_vals={ 'name': obj_name, 'descr': await memobj.getval('descr'),", "init_vals['seq'] = seq for fld in body.sub_types['type'][elem_xml.tag]: val = await body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name)) if", "# inline_params = await form_vars.getval('inline_xml') # for name, frame_xml in inline_params: # inline_xml", "all_inputs: input_xml = etree.SubElement(inputs_xml, 'input_param') for col in input_cols: await set_if_not_none(input_xml, inputs, col)", "elem_xml.find('cur_columns')[:] = (await grid_vars.getval('columns_xml'))[:] elem_xml.find('cur_filter')[:] = (await grid_vars.getval('filter_xml'))[:] elem_xml.find('cur_sequence')[:] = (await grid_vars.getval('sequence_xml'))[:] elem_xml.find('grid_methods')[:]", "val is not None: init_vals[fld.col_name] = val await body.init(display=False, init_vals=init_vals) await body.save(from_upd_on_save=True) #", "body.setval('obj_descr', await treeframe_vars.getval('obj_descr')) await body.setval('combo_type', await treeframe_vars.getval('combo_type')) if elem_xml is None: elem_xml =", "_ in all_outputs: output_xml = etree.SubElement(outputs_xml, 'output_param') for col in output_cols: await set_if_not_none(output_xml,", 
"table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj_elem.get('name'), 'descr': descr}) await obj_names.save()", "in memcol_cols: init_vals[col] = await memcol.get_val_from_xml(col, memcol_xml.get(col)) init_vals['seq'] = seq await memcol.init(display=False, init_vals=init_vals)", "= await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': obj_name, 'descr':", "buttonrow_xml) #----------------------------------------------------------------------------- # methods #----------------------------------------------------------------------------- method_cols = ('name', 'obj_name', 'action') async def load_methods(caller,", "async for _ in all_memobj: memobj_xml = etree.SubElement(memobjs_xml, 'mem_obj') for col in memobj_cols:", "body.setval('subtype_col', await subtype_vars.getval('subtype_col')) await body.setval('lng', await subtype_vars.getval('lng')) if elem_xml is None: elem_xml =", "is db_obj then mem_obj, so n/a }) await obj_names.save() all_cols = memcol.select_many(where=[], order=[])", "been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('memobj_xml', memobjs_xml) #----------------------------------------------------------------------------- # io_parms", "= '{}'\" .format(caller.company, await dbobj.getval('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr =", "import AibError from common import log, debug async def init_xml(caller, xml): # called", "= caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] await", "def before_start_body(caller, xml): # called from setup_form_body 'before_start_form' # parent = caller.parent #", "= await body.getval('obj_descr') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = 
elem_xml.find('button_row') init_vals['methods_xml']", "etree.SubElement(form_xml, 'frame') etree.SubElement(frame, 'toolbar') etree.SubElement(frame, 'body') etree.SubElement(frame, 'button_row') etree.SubElement(frame, 'frame_methods') await form_defn.setval('form_xml', form_xml)", "= ('main_object', 'obj_descr', 'rowspan', 'colspan', 'value', 'obj_name', 'col_name', 'lng', 'height', 'pwd', 'readonly', 'choice',", "await col_names.init(init_vals={ 'name': col_defn.col_name, 'descr': col_defn.short_descr}) await col_names.save() # \"\"\" for deleted_obj in", "xml): # called from setup_form_ioparams 'on_start_frame' form_vars = caller.data_objects['form_vars'] inputs_xml = await form_vars.getval('inputs_xml')", "from setup_form_dbobj 'do_save' form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') orig_dbobj = set((dbobj.get('name')", "for col in button_cols: await set_if_not_none(button_xml, button, col) await form_vars.setval('buttonrow_xml', buttonrow_xml) #----------------------------------------------------------------------------- #", "#----------------------------------------------------------------------------- async def load_body_elem(caller, xml): # called from setup_form_body.grid_frame 'on_start_frame' body = caller.data_objects['body']", "create attribute on xml element, but only if not None or default xml_val", "db_mem_conn: conn = db_mem_conn.db sql = ( \"SELECT row_id, short_descr FROM {}.db_tables WHERE", "= frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') else: frame_xml = etree.Element('frame') init_vals={} init_vals['toolbar_xml'] = etree.SubElement(frame_xml,", "\"\"\" # the next bit is a trick # we want to 'save'", "# called from setup_form_methods 'before_save' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] methods_xml =", "dbobj.getval('table_name') db_table = await db.objects.get_db_table( form_vars.context, caller.company, table_name) 
await obj_names.init(init_vals={ 'name': obj_name, 'descr':", "form_funcs #----------------------------------------------------------------------------- async def load_form_xml(caller, xml): # called from setup_form 'on_start_frame' form_defn =", "= (await gridframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await gridframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await gridframe_vars.getval('methods_xml'))[:] elif elem_type", "memcol in enumerate(memobj.iter('mem_col')): await col_names.init(init_vals={'name': memcol.get('col_name'), 'descr': memcol.get('short_descr'), 'seq': seq}) await col_names.save() frame_xml", "set_if_not_none(form_xml, form_vars, 'before_start_form') await set_if_not_none(form_xml, form_vars, 'after_start_form') await set_if_not_none(form_xml, form_vars, 'on_close_form') form_xml.append(await form_vars.getval('dbobj_xml'))", "do not use this to store attributes - use sub_type columns instead #", "= etree.SubElement(form_xml, 'frame') etree.SubElement(frame, 'toolbar') etree.SubElement(frame, 'body') etree.SubElement(frame, 'button_row') etree.SubElement(frame, 'frame_methods') await form_defn.setval('form_xml',", "else: await obj_names.init(init_vals={ 'name': obj_name, 'descr': await memobj.getval('descr'), # 'seq': await memobj.getval('seq'), #", "None: buttonrow_xml = etree.Element('button_row') await form_vars.setval('buttonrow_xml', buttonrow_xml) await form_vars.setval('btn_template', await form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template'))) await", "col_names.save() # \"\"\" memobj_xml = await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={", "form_vars.restore(), which triggers on_clean() # call form_vars.init() with init_vals, which puts back the", "form_vars = caller.data_objects['form_vars'] button = caller.data_objects['button'] buttonrow_xml = etree.Element('button_row') await 
set_if_not_none(buttonrow_xml, form_vars, 'btn_template',", "elem_xml is None: elem_xml = etree.Element(elem_type) await body.setval('elem', elem_xml) async def set_if_not_none(elem_xml, db_obj,", "memobj_xml = etree.SubElement(memobjs_xml, 'mem_obj') for col in memobj_cols: await set_if_not_none(memobj_xml, memobj, col) all_memcol", "method.delete_all() methods_xml = await form_vars.getval('methods_xml') if methods_xml is None: methods_xml = etree.Element('frame_methods') await", "# called from setup_form_memobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') memobj", "= await body.getfld('obj_name') obj_names = obj_name_fld.foreign_key['tgt_field'].db_obj await obj_names.delete_all() col_name_fld = await body.getfld('col_name') col_names", "dbobj.getval('name') if obj_name in orig_dbobj: orig_dbobj.remove(obj_name) else: \"\"\" async with db_session.get_connection() as db_mem_conn:", "col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() \"\"\" # \"\"\" table_name", "= seq await dbobj.init(display=False, init_vals=init_vals) await dbobj.save() await dbobj.init() async def dump_db_obj(caller, xml):", "this memobj for seq, memcol_xml in enumerate(obj_xml.iter('mem_col')): # init_vals = {col: memcol.get_val_from_xml(col, memcol_xml.get(col))", "= (await subtypes.getval('body_xml'))[:] elem_xml[:] = subtypes_xml[:] elif elem_xml is None: elem_xml = etree.Element(elem_type)", "short_descr FROM {}.db_columns \" \"WHERE table_id = {} \" \"AND col_name NOT IN", "init_vals # call form_vars.restore(), which triggers on_clean() # call form_vars.init() with init_vals, which", "grid_vars.getval('form_name')) # await body.setval('auto_start', await grid_vars.getval('auto_start')) # await body.setval('auto_startrow', await grid_vars.getval('auto_startrow')) if elem_xml", "treeframe_vars = caller.data_objects['treeframe_vars'] await body.setval('main_object', 
await treeframe_vars.getval('main_object')) await body.setval('obj_descr', await treeframe_vars.getval('obj_descr')) await body.setval('combo_type',", "( \"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, dbobj.get('table_name')) )", "xml): # called from setup_form_dbobj 'do_save' form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml')", "await outputs.save() async def dump_ioparms(caller, xml): # called from setup_form_ioparams 'do_save' form_vars =", "from setup_form_inlline grid_frame 'before_save' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] frame_xml = etree.Element('frame')", "inline_vars.getval('frame_xml') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row') init_vals['methods_xml'] =", "init_vals=init_vals) await memobj.save() #set up memcols for this memobj for seq, memcol_xml in", "body.getval('obj_descr') init_vals['combo_type'] = await body.getval('combo_type') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] =", "elem_xml = etree.Element(elem_type) await body.setval('elem', elem_xml) async def set_if_not_none(elem_xml, db_obj, col_name, attr_name=None): #", "all_methods = method.select_many(where=[], order=[('seq', False)]) async for _ in all_methods: method_xml = etree.SubElement(methods_xml,", "tool = caller.data_objects['tool'] await tool.delete_all() for seq, tool_xml in enumerate(toolbar_xml): # init_vals =", "elem_type = await body.getval('type') elem_xml = await body.getval('elem') if elem_type == 'grid': grid_vars", "body.setval('elem', elem_xml) async def set_if_not_none(elem_xml, db_obj, col_name, attr_name=None): # create attribute on xml", "await set_if_not_none(form_xml, form_vars, 'before_start_form') await 
set_if_not_none(form_xml, form_vars, 'after_start_form') await set_if_not_none(form_xml, form_vars, 'on_close_form') form_xml.append(await", "in memcol_cols} init_vals = {} for col in memcol_cols: init_vals[col] = await memcol.get_val_from_xml(col,", "def load_body_elem(caller, xml): # called from setup_form_body.grid_frame 'on_start_frame' body = caller.data_objects['body'] # N.B.", "'toolbar') etree.SubElement(frame, 'body') etree.SubElement(frame, 'button_row') etree.SubElement(frame, 'frame_methods') await form_defn.setval('form_xml', form_xml) await load_form_xml(caller, xml)", "called from setup_form_ioparams 'do_save' form_vars = caller.data_objects['form_vars'] inputs_xml = etree.Element('input_params') inputs = caller.data_objects['inputs']", "# init_vals = {col: method.get_val_from_xml(col, method_xml.get(col)) # for col in method_cols} init_vals =", "frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) await inline_vars.setval('frame_xml', frame_xml) #----------------------------------------------------------------------------- # toolbar #-----------------------------------------------------------------------------", "col_names.init(init_vals={ 'name': await memcol.getval('col_name'), 'descr': await memcol.getval('short_descr'), 'seq': await memcol.getval('seq')}) await col_names.save() for", "input_cols: await set_if_not_none(input_xml, inputs, col) await form_vars.setval('inputs_xml', inputs_xml) outputs_xml = etree.Element('output_params') outputs =", "obj_xml in enumerate(memobj_xml): # init_vals = {col: memobj.get_val_from_xml(col, obj_xml.get(col)) # for col in", "= caller.data_objects['form_vars'] inputs_xml = etree.Element('input_params') inputs = caller.data_objects['inputs'] all_inputs = inputs.select_many(where=[], order=[('seq', False)])", "'name': col_name, 'descr': descr}) await col_names.save() \"\"\" # \"\"\" obj_name = 
dbobj_elem.get('name') table_name", "'virt' \" \"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') \" .format(caller.company, table_id) )", "col_names = caller.data_objects['col_names'] await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj_elem in dbobj_xml.iter('db_obj'):", "= await db.objects.get_db_table( form_defn.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await", "\"\"\" obj_name_fld = await body.getfld('obj_name') obj_names = obj_name_fld.foreign_key['tgt_field'].db_obj await obj_names.delete_all() col_name_fld = await", "is ok to store copies of attributes in separate mem_objects, # get the", "= etree.Element('db_objects') all_dbobj = dbobj.select_many(where=[], order=[('seq', False)]) async for _ in all_dbobj: dbobj_xml", "await form_vars.setval('tb_template', await form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template'))) await form_vars.setval('tb_title', await form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title'))) await form_vars.save() tool", "seq, tool_xml in enumerate(toolbar_xml): # init_vals = {col: tool.get_val_from_xml(col, tool_xml.get(col)) # for col", "if attr_name is None: # if not specified, use col_name attr_name = col_name", "await subtype_vars.init(init_vals={ 'subtype_obj': await body.getval('subtype_obj'), 'subtype_col': await body.getval('subtype_col'), 'lng': await body.getval('lng'), }) subtypes", "obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() \"\"\" # \"\"\" obj_name = dbobj_elem.get('name')", "= col_defn.col_name init_vals[col_name] = await form_vars.getval(col_name) await form_vars.restore() await form_vars.init(init_vals=init_vals, display=False) form_vars.init_vals =", "row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, await dbobj.getval('table_name')) ) cur", "dbobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = 
seq await dbobj.init(display=False, init_vals=init_vals) await dbobj.save() await dbobj.init() async", "init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row')", "xml): # called from setup_form_dbobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml')", "inputs, col) await form_vars.setval('inputs_xml', inputs_xml) outputs_xml = etree.Element('output_params') outputs = caller.data_objects['outputs'] all_outputs =", "#----------------------------------------------------------------------------- async def load_form_xml(caller, xml): # called from setup_form 'on_start_frame' form_defn = caller.data_objects['form']", "button.save() async def dump_buttonrow(caller, xml): # called from setup_form_buttonrow 'before_save' form_vars = caller.data_objects['form_vars']", "in enumerate(buttonrow_xml): # init_vals = {col: button.get_val_from_xml(col, button_xml.get(col)) # for col in button_cols}", "memobj.get('descr')}) await obj_names.save() obj_row_id = await obj_names.getval('row_id') for memcol in memobj.iter('mem_col'): await col_names.init(init_vals={'obj_id':", "caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for seq, col_defn", "await subtypes.delete_all() for subtype in elem_xml.iter('subtype_body'): await subtypes.init(init_vals={ 'subtype_id': subtype.get('subtype_id'), 'body_xml': subtype, })", "= caller.data_objects['form'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) etree.SubElement(form_xml, 'db_objects') etree.SubElement(form_xml, 'mem_objects') etree.SubElement(form_xml,", "init_vals[col] = await dbobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await dbobj.init(display=False, init_vals=init_vals) 
await dbobj.save()", "subtype.get('subtype_id'), 'body_xml': subtype, }) await subtypes.save() async def dump_body_elem(caller, xml): # called from", "async for _ in all_outputs: output_xml = etree.SubElement(outputs_xml, 'output_param') for col in output_cols:", "in input_cols: await set_if_not_none(input_xml, inputs, col) await form_vars.setval('inputs_xml', inputs_xml) outputs_xml = etree.Element('output_params') outputs", "col_names.init(init_vals={'obj_id': obj_row_id, 'name': memcol.get('col_name'), 'descr': memcol.get('short_descr')}) await col_names.save() \"\"\" body_xml = await form_vars.getval('body_xml')", "await form_vars.getval('inputs_xml') outputs_xml = await form_vars.getval('outputs_xml') inputs = caller.data_objects['inputs'] await inputs.delete_all() for seq,", "for memobj in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={ 'name': memobj.get('name'), 'descr': memobj.get('descr')}) await obj_names.save() obj_row_id", "row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, dbobj_elem.get('table_name')) ) cur =", "await tool.save() async def dump_toolbar(caller, xml): # called from setup_form_frame.toolbar 'before_save' form_vars =", "for seq, memcol_xml in enumerate(obj_xml.iter('mem_col')): # init_vals = {col: memcol.get_val_from_xml(col, memcol_xml.get(col)) # for", "await col_names.save() \"\"\" # \"\"\" obj_name = dbobj_elem.get('name') table_name = dbobj_elem.get('table_name') db_table =", "setup_form_memobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') memobj = caller.data_objects['memobj'] memcol", "await body.getval('type')) for col in body_cols: await set_if_not_none(elem_xml, body, col) elem_xml[:] = (await", "form_vars, 'btn_template', 'template') all_buttons = button.select_many(where=[], order=[('seq', False)]) async for _ in all_buttons:", "form_xml) await load_form_xml(caller, xml) 
#----------------------------------------------------------------------------- # form_funcs #----------------------------------------------------------------------------- async def load_form_xml(caller, xml): #", "lxml import etree # parser = etree.XMLParser(remove_blank_text=True) import db.objects import db.api db_session =", "async for _ in all_cols: await col_names.init(init_vals={ 'name': await memcol.getval('col_name'), 'descr': await memcol.getval('short_descr'),", "body.select_many(where=[], order=[('seq', False)]) async for _ in all_body: elem_xml = etree.SubElement(body_xml, await body.getval('type'))", "init_vals['outputs_xml'] = form_xml.find('output_params') init_vals['before_start_form'] = await form_vars.get_val_from_xml( 'before_start_form', form_xml.get('before_start_form')) init_vals['after_start_form'] = await form_vars.get_val_from_xml(", "IN ('row_id', 'created_id', 'deleted_id') \" \"ORDER BY col_type, seq\" .format(caller.company, table_id) ) async", "form_vars, 'tb_template', 'template') await set_if_not_none(toolbar_xml, form_vars, 'tb_title', 'title') all_tools = tool.select_many(where=[], order=[('seq', False)])", "(await treeframe_vars.getval('methods_xml'))[:] elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await body.setval('subtype_obj', await subtype_vars.getval('subtype_obj'))", "await inline_vars.getval('title')) inline_xml.append(await inline_vars.getval('frame_xml')) # inline_params = await form_vars.getval('inline_xml') # for name, frame_xml", "dump_toolbar(caller, xml): # called from setup_form_frame.toolbar 'before_save' form_vars = caller.data_objects['form_vars'] tool = caller.data_objects['tool']", "loading, and replace the values # in 'body' on dumping elem_type = await", "short_descr FROM {}.db_columns \" \"WHERE table_id = {} AND col_type != 'virt' \"", "etree.SubElement(outputs_xml, 'output_param') for col in output_cols: await set_if_not_none(output_xml, outputs, col) await 
form_vars.setval('outputs_xml', outputs_xml)", "from 'body' on loading, and replace the values # in 'body' on dumping", "{} \"\"\" #----------------------------------------------------------------------------- # db_obj #----------------------------------------------------------------------------- dbobj_cols = ('name', 'table_name', 'parent', 'fkey', 'cursor',", "form_xml.append(await form_vars.getval('inputs_xml')) form_xml.append(await form_vars.getval('outputs_xml')) frame_xml = etree.SubElement(form_xml, 'frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await", "from setup_form_memobj 'before_save' form_vars = caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') orig_memobj = set((memobj.get('name')", "elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await treeframe_vars.init(init_vals=init_vals) elif", "= '{}'\" .format(caller.company, dbobj_elem.get('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr = await", "col in button_cols} init_vals = {} for col in button_cols: init_vals[col] = await", "for _ in all_methods: method_xml = etree.SubElement(methods_xml, 'method') for col in method_cols: await", "await form_vars.get_val_from_xml( 'before_start_form', form_xml.get('before_start_form')) init_vals['after_start_form'] = await form_vars.get_val_from_xml( 'after_start_form', form_xml.get('after_start_form')) init_vals['on_close_form'] = await", "\"\"\" body_xml = await form_vars.getval('body_xml') if body_xml is None: body_xml = etree.Element('body') await", "'before_start_form' # parent = caller.parent # while True: # if 'obj_names' in parent.data_objects:", "inline_vars.save() async def dump_form_xml(caller, xml): # called from setup_form 'before_save' form_defn = caller.data_objects['form']", "body_cols = ('main_object', 'obj_descr', 'rowspan', 'colspan', 'value', 
'obj_name', 'col_name', 'lng', 'height', 'pwd', 'readonly',", "def load_ioparms(caller, xml): # called from setup_form_ioparams 'on_start_frame' form_vars = caller.data_objects['form_vars'] inputs_xml =", "grid_vars.getval('obj_descr')) # await body.setval('growable', await grid_vars.getval('growable')) # await body.setval('num_grid_rows', await grid_vars.getval('num_grid_rows')) # await", "await conn.exec_sql(sql): await col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() memobj_xml", "(await treeframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await treeframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await treeframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await", "init_vals = {} for col in memcol_cols: init_vals[col] = await memcol.get_val_from_xml(col, memcol_xml.get(col)) init_vals['seq']", "input_xml = etree.SubElement(inputs_xml, 'input_param') for col in input_cols: await set_if_not_none(input_xml, inputs, col) await", "await form_vars.setval('methods_xml', methods_xml) #----------------------------------------------------------------------------- # body #----------------------------------------------------------------------------- body_cols = ('main_object', 'obj_descr', 'rowspan', 'colspan',", "caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') orig_memobj = set((memobj.get('name') for memobj in memobj_xml)) obj_names", "'seq': await memcol.getval('seq')}) await col_names.save() for deleted_obj in orig_memobj: # anything left has", "trick to prevent running 'on_clean' for seq, elem_xml in enumerate(body_xml): init_vals = {}", "(await grid_vars.getval('sequence_xml'))[:] elem_xml.find('grid_methods')[:] = (await grid_vars.getval('methods_xml'))[:] elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars']", "col in input_cols: await set_if_not_none(input_xml, inputs, col) await 
form_vars.setval('inputs_xml', inputs_xml) outputs_xml = etree.Element('output_params')", "= seq await method.init(display=False, init_vals=init_vals) await method.save() async def dump_methods(caller, xml): # called", "await body.getval('combo_type') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] =", "table_id) ) async for col_name, descr in await conn.exec_sql(sql): await col_names.init(init_vals={ #'obj_id': obj_row_id,", "for _ in all_body: elem_xml = etree.SubElement(body_xml, await body.getval('type')) for col in body_cols:", "'title') all_tools = tool.select_many(where=[], order=[('seq', False)]) async for _ in all_tools: tool_xml =", "await set_if_not_none(buttonrow_xml, form_vars, 'btn_template', 'template') all_buttons = button.select_many(where=[], order=[('seq', False)]) async for _", "memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] memobjs_xml = etree.Element('mem_objects') all_memobj = memobj.select_many(where=[], order=[('seq',", "init_vals = {} for col_defn in form_vars.db_table.col_list[1:]: # exclude 'row_id' col_name = col_defn.col_name", "tool.get_val_from_xml(col, tool_xml.get(col)) # for col in tool_cols} init_vals = {} for col in", "def load_methods(caller, xml): # called from setup_form_methods 'on_start_frame' form_vars = caller.data_objects['form_vars'] method =", "await button.save() async def dump_buttonrow(caller, xml): # called from setup_form_buttonrow 'before_save' form_vars =", "'descr': memobj.get('descr')}) await obj_names.save() obj_row_id = await obj_names.getval('row_id') for memcol in memobj.iter('mem_col'): await", "async def dump_inline(caller, xml): # called from setup_form_inlline grid_frame 'before_save' inline_vars = caller.data_objects['inline_vars']", "= ( \"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, 
dbobj.get('table_name'))", "sql = ( \"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company,", "etree Elements # this cannot be serialised to JSON, so the save fails", "memobj in memobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] memobj = caller.data_objects['memobj'] memcol", "caller.data_objects['form_vars'] method = caller.data_objects['method'] await method.delete_all() methods_xml = await form_vars.getval('methods_xml') if methods_xml is", "= elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await gridframe_vars.init(init_vals=init_vals)", "await memobj.save() #set up memcols for this memobj for seq, memcol_xml in enumerate(obj_xml.iter('mem_col')):", "inputs_xml = await form_vars.getval('inputs_xml') outputs_xml = await form_vars.getval('outputs_xml') inputs = caller.data_objects['inputs'] await inputs.delete_all()", "dbobj_xml = await form_vars.getval('dbobj_xml') orig_dbobj = set((dbobj.get('name') for dbobj in dbobj_xml)) obj_names =", "= await form_vars.getval('memobj_xml') orig_memobj = set((memobj.get('name') for memobj in memobj_xml)) obj_names = caller.data_objects['obj_names']", "memcol = caller.data_objects['memcol'] memobjs_xml = etree.Element('mem_objects') all_memobj = memobj.select_many(where=[], order=[('seq', False)]) async for", "set_if_not_none(subtype_xml, subtypes, 'subtype_id') subtype_xml[:] = (await subtypes.getval('body_xml'))[:] elem_xml[:] = subtypes_xml[:] elif elem_xml is", "async def load_form_xml(caller, xml): # called from setup_form 'on_start_frame' form_defn = caller.data_objects['form'] form_vars", "dump_methods(caller, xml): # called from setup_form_methods 'before_save' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method']", "'obj_name', 'col_name', 'shortcut', 'action') 
async def before_start_toolbar(caller, xml): # called from setup_form_toolbar 'before_start_form'", "elif elem_xml is None: elem_xml = etree.Element(elem_type) await body.setval('elem', elem_xml) async def set_if_not_none(elem_xml,", "order=[('seq', False)]) async for _ in all_buttons: button_xml = etree.SubElement(buttonrow_xml, 'button') for col", "subtypes.init(init_vals={ 'subtype_id': subtype.get('subtype_id'), 'body_xml': subtype, }) await subtypes.save() async def dump_body_elem(caller, xml): #", "for seq, elem_xml in enumerate(body_xml): init_vals = {} init_vals['elem'] = elem_xml init_vals['type'] =", "# for col in tool_cols} init_vals = {} for col in tool_cols: init_vals[col]", "await treeframe_vars.init(init_vals=init_vals) elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await subtype_vars.init(init_vals={ 'subtype_obj': await", "form_vars.getval('methods_xml') if methods_xml is None: methods_xml = etree.Element('frame_methods') await form_vars.setval('methods_xml', methods_xml) await form_vars.setval('method_template',", "# could make an alias, without gui link (cleaner?) 
async def dump_body(caller, xml):", "etree.SubElement(form_xml, 'output_params') frame = etree.SubElement(form_xml, 'frame') etree.SubElement(frame, 'toolbar') etree.SubElement(frame, 'body') etree.SubElement(frame, 'button_row') etree.SubElement(frame,", "etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await treeframe_vars.getval('toolbar_xml'))[:]", "await form_vars.getval('toolbar_xml') if toolbar_xml is None: toolbar_xml = etree.Element('toolbar') await form_vars.setval('toolbar_xml', toolbar_xml) await", "WHERE table_name = '{}'\" .format(caller.company, dbobj_elem.get('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr", "'label', 'tip', 'lng', 'name', 'obj_name', 'col_name', 'shortcut', 'action') async def before_start_toolbar(caller, xml): #", "frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) await inline_vars.setval('frame_xml', frame_xml) #----------------------------------------------------------------------------- # toolbar", "if elem_xml is None: elem_xml = etree.Element(elem_type) subtypes = caller.data_objects['subtypes'] subtypes_xml = etree.Element('subtypes_temp')", "form_vars.setval('methods_xml', methods_xml) await form_vars.setval('method_template', await form_vars.get_val_from_xml('method_template', methods_xml.get('template'))) await form_vars.save() for seq, method_xml in", "elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] await body.setval('main_object', await treeframe_vars.getval('main_object')) await body.setval('obj_descr',", "= (await gridframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await gridframe_vars.getval('methods_xml'))[:] elif 
elem_type == 'tree_frame': treeframe_vars =", "db_mem_conn.db sql = ( \"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\"", "load_body_elem(caller, xml): # called from setup_form_body.grid_frame 'on_start_frame' body = caller.data_objects['body'] # N.B. do", "caller.data_objects['form_vars'] inputs_xml = etree.Element('input_params') inputs = caller.data_objects['inputs'] all_inputs = inputs.select_many(where=[], order=[('seq', False)]) async", "caller.data_objects['frame_vars'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) form_xml.set('title', await form_defn.getval('title')) await set_if_not_none(form_xml, form_vars,", "trigger on_clean() # however, inline_xml is a 'list' which includes etree Elements #", "xml): # called from setup_form_ioparams 'do_save' form_vars = caller.data_objects['form_vars'] inputs_xml = etree.Element('input_params') inputs", "caller.data_objects['body'] elem_type = await body.getval('type') elem_xml = await body.getval('elem') if elem_type == 'grid':", "'tree_params', 'actions', 'clone_from') memcol_cols = ('col_name', 'col_type', 'data_type', 'short_descr', 'long_descr', 'col_head', 'key_field', 'data_source',", "col_defn in form_vars.db_table.col_list[1:]: # exclude 'row_id' col_name = col_defn.col_name init_vals[col_name] = await form_vars.getval(col_name)", "'data_object', 'growable', 'num_grid_rows', 'cursor_name', 'form_name', 'auto_start', 'auto_startrow', 'toolbar', 'combo_type', 'group_name', 'member_name', 'pyfunc', 'prev',", "await form_vars.save() tool = caller.data_objects['tool'] await tool.delete_all() for seq, tool_xml in enumerate(toolbar_xml): #", "from setup_form_frame.toolbar 'on_start_frame' form_vars = caller.data_objects['form_vars'] toolbar_xml = await form_vars.getval('toolbar_xml') if toolbar_xml is", "memcol.get('short_descr')}) await col_names.save() \"\"\" body_xml = await form_vars.getval('body_xml') if body_xml is None: body_xml", "'lng': 
await body.getval('lng'), }) subtypes = caller.data_objects['subtypes'] await subtypes.delete_all() for subtype in elem_xml.iter('subtype_body'):", "= {} for col in method_cols: init_vals[col] = await method.get_val_from_xml(col, method_xml.get(col)) init_vals['seq'] =", "enumerate(memobj.iter('mem_col')): await col_names.init(init_vals={'name': memcol.get('col_name'), 'descr': memcol.get('short_descr'), 'seq': seq}) await col_names.save() frame_xml = form_xml.find('frame')", "etree.SubElement(frame, 'button_row') etree.SubElement(frame, 'frame_methods') await form_defn.setval('form_xml', form_xml) await load_form_xml(caller, xml) #----------------------------------------------------------------------------- # form_funcs", "'db_scale', 'scale_ptr', 'dflt_val', 'dflt_rule', 'col_checks', 'fkey', 'choices', 'sql') async def load_mem_obj(caller, xml): #", "inline_xml = etree.SubElement(form_xml, 'inline_form') inline_xml.set('name', await inline_vars.getval('name')) inline_xml.set('title', await inline_vars.getval('title')) inline_xml.append(await inline_vars.getval('frame_xml')) #", "} await inline_vars.init(init_vals=init_vals) await inline_vars.save() async def dump_form_xml(caller, xml): # called from setup_form", "col in memobj_cols: await set_if_not_none(memobj_xml, memobj, col) all_memcol = memcol.select_many(where=[], order=[('seq', False)]) async", "'action', 'help_msg', 'nb_label', 'subtype_obj', 'subtype_col', 'data_object', 'growable', 'num_grid_rows', 'cursor_name', 'form_name', 'auto_start', 'auto_startrow', 'toolbar',", "'data_type', 'short_descr', 'long_descr', 'col_head', 'key_field', 'data_source', 'condition', 'allow_null', 'allow_amend', 'max_len', 'db_scale', 'scale_ptr', 'dflt_val',", "await memobj.getval('seq'), # seq is db_obj then mem_obj, so n/a }) await obj_names.save()", "await obj_names.save() for col_defn in db_table.col_list: await col_names.init(init_vals={ 'name': col_defn.col_name, 'descr': col_defn.short_descr}) await", 
"caller.data_objects['gridframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['toolbar_xml'] = elem_xml.find('toolbar')", "= await form_vars.getval('inline_xml') # for name, frame_xml in inline_params: # inline_xml = etree.SubElement(form_xml,", "etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'cur_columns') etree.SubElement(elem_xml, 'cur_filter') etree.SubElement(elem_xml, 'cur_sequence') etree.SubElement(elem_xml, 'grid_methods') elem_xml.find('toolbar')[:] = (await", "init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object']", "{} for col in input_cols: init_vals[col] = await inputs.get_val_from_xml(col, input_xml.get(col)) init_vals['seq'] = seq", "= { 'name': inline_xml.get('name'), 'title': inline_xml.get('title'), 'frame_xml': inline_xml.find('frame'), } await inline_vars.init(init_vals=init_vals) await inline_vars.save()", "etree.SubElement(form_xml, 'inline_form') # inline_xml.set('name', name) # inline_xml.append(frame_xml) # validate result using schema try:", "'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await subtype_vars.init(init_vals={ 'subtype_obj': await body.getval('subtype_obj'), 'subtype_col': await body.getval('subtype_col'), 'lng':", "form_defn.getval('form_xml') if form_xml is None: await form_vars.init() await frame_vars.init() return init_vals={} init_vals['dbobj_xml'] =", "= set((memobj.get('name') for memobj in memobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] memobj", "caller.data_objects['frame_vars'] inline_vars = caller.data_objects['inline_vars'] await inline_vars.delete_all() form_xml = await form_defn.getval('form_xml') if form_xml is", "('btn_id', 
'btn_label', 'lng', 'btn_default', 'btn_enabled', 'btn_validate', 'action', 'validation', 'help_msg') async def load_buttonrow(caller, xml):", "'obj_name', 'action') async def load_methods(caller, xml): # called from setup_form_methods 'on_start_frame' form_vars =", "init_vals={} init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['columns_xml'] = elem_xml.find('cur_columns') init_vals['filter_xml'] = elem_xml.find('cur_filter') init_vals['sequence_xml'] = elem_xml.find('cur_sequence')", "await body.getfld('col_name') col_names = col_name_fld.foreign_key['tgt_field'].db_obj await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj", "from lxml import etree # parser = etree.XMLParser(remove_blank_text=True) import db.objects import db.api db_session", "xml): # called from setup_form after form_name if form does not exist form_defn", "# called from setup_form_toolbar 'before_start_form' # parent = caller.parent # while True: #", "'inline_form') inline_xml.set('name', await inline_vars.getval('name')) inline_xml.set('title', await inline_vars.getval('title')) inline_xml.append(await inline_vars.getval('frame_xml')) # inline_params = await", "in memobj.iter('mem_col'): await col_names.init(init_vals={'obj_id': obj_row_id, 'name': memcol.get('col_name'), 'descr': memcol.get('short_descr')}) await col_names.save() \"\"\" body_xml", "'before_start_form', form_xml.get('before_start_form')) init_vals['after_start_form'] = await form_vars.get_val_from_xml( 'after_start_form', form_xml.get('after_start_form')) init_vals['on_close_form'] = await form_vars.get_val_from_xml( 'on_close_form',", "all_memcol: memcol_xml = etree.SubElement(memobj_xml, 'mem_col') for col in memcol_cols: await set_if_not_none(memcol_xml, memcol, col)", "'on_start_frame' form_vars = caller.data_objects['form_vars'] buttonrow_xml = await form_vars.getval('buttonrow_xml') if buttonrow_xml is None: buttonrow_xml", "schema_path = 
os.path.join(os.path.dirname(__main__.__file__), 'schemas') xsd_parser = etree.XMLParser( schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')), attribute_defaults=True, remove_comments=True, remove_blank_text=True) from", "outputs_xml = etree.Element('output_params') outputs = caller.data_objects['outputs'] all_outputs = outputs.select_many(where=[], order=[('seq', False)]) async for", "'choice', 'lookup', 'radio', 'before', 'form_dflt', 'validation', 'after', 'btn_id', 'btn_label', 'btn_enabled', 'btn_validate', 'action', 'help_msg',", "obj_name, 'descr': await memobj.getval('descr'), # 'seq': await memobj.getval('seq'), # seq is db_obj then", "in orig_memobj: # anything left has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete()", "{ 'name': inline_xml.get('name'), 'title': inline_xml.get('title'), 'frame_xml': inline_xml.find('frame'), } await inline_vars.init(init_vals=init_vals) await inline_vars.save() async", "'input_params') etree.SubElement(form_xml, 'output_params') frame = etree.SubElement(form_xml, 'frame') etree.SubElement(frame, 'toolbar') etree.SubElement(frame, 'body') etree.SubElement(frame, 'button_row')", "# called from setup_form_inline grid_frame 'on_start_frame' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] if", "# await body.setval('auto_start', await grid_vars.getval('auto_start')) # await body.setval('auto_startrow', await grid_vars.getval('auto_startrow')) if elem_xml is", "set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml'))", "grid_vars.getval('auto_start')) # await body.setval('auto_startrow', await grid_vars.getval('auto_startrow')) if elem_xml is None: elem_xml = etree.Element(elem_type)", "memcol_cols} 
init_vals = {} for col in memcol_cols: init_vals[col] = await memcol.get_val_from_xml(col, memcol_xml.get(col))", "col_names.init(init_vals={ 'name': col_defn.col_name, 'descr': col_defn.short_descr}) await col_names.save() # \"\"\" for deleted_obj in orig_dbobj:", "caller.data_objects['memcol'] await memcol.delete_all() await memobj.delete_all() for seq, obj_xml in enumerate(memobj_xml): # init_vals =", "in all_col: print(col_names) print() \"\"\" \"\"\" obj_name_fld = await body.getfld('obj_name') obj_names = obj_name_fld.foreign_key['tgt_field'].db_obj", "# init_vals = {col: button.get_val_from_xml(col, button_xml.get(col)) # for col in button_cols} init_vals =", "includes etree Elements # this cannot be serialised to JSON, so the save", "= {} \"\"\" #----------------------------------------------------------------------------- # db_obj #----------------------------------------------------------------------------- dbobj_cols = ('name', 'table_name', 'parent', 'fkey',", "so the save fails # the trick is as follows - # save", "elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] init_vals={} init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['columns_xml'] = elem_xml.find('cur_columns')", "init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await gridframe_vars.init(init_vals=init_vals) elif elem_type == 'tree_frame': treeframe_vars", "obj_names.delete() await form_vars.setval('memobj_xml', memobjs_xml) #----------------------------------------------------------------------------- # io_parms #----------------------------------------------------------------------------- input_cols = ('name', 'type', 'target',", "frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) await 
inline_vars.setval('frame_xml', frame_xml)", "col_defn.short_descr}) await col_names.save() # \"\"\" for deleted_obj in orig_dbobj: # anything left has", "col in button_cols: init_vals[col] = await button.get_val_from_xml(col, button_xml.get(col)) init_vals['seq'] = seq await button.init(display=False,", "None: elem_xml = etree.Element(elem_type) await body.setval('elem', elem_xml) async def set_if_not_none(elem_xml, db_obj, col_name, attr_name=None):", "in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={ 'name': memobj.get('name'), 'descr': memobj.get('descr')}) await obj_names.save() obj_row_id = await", "init_vals = {col: memcol.get_val_from_xml(col, memcol_xml.get(col)) # for col in memcol_cols} init_vals = {}", "for col in memcol_cols} init_vals = {} for col in memcol_cols: init_vals[col] =", "'type', 'target', 'required') output_cols = ('name', 'type', 'source') async def load_ioparms(caller, xml): #", "form_vars.setval('btn_template', await form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template'))) await form_vars.save() button = caller.data_objects['button'] await button.delete_all() for seq,", "'after_start_form') await set_if_not_none(form_xml, form_vars, 'on_close_form') form_xml.append(await form_vars.getval('dbobj_xml')) form_xml.append(await form_vars.getval('memobj_xml')) form_xml.append(await form_vars.getval('inputs_xml')) form_xml.append(await form_vars.getval('outputs_xml'))", "{} init_vals['elem'] = elem_xml init_vals['type'] = elem_xml.tag init_vals['seq'] = seq for fld in", ".format(caller.company, await dbobj.getval('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__()", "'subtype_body') await set_if_not_none(subtype_xml, subtypes, 'subtype_id') subtype_xml[:] = (await subtypes.getval('body_xml'))[:] elem_xml[:] = subtypes_xml[:] elif", "elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] # await body.setval('main_object', await 
gridframe_vars.getval('main_object')) # await", "etree.SubElement(form_xml, 'inline_form') inline_xml.set('name', await inline_vars.getval('name')) inline_xml.set('title', await inline_vars.getval('title')) inline_xml.append(await inline_vars.getval('frame_xml')) # inline_params =", "await grid_vars.getval('num_grid_rows')) # await body.setval('cursor_name', await grid_vars.getval('cursor_name')) # await body.setval('form_name', await grid_vars.getval('form_name')) #", "dbobjs_xml = etree.Element('db_objects') all_dbobj = dbobj.select_many(where=[], order=[('seq', False)]) async for _ in all_dbobj:", "col_names = col_name_fld.foreign_key['tgt_field'].db_obj await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj in dbobj_xml.iter('db_obj'):", "it to store sub_elements # P.S. it is ok to store copies of", "frame_vars = caller.data_objects['frame_vars'] frame_xml = etree.Element('frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars,", "in enumerate(methods_xml): # init_vals = {col: method.get_val_from_xml(col, method_xml.get(col)) # for col in method_cols}", "# if 'obj_names' in parent.data_objects: # caller.data_objects['obj_names'] = parent.data_objects['obj_names'] # caller.data_objects['col_names'] = parent.data_objects['col_names']", "seq is db_obj then mem_obj, so n/a }) await obj_names.save() all_cols = memcol.select_many(where=[],", "form_xml.append(await form_vars.getval('dbobj_xml')) form_xml.append(await form_vars.getval('memobj_xml')) form_xml.append(await form_vars.getval('inputs_xml')) form_xml.append(await form_vars.getval('outputs_xml')) frame_xml = etree.SubElement(form_xml, 'frame') await", "store attributes - use sub_type columns instead # only use it to store", "'descr': col_defn.short_descr}) await col_names.save() # \"\"\" for deleted_obj in orig_dbobj: # anything left", "# db_obj 
#----------------------------------------------------------------------------- dbobj_cols = ('name', 'table_name', 'parent', 'fkey', 'cursor', 'is_formview_obj') async def", "= etree.SubElement(outputs_xml, 'output_param') for col in output_cols: await set_if_not_none(output_xml, outputs, col) await form_vars.setval('outputs_xml',", "= caller.data_objects['tool'] await tool.delete_all() for seq, tool_xml in enumerate(toolbar_xml): # init_vals = {col:", "async for _ in all_col: print(col_names) print() \"\"\" \"\"\" obj_name_fld = await body.getfld('obj_name')", "in button_cols} init_vals = {} for col in button_cols: init_vals[col] = await button.get_val_from_xml(col,", "caller.data_objects['method'] await method.delete_all() methods_xml = await form_vars.getval('methods_xml') if methods_xml is None: methods_xml =", "= elem_xml.find('frame_methods') await treeframe_vars.init(init_vals=init_vals) elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await subtype_vars.init(init_vals={", "form_vars.getval('inputs_xml')) form_xml.append(await form_vars.getval('outputs_xml')) frame_xml = etree.SubElement(form_xml, 'frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml,", "'{}'\" .format(caller.company, dbobj_elem.get('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__()", "col_defn.col_name, 'descr': col_defn.short_descr, 'seq': seq}) await col_names.save() # \"\"\" memobj_xml = await form_vars.getval('memobj_xml')", "col_names = caller.data_objects['col_names'] memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] memobjs_xml = etree.Element('mem_objects') all_memobj", "subtype_vars.getval('subtype_col')) await body.setval('lng', await subtype_vars.getval('lng')) if elem_xml is None: elem_xml = etree.Element(elem_type) subtypes", "which includes etree Elements # this cannot be serialised to JSON, so the", "= await 
db_obj.get_val_for_xml(col_name) # returns None if None or equal to default if", "for col in input_cols: init_vals[col] = await inputs.get_val_from_xml(col, input_xml.get(col)) init_vals['seq'] = seq await", "caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') dbobj = caller.data_objects['dbobj'] await dbobj.delete_all() for seq, obj_xml", "await method.delete_all() methods_xml = await form_vars.getval('methods_xml') if methods_xml is None: methods_xml = etree.Element('frame_methods')", "elem_xml = etree.SubElement(body_xml, await body.getval('type')) for col in body_cols: await set_if_not_none(elem_xml, body, col)", "on loading, and replace the values # in 'body' on dumping elem_type =", "elem_xml.find('body')[:] = (await gridframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await gridframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await gridframe_vars.getval('methods_xml'))[:] elif", "await memcol.getval('col_name'), 'descr': await memcol.getval('short_descr'), 'seq': await memcol.getval('seq')}) await col_names.save() for deleted_obj in", "= seq await memcol.init(display=False, init_vals=init_vals) await memcol.save() await memobj.init() await memobj.init() async def", "init_vals={} init_vals['toolbar_xml'] = etree.SubElement(frame_xml, 'toolbar') init_vals['body_xml'] = etree.SubElement(frame_xml, 'body') init_vals['buttonrow_xml'] = etree.SubElement(frame_xml, 'button_row')", "_ in all_col: print(col_names) print() \"\"\" \"\"\" obj_name_fld = await body.getfld('obj_name') obj_names =", "await form_vars.get_val_from_xml( 'after_start_form', form_xml.get('after_start_form')) init_vals['on_close_form'] = await form_vars.get_val_from_xml( 'on_close_form', form_xml.get('on_close_form')) await form_vars.init(init_vals=init_vals) obj_names", "frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') await 
frame_vars.init(init_vals=init_vals) for inline_xml in form_xml.iterchildren('inline_form'):", "# called from setup_form_inlline grid_frame 'before_save' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] frame_xml", "elem_xml.find('grid_methods')[:] = (await grid_vars.getval('methods_xml'))[:] elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] # await", "# called from setup_form_body 'before_start_form' # parent = caller.parent # while True: #", "await form_vars.getval('dbobj_xml') for dbobj in dbobj_xml.iter('db_obj'): async with db_session.get_connection() as db_mem_conn: conn =", "in all_inline: inline_xml = etree.SubElement(form_xml, 'inline_form') inline_xml.set('name', await inline_vars.getval('name')) inline_xml.set('title', await inline_vars.getval('title')) inline_xml.append(await", "db_table = await db.objects.get_db_table( form_vars.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr})", "etree.SubElement(dbobjs_xml, 'db_obj') for col in dbobj_cols: await set_if_not_none(dbobj_xml, dbobj, col) obj_name = await", "use it to store sub_elements # P.S. 
it is ok to store copies", "= await body.getval('elem') if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] # await body.setval('data_object',", "await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('memobj_xml', memobjs_xml) #----------------------------------------------------------------------------- # io_parms #----------------------------------------------------------------------------- input_cols", "elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await subtype_vars.init(init_vals={ 'subtype_obj': await body.getval('subtype_obj'), 'subtype_col':", "= frame_xml.get('obj_descr') else: frame_xml = etree.Element('frame') init_vals={} init_vals['toolbar_xml'] = etree.SubElement(frame_xml, 'toolbar') init_vals['body_xml'] =", "subtypes.delete_all() for subtype in elem_xml.iter('subtype_body'): await subtypes.init(init_vals={ 'subtype_id': subtype.get('subtype_id'), 'body_xml': subtype, }) await", "in dbobj_cols} init_vals = {} for col in dbobj_cols: init_vals[col] = await dbobj.get_val_from_xml(col,", "# seq is db_obj then mem_obj, so n/a }) await obj_names.save() all_cols =", "could make an alias, without gui link (cleaner?) 
async def dump_body(caller, xml): #", "obj_names.init(init_vals={ 'name': obj_name, 'descr': descr}) await obj_names.save() sql = ( \"SELECT col_name, short_descr", "caller.data_objects['col_names'] = parent.data_objects['col_names'] # break # parent = parent.parent pass async def load_body(caller,", "= caller.data_objects['form_vars'] button = caller.data_objects['button'] buttonrow_xml = etree.Element('button_row') await set_if_not_none(buttonrow_xml, form_vars, 'btn_template', 'template')", "init_vals[fld.col_name] = val await body.init(display=False, init_vals=init_vals) await body.save(from_upd_on_save=True) # a trick to prevent", "raise AibError(head='XmlError', body=e.args[0]) # update form_definition with new form_xml await form_defn.setval('form_xml', form_xml) \"\"\"", "frame_xml.get('obj_descr') await frame_vars.init(init_vals=init_vals) for inline_xml in form_xml.iterchildren('inline_form'): # do not descend init_vals =", "= caller.data_objects['outputs'] await outputs.delete_all() for seq, output_xml in enumerate(outputs_xml): # init_vals = {col:", "obj_names.init(init_vals={ 'name': dbobj.get('name'), 'descr': descr}) await obj_names.save() sql = ( \"SELECT col_name, short_descr", "obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] memobjs_xml", "}) await obj_names.save() all_cols = memcol.select_many(where=[], order=[]) async for _ in all_cols: await", "init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] =", "= caller.data_objects['body'] \"\"\" obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] all_obj = obj_names.select_many(where=[], order=[])", "subtypes_xml = etree.Element('subtypes_temp') all_subtypes = subtypes.select_many(where=[], order=[('subtype_id', 
False)]) async for _ in all_subtypes:", "frame_xml.append(await frame_vars.getval('methods_xml')) inline_vars = caller.data_objects['inline_vars'] all_inline = inline_vars.select_many(where=[], order=[]) async for _ in", "button_cols: await set_if_not_none(button_xml, button, col) await form_vars.setval('buttonrow_xml', buttonrow_xml) #----------------------------------------------------------------------------- # methods #----------------------------------------------------------------------------- method_cols", "button, col) await form_vars.setval('buttonrow_xml', buttonrow_xml) #----------------------------------------------------------------------------- # methods #----------------------------------------------------------------------------- method_cols = ('name', 'obj_name',", "= col_name_fld.foreign_key['tgt_field'].db_obj await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj in dbobj_xml.iter('db_obj'): async", "memcol.getval('short_descr'), 'seq': await memcol.getval('seq')}) await col_names.save() for deleted_obj in orig_memobj: # anything left", "# caller.data_objects['obj_names'] = parent.data_objects['obj_names'] # caller.data_objects['col_names'] = parent.data_objects['col_names'] # break # parent =", "# call form_vars.restore(), which triggers on_clean() # call form_vars.init() with init_vals, which puts", "= caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] inline_vars = caller.data_objects['inline_vars'] await inline_vars.delete_all() form_xml = await", "= ( \"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, await", "frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) await inline_vars.setval('frame_xml', frame_xml) #----------------------------------------------------------------------------- # toolbar 
#----------------------------------------------------------------------------- tool_cols =", "dbobj.delete_all() for seq, obj_xml in enumerate(dbobj_xml): # init_vals = {col: await dbobj.get_val_from_xml(col, obj_xml.get(col))", "= obj_names.select_many(where=[], order=[]) async for _ in all_obj: print(obj_names) all_col = col_names.select_many(where=[], order=[])", "caller.data_objects['body'] \"\"\" obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] all_obj = obj_names.select_many(where=[], order=[]) async", "_ in all_dbobj: dbobj_xml = etree.SubElement(dbobjs_xml, 'db_obj') for col in dbobj_cols: await set_if_not_none(dbobj_xml,", "attribute on xml element, but only if not None or default xml_val =", "in tool_cols: await set_if_not_none(tool_xml, tool, col) await form_vars.setval('toolbar_xml', toolbar_xml) #----------------------------------------------------------------------------- # buttonrow #-----------------------------------------------------------------------------", "from setup_form_body.grid_frame 'before_save' body = caller.data_objects['body'] elem_type = await body.getval('type') elem_xml = await", "etree.Element('frame') init_vals={} init_vals['toolbar_xml'] = etree.SubElement(frame_xml, 'toolbar') init_vals['body_xml'] = etree.SubElement(frame_xml, 'body') init_vals['buttonrow_xml'] = etree.SubElement(frame_xml,", "form_xml.get('after_start_form')) init_vals['on_close_form'] = await form_vars.get_val_from_xml( 'on_close_form', form_xml.get('on_close_form')) await form_vars.init(init_vals=init_vals) obj_names = caller.data_objects['obj_names'] await", "# called from setup_form after form_name if form does not exist form_defn =", "elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await gridframe_vars.init(init_vals=init_vals) elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars']", "dbobj.select_many(where=[], order=[('seq', False)]) async for 
_ in all_dbobj: dbobj_xml = etree.SubElement(dbobjs_xml, 'db_obj') for", "elem_xml.tag init_vals['seq'] = seq for fld in body.sub_types['type'][elem_xml.tag]: val = await body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name))", "col_type != 'virt' \" \"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') \" .format(caller.company,", "enumerate(inputs_xml): # init_vals = {col: inputs.get_val_from_xml(col, input_xml.get(col)) # for col in input_cols} init_vals", "conn.exec_sql(sql): await col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() memobj_xml =", "button_cols = ('btn_id', 'btn_label', 'lng', 'btn_default', 'btn_enabled', 'btn_validate', 'action', 'validation', 'help_msg') async def", "dbobj_xml = await form_vars.getval('dbobj_xml') dbobj = caller.data_objects['dbobj'] await dbobj.delete_all() for seq, obj_xml in", "frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) await inline_vars.setval('frame_xml', frame_xml) #----------------------------------------------------------------------------- # toolbar #----------------------------------------------------------------------------- tool_cols", "inline_xml = etree.SubElement(form_xml, 'inline_form') # inline_xml.set('name', name) # inline_xml.append(frame_xml) # validate result using", "for _ in all_col: print(col_names) print() \"\"\" \"\"\" obj_name_fld = await body.getfld('obj_name') obj_names", "body, col) elem_xml[:] = (await body.getval('elem'))[:] form_vars = caller.data_objects['form_vars'] await form_vars.setval('body_xml', body_xml) #-----------------------------------------------------------------------------", "init_vals['seq'] = seq await outputs.init(display=False, init_vals=init_vals) await outputs.save() async def dump_ioparms(caller, xml): #", "form_vars = caller.data_objects['form_vars'] toolbar_xml = await 
form_vars.getval('toolbar_xml') if toolbar_xml is None: toolbar_xml =", "not None or default xml_val = await db_obj.get_val_for_xml(col_name) # returns None if None", "col in output_cols: init_vals[col] = await outputs.get_val_from_xml(col, output_xml.get(col)) init_vals['seq'] = seq await outputs.init(display=False,", "called from setup_form_methods 'before_save' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] methods_xml = etree.Element('frame_methods')", "await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for col_defn in db_table.col_list: await", "= elem_xml init_vals['type'] = elem_xml.tag init_vals['seq'] = seq for fld in body.sub_types['type'][elem_xml.tag]: val", "ValueError, TypeError) as e: raise AibError(head='XmlError', body=e.args[0]) # update form_definition with new form_xml", "{} for col in method_cols: init_vals[col] = await method.get_val_from_xml(col, method_xml.get(col)) init_vals['seq'] = seq", "xml): # called from setup_form_memobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml')", "= etree.SubElement(form_xml, 'inline_form') # inline_xml.set('name', name) # inline_xml.append(frame_xml) # validate result using schema", "await form_vars.setval('body_xml', body_xml) #----------------------------------------------------------------------------- # body_elem #----------------------------------------------------------------------------- async def load_body_elem(caller, xml): # called", "has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('memobj_xml', memobjs_xml) #----------------------------------------------------------------------------- #", "anything left has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('dbobj_xml', dbobjs_xml)", "form_xml.append(await 
form_vars.getval('memobj_xml')) form_xml.append(await form_vars.getval('inputs_xml')) form_xml.append(await form_vars.getval('outputs_xml')) frame_xml = etree.SubElement(form_xml, 'frame') await set_if_not_none(frame_xml, frame_vars,", "val = await body.get_val_from_xml(fld.col_name, elem_xml.get(fld.col_name)) if val is not None: init_vals[fld.col_name] = val", "and replace the values # in 'body' on dumping elem_type = await body.getval('type')", "await memobj.getval('name') if obj_name in orig_memobj: await obj_names.init(init_vals={'name': obj_name}) orig_memobj.remove(obj_name) else: await obj_names.init(init_vals={", "\" \"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') \" .format(caller.company, table_id) ) async", "to prevent running 'on_clean' # could make an alias, without gui link (cleaner?)", "# for col in input_cols} init_vals = {} for col in input_cols: init_vals[col]", "setup_form_dbobj 'do_save' form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') orig_dbobj = set((dbobj.get('name') for", "for col in body_cols: await set_if_not_none(elem_xml, body, col) elem_xml[:] = (await body.getval('elem'))[:] form_vars", "tool.delete_all() for seq, tool_xml in enumerate(toolbar_xml): # init_vals = {col: tool.get_val_from_xml(col, tool_xml.get(col)) #", "in button_cols: init_vals[col] = await button.get_val_from_xml(col, button_xml.get(col)) init_vals['seq'] = seq await button.init(display=False, init_vals=init_vals)", "methods_xml = etree.Element('frame_methods') await set_if_not_none(methods_xml, form_vars, 'method_template', 'template') all_methods = method.select_many(where=[], order=[('seq', False)])", "\"SELECT col_name, short_descr FROM {}.db_columns \" \"WHERE table_id = {} AND col_type !=", "inline_vars = caller.data_objects['inline_vars'] await inline_vars.delete_all() form_xml = await form_defn.getval('form_xml') if form_xml is None:", "caller.data_objects['outputs'] all_outputs = 
outputs.select_many(where=[], order=[('seq', False)]) async for _ in all_outputs: output_xml =", "inline_params = await form_vars.getval('inline_xml') # for name, frame_xml in inline_params: # inline_xml =", "await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await", "form_xml await form_defn.setval('form_xml', form_xml) \"\"\" # the next bit is a trick #", "for col in dbobj_cols: await set_if_not_none(dbobj_xml, dbobj, col) obj_name = await dbobj.getval('name') if", "_ in all_obj: print(obj_names) all_col = col_names.select_many(where=[], order=[]) async for _ in all_col:", ".format(caller.company, dbobj_elem.get('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await", "grid_frame 'before_save' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] frame_xml = etree.Element('frame') await set_if_not_none(frame_xml,", "= etree.SubElement(form_xml, 'frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml'))", "# called from setup_form 'before_save' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars =", "body.setval('subtype_obj', await subtype_vars.getval('subtype_obj')) await body.setval('subtype_col', await subtype_vars.getval('subtype_col')) await body.setval('lng', await subtype_vars.getval('lng')) if elem_xml", "= seq await outputs.init(display=False, init_vals=init_vals) await outputs.save() async def dump_ioparms(caller, xml): # called", "await db.objects.get_db_table( form_vars.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save()", 
"'deleted_id') \" .format(caller.company, table_id) ) async for col_name, descr in await conn.exec_sql(sql): await", "= dbobj_elem.get('table_name') db_table = await db.objects.get_db_table( form_defn.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name,", "order=[]) async for _ in all_obj: print(obj_names) all_col = col_names.select_many(where=[], order=[]) async for", "'on_start_frame' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] await method.delete_all() methods_xml = await form_vars.getval('methods_xml')", "db.objects.get_db_table( form_vars.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for", "'before', 'form_dflt', 'validation', 'after', 'btn_id', 'btn_label', 'btn_enabled', 'btn_validate', 'action', 'help_msg', 'nb_label', 'subtype_obj', 'subtype_col',", "treeframe_vars.init(init_vals=init_vals) elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await subtype_vars.init(init_vals={ 'subtype_obj': await body.getval('subtype_obj'),", "= etree.SubElement(methods_xml, 'method') for col in method_cols: await set_if_not_none(method_xml, method, col) await form_vars.setval('methods_xml',", "'parent', 'sequence', 'sub_types', 'tree_params', 'actions', 'clone_from') memcol_cols = ('col_name', 'col_type', 'data_type', 'short_descr', 'long_descr',", "is None: body_xml = etree.Element('body') await form_vars.setval('body_xml', body_xml) await body.delete_all(from_upd_on_save=True) # a trick", "= caller.data_objects['tool'] toolbar_xml = etree.Element('toolbar') await set_if_not_none(toolbar_xml, form_vars, 'tb_template', 'template') await set_if_not_none(toolbar_xml, form_vars,", "'on_start_frame' form_vars = caller.data_objects['form_vars'] inputs_xml = await form_vars.getval('inputs_xml') outputs_xml = await form_vars.getval('outputs_xml') inputs", "'height', 'pwd', 
'readonly', 'choice', 'lookup', 'radio', 'before', 'form_dflt', 'validation', 'after', 'btn_id', 'btn_label', 'btn_enabled',", "elem_xml) async def set_if_not_none(elem_xml, db_obj, col_name, attr_name=None): # create attribute on xml element,", "init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['columns_xml'] = elem_xml.find('cur_columns') init_vals['filter_xml'] = elem_xml.find('cur_filter') init_vals['sequence_xml'] = elem_xml.find('cur_sequence') init_vals['methods_xml']", "'before_save' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] form_xml = etree.Element('form')", "which puts back the values init_vals = {} for col_defn in form_vars.db_table.col_list[1:]: #", "deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('memobj_xml', memobjs_xml) #----------------------------------------------------------------------------- # io_parms #-----------------------------------------------------------------------------", "dbobj_xml = etree.SubElement(dbobjs_xml, 'db_obj') for col in dbobj_cols: await set_if_not_none(dbobj_xml, dbobj, col) obj_name", "= caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] await memcol.delete_all() await memobj.delete_all() for seq, obj_xml in", "'lng', 'height', 'pwd', 'readonly', 'choice', 'lookup', 'radio', 'before', 'form_dflt', 'validation', 'after', 'btn_id', 'btn_label',", "await form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title'))) await form_vars.save() tool = caller.data_objects['tool'] await tool.delete_all() for seq, tool_xml", "await dbobj.get_val_from_xml(col, obj_xml.get(col)) # for col in dbobj_cols} init_vals = {} for col", "all_outputs = outputs.select_many(where=[], order=[('seq', False)]) async for _ in all_outputs: output_xml = etree.SubElement(outputs_xml,", "method = caller.data_objects['method'] await method.delete_all() methods_xml = 
await form_vars.getval('methods_xml') if methods_xml is None:", "_ in all_inputs: input_xml = etree.SubElement(inputs_xml, 'input_param') for col in input_cols: await set_if_not_none(input_xml,", "await dbobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await dbobj.init(display=False, init_vals=init_vals) await dbobj.save() await dbobj.init()", "# \"\"\" obj_name = dbobj_elem.get('name') table_name = dbobj_elem.get('table_name') db_table = await db.objects.get_db_table( form_defn.context,", "on_clean() # call form_vars.init() with init_vals, which puts back the values init_vals =", "using schema try: etree.fromstring(etree.tostring(form_xml), parser=xsd_parser) except (etree.XMLSyntaxError, ValueError, TypeError) as e: raise AibError(head='XmlError',", "import db.objects import db.api db_session = db.api.start_db_session() # need independent connection for reading", "from setup_form_ioparams 'do_save' form_vars = caller.data_objects['form_vars'] inputs_xml = etree.Element('input_params') inputs = caller.data_objects['inputs'] all_inputs", "toolbar_xml) #----------------------------------------------------------------------------- # buttonrow #----------------------------------------------------------------------------- button_cols = ('btn_id', 'btn_label', 'lng', 'btn_default', 'btn_enabled', 'btn_validate',", "# N.B. 
do not use this to store attributes - use sub_type columns", "descr}) await col_names.save() \"\"\" # \"\"\" table_name = await dbobj.getval('table_name') db_table = await", "memcol.get_val_from_xml(col, memcol_xml.get(col)) init_vals['seq'] = seq await memcol.init(display=False, init_vals=init_vals) await memcol.save() await memobj.init() await", "tool_xml = etree.SubElement(toolbar_xml, 'tool') for col in tool_cols: await set_if_not_none(tool_xml, tool, col) await", "in output_cols} init_vals = {} for col in output_cols: init_vals[col] = await outputs.get_val_from_xml(col,", "memcol.get('short_descr'), 'seq': seq}) await col_names.save() frame_xml = form_xml.find('frame') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml']", "follows - # save all values in init_vals # call form_vars.restore(), which triggers", "caller.data_objects['button'] buttonrow_xml = etree.Element('button_row') await set_if_not_none(buttonrow_xml, form_vars, 'btn_template', 'template') all_buttons = button.select_many(where=[], order=[('seq',", "elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'cur_columns') etree.SubElement(elem_xml, 'cur_filter') etree.SubElement(elem_xml,", "'allow_amend', 'max_len', 'db_scale', 'scale_ptr', 'dflt_val', 'dflt_rule', 'col_checks', 'fkey', 'choices', 'sql') async def load_mem_obj(caller,", "form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] form_xml = etree.Element('form') form_xml.set('name',", "# called from setup_form_methods 'on_start_frame' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] await method.delete_all()", "obj_names.save() all_cols = memcol.select_many(where=[], order=[]) async for _ in all_cols: await col_names.init(init_vals={ 'name':", "form_definition with new form_xml await form_defn.setval('form_xml', form_xml) 
\"\"\" # the next bit is", "print() \"\"\" \"\"\" obj_name_fld = await body.getfld('obj_name') obj_names = obj_name_fld.foreign_key['tgt_field'].db_obj await obj_names.delete_all() col_name_fld", "form_vars.save() button = caller.data_objects['button'] await button.delete_all() for seq, button_xml in enumerate(buttonrow_xml): # init_vals", "# await body.setval('obj_descr', await gridframe_vars.getval('obj_descr')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml,", "col) await form_vars.setval('toolbar_xml', toolbar_xml) #----------------------------------------------------------------------------- # buttonrow #----------------------------------------------------------------------------- button_cols = ('btn_id', 'btn_label', 'lng',", "= etree.SubElement(frame_xml, 'frame_methods') await frame_vars.init(init_vals=init_vals) async def dump_inline(caller, xml): # called from setup_form_inlline", "is None: elem_xml = etree.Element(elem_type) await body.setval('elem', elem_xml) async def set_if_not_none(elem_xml, db_obj, col_name,", "output_xml.get(col)) # for col in output_cols} init_vals = {} for col in output_cols:", "obj_name}) orig_memobj.remove(obj_name) else: await obj_names.init(init_vals={ 'name': obj_name, 'descr': await memobj.getval('descr'), # 'seq': await", "= {} \" \"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') \" \"ORDER BY", "for _ in all_memobj: memobj_xml = etree.SubElement(memobjs_xml, 'mem_obj') for col in memobj_cols: await", "setup_form_body 'on_start_frame' form_vars = caller.data_objects['form_vars'] body = caller.data_objects['body'] \"\"\" obj_names = caller.data_objects['obj_names'] col_names", "await memcol.init(display=False, init_vals=init_vals) await memcol.save() await memobj.init() await memobj.init() async def dump_mem_obj(caller, xml):", "init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['combo_type'] = await body.getval('combo_type') init_vals['toolbar_xml'] = 
elem_xml.find('toolbar') init_vals['body_xml'] =", "await treeframe_vars.getval('combo_type')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body')", "# await body.setval('num_grid_rows', await grid_vars.getval('num_grid_rows')) # await body.setval('cursor_name', await grid_vars.getval('cursor_name')) # await body.setval('form_name',", "as e: raise AibError(head='XmlError', body=e.args[0]) # update form_definition with new form_xml await form_defn.setval('form_xml',", "'descr': col_defn.short_descr, 'seq': seq}) await col_names.save() # \"\"\" memobj_xml = await form_vars.getval('memobj_xml') for", "tool = caller.data_objects['tool'] toolbar_xml = etree.Element('toolbar') await set_if_not_none(toolbar_xml, form_vars, 'tb_template', 'template') await set_if_not_none(toolbar_xml,", "await grid_vars.init(init_vals=init_vals) elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] init_vals={} init_vals['main_object'] = await", "which triggers on_clean() # call form_vars.init() with init_vals, which puts back the values", "{} for col in button_cols: init_vals[col] = await button.get_val_from_xml(col, button_xml.get(col)) init_vals['seq'] = seq", "( \"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, dbobj_elem.get('table_name')) )", "await body.getval('subtype_obj'), 'subtype_col': await body.getval('subtype_col'), 'lng': await body.getval('lng'), }) subtypes = caller.data_objects['subtypes'] await", "from setup_form_dbobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') dbobj = caller.data_objects['dbobj']", "form_vars.getval('dbobj_xml')) form_xml.append(await form_vars.getval('memobj_xml')) form_xml.append(await form_vars.getval('inputs_xml')) form_xml.append(await form_vars.getval('outputs_xml')) frame_xml = etree.SubElement(form_xml, 'frame') await 
set_if_not_none(frame_xml,", "= parent.data_objects['col_names'] # break # parent = parent.parent pass async def load_body(caller, xml):", "except (etree.XMLSyntaxError, ValueError, TypeError) as e: raise AibError(head='XmlError', body=e.args[0]) # update form_definition with", "input_xml.get(col)) # for col in input_cols} init_vals = {} for col in input_cols:", "etree.SubElement(frame_xml, 'body') init_vals['buttonrow_xml'] = etree.SubElement(frame_xml, 'button_row') init_vals['methods_xml'] = etree.SubElement(frame_xml, 'frame_methods') await frame_vars.init(init_vals=init_vals) async", "caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] frame_xml = etree.Element('frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml,", "for seq, obj_xml in enumerate(dbobj_xml): # init_vals = {col: await dbobj.get_val_from_xml(col, obj_xml.get(col)) #", "form_vars.setval('buttonrow_xml', buttonrow_xml) #----------------------------------------------------------------------------- # methods #----------------------------------------------------------------------------- method_cols = ('name', 'obj_name', 'action') async def", "method_cols = ('name', 'obj_name', 'action') async def load_methods(caller, xml): # called from setup_form_methods", "elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await gridframe_vars.init(init_vals=init_vals) elif", "= tool.select_many(where=[], order=[('seq', False)]) async for _ in all_tools: tool_xml = etree.SubElement(toolbar_xml, 'tool')", "all_cols = memcol.select_many(where=[], order=[]) async for _ in all_cols: await col_names.init(init_vals={ 'name': await", "for dbobj in dbobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] dbobj = caller.data_objects['dbobj']", 
"etree.Element('button_row') await form_vars.setval('buttonrow_xml', buttonrow_xml) await form_vars.setval('btn_template', await form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template'))) await form_vars.save() button =", "does not exist form_defn = caller.data_objects['form'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) etree.SubElement(form_xml,", "xml): # called from setup_form_methods 'on_start_frame' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] await", "caller.data_objects['button'] await button.delete_all() for seq, button_xml in enumerate(buttonrow_xml): # init_vals = {col: button.get_val_from_xml(col,", "only if not None or default xml_val = await db_obj.get_val_for_xml(col_name) # returns None", "frame_xml) #----------------------------------------------------------------------------- # toolbar #----------------------------------------------------------------------------- tool_cols = ('type', 'label', 'tip', 'lng', 'name', 'obj_name',", "= etree.Element('toolbar') await form_vars.setval('toolbar_xml', toolbar_xml) await form_vars.setval('tb_template', await form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template'))) await form_vars.setval('tb_title', await", "col_names.select_many(where=[], order=[]) async for _ in all_col: print(col_names) print() \"\"\" \"\"\" obj_name_fld =", "'lng', 'btn_default', 'btn_enabled', 'btn_validate', 'action', 'validation', 'help_msg') async def load_buttonrow(caller, xml): # called", "if buttonrow_xml is None: buttonrow_xml = etree.Element('button_row') await form_vars.setval('buttonrow_xml', buttonrow_xml) await form_vars.setval('btn_template', await", "# exclude 'row_id' col_name = col_defn.col_name init_vals[col_name] = await form_vars.getval(col_name) await form_vars.restore() await", "= ('name', 'type', 'source') async def load_ioparms(caller, xml): # called from setup_form_ioparams 'on_start_frame'", 
"col_name NOT IN ('row_id', 'created_id', 'deleted_id') \" .format(caller.company, table_id) ) async for col_name,", "form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] await method.delete_all() methods_xml = await form_vars.getval('methods_xml') if", "# body #----------------------------------------------------------------------------- body_cols = ('main_object', 'obj_descr', 'rowspan', 'colspan', 'value', 'obj_name', 'col_name', 'lng',", "= (await gridframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await gridframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await gridframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] =", "# for col in method_cols} init_vals = {} for col in method_cols: init_vals[col]", "await form_vars.setval('toolbar_xml', toolbar_xml) #----------------------------------------------------------------------------- # buttonrow #----------------------------------------------------------------------------- button_cols = ('btn_id', 'btn_label', 'lng', 'btn_default',", "toolbar_xml = etree.Element('toolbar') await form_vars.setval('toolbar_xml', toolbar_xml) await form_vars.setval('tb_template', await form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template'))) await form_vars.setval('tb_title',", "= caller.data_objects['form_vars'] method = caller.data_objects['method'] await method.delete_all() methods_xml = await form_vars.getval('methods_xml') if methods_xml", "= (await treeframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await treeframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await treeframe_vars.getval('methods_xml'))[:] elif elem_type", "'value', 'obj_name', 'col_name', 'lng', 'height', 'pwd', 'readonly', 'choice', 'lookup', 'radio', 'before', 'form_dflt', 'validation',", "grid_vars = caller.data_objects['grid_vars'] init_vals={} init_vals['toolbar_xml'] = elem_xml.find('toolbar') 
init_vals['columns_xml'] = elem_xml.find('cur_columns') init_vals['filter_xml'] = elem_xml.find('cur_filter')", "elem_xml = await body.getval('elem') if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] # await", "for seq, col_defn in enumerate(db_table.col_list): await col_names.init(init_vals={'name': col_defn.col_name, 'descr': col_defn.short_descr, 'seq': seq}) await", "caller.data_objects['grid_vars'] init_vals={} init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['columns_xml'] = elem_xml.find('cur_columns') init_vals['filter_xml'] = elem_xml.find('cur_filter') init_vals['sequence_xml'] =", "FROM {}.db_columns \" \"WHERE table_id = {} AND col_type != 'virt' \" \"AND", "(await grid_vars.getval('methods_xml'))[:] elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] # await body.setval('main_object', await", "form_vars.getval('inline_xml') # for name, frame_xml in inline_params: # inline_xml = etree.SubElement(form_xml, 'inline_form') #", "= etree.Element('body') await form_vars.setval('body_xml', body_xml) await body.delete_all(from_upd_on_save=True) # a trick to prevent running", "form_vars.setval('dbobj_xml', dbobjs_xml) #----------------------------------------------------------------------------- # mem_obj #----------------------------------------------------------------------------- memobj_cols = ('name', 'descr', 'parent', 'sequence', 'sub_types',", "init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await treeframe_vars.init(init_vals=init_vals) elif elem_type", "= caller.data_objects['frame_vars'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) form_xml.set('title', await form_defn.getval('title')) await set_if_not_none(form_xml,", "{} for col in tool_cols: init_vals[col] = await tool.get_val_from_xml(col, tool_xml.get(col)) init_vals['seq'] = 
seq", "'subtype_obj', 'subtype_col', 'data_object', 'growable', 'num_grid_rows', 'cursor_name', 'form_name', 'auto_start', 'auto_startrow', 'toolbar', 'combo_type', 'group_name', 'member_name',", "grid_vars.getval('sequence_xml'))[:] elem_xml.find('grid_methods')[:] = (await grid_vars.getval('methods_xml'))[:] elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] #", "all_methods: method_xml = etree.SubElement(methods_xml, 'method') for col in method_cols: await set_if_not_none(method_xml, method, col)", "independent connection for reading import os.path import __main__ schema_path = os.path.join(os.path.dirname(__main__.__file__), 'schemas') xsd_parser", "{} for col in memcol_cols: init_vals[col] = await memcol.get_val_from_xml(col, memcol_xml.get(col)) init_vals['seq'] = seq", "col in output_cols: await set_if_not_none(output_xml, outputs, col) await form_vars.setval('outputs_xml', outputs_xml) #----------------------------------------------------------------------------- # inline", "method_xml.get(col)) init_vals['seq'] = seq await method.init(display=False, init_vals=init_vals) await method.save() async def dump_methods(caller, xml):", "parent.data_objects['obj_names'] # caller.data_objects['col_names'] = parent.data_objects['col_names'] # break # parent = parent.parent pass async", "table_name = '{}'\" .format(caller.company, dbobj.get('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr =", "xml): # called from setup_form 'on_start_frame' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars", "= caller.data_objects['grid_vars'] # await body.setval('data_object', await grid_vars.getval('data_object')) # await body.setval('obj_descr', await grid_vars.getval('obj_descr')) #", "form_defn.getval('form_name')) etree.SubElement(form_xml, 'db_objects') etree.SubElement(form_xml, 'mem_objects') etree.SubElement(form_xml, 'input_params') etree.SubElement(form_xml, 'output_params') 
frame = etree.SubElement(form_xml, 'frame')", "'subtype_col': await body.getval('subtype_col'), 'lng': await body.getval('lng'), }) subtypes = caller.data_objects['subtypes'] await subtypes.delete_all() for", "= caller.data_objects['gridframe_vars'] # await body.setval('main_object', await gridframe_vars.getval('main_object')) # await body.setval('obj_descr', await gridframe_vars.getval('obj_descr')) if", "col_name, 'descr': descr}) await col_names.save() memobj_xml = await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'):", "load_buttonrow(caller, xml): # called from setup_form_buttonrow 'on_start_frame' form_vars = caller.data_objects['form_vars'] buttonrow_xml = await", "init_vals['type'] = elem_xml.tag init_vals['seq'] = seq for fld in body.sub_types['type'][elem_xml.tag]: val = await", "\"\"\" #----------------------------------------------------------------------------- # db_obj #----------------------------------------------------------------------------- dbobj_cols = ('name', 'table_name', 'parent', 'fkey', 'cursor', 'is_formview_obj')", "#----------------------------------------------------------------------------- # form_funcs #----------------------------------------------------------------------------- async def load_form_xml(caller, xml): # called from setup_form 'on_start_frame'", "async for _ in all_methods: method_xml = etree.SubElement(methods_xml, 'method') for col in method_cols:", "caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] memobjs_xml = etree.Element('mem_objects')", "all_col: print(col_names) print() \"\"\" \"\"\" obj_name_fld = await body.getfld('obj_name') obj_names = obj_name_fld.foreign_key['tgt_field'].db_obj await", "# called from setup_form_body 'before_save' body = caller.data_objects['body'] body_xml = etree.Element('body') all_body =", "set_if_not_none(dbobj_xml, dbobj, col) obj_name = await 
dbobj.getval('name') if obj_name in orig_dbobj: orig_dbobj.remove(obj_name) else:", "inline_vars = caller.data_objects['inline_vars'] all_inline = inline_vars.select_many(where=[], order=[]) async for _ in all_inline: inline_xml", "for seq, method_xml in enumerate(methods_xml): # init_vals = {col: method.get_val_from_xml(col, method_xml.get(col)) # for", "'cur_columns') etree.SubElement(elem_xml, 'cur_filter') etree.SubElement(elem_xml, 'cur_sequence') etree.SubElement(elem_xml, 'grid_methods') elem_xml.find('toolbar')[:] = (await grid_vars.getval('toolbar_xml'))[:] elem_xml.find('cur_columns')[:] =", "\"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, dbobj_elem.get('table_name')) ) cur", "grid_vars.getval('data_object')) # await body.setval('obj_descr', await grid_vars.getval('obj_descr')) # await body.setval('growable', await grid_vars.getval('growable')) # await", "col) elem_xml[:] = (await body.getval('elem'))[:] form_vars = caller.data_objects['form_vars'] await form_vars.setval('body_xml', body_xml) #----------------------------------------------------------------------------- #", "= caller.data_objects['obj_names'] await obj_names.delete_all() col_names = caller.data_objects['col_names'] await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml')", "# io_parms #----------------------------------------------------------------------------- input_cols = ('name', 'type', 'target', 'required') output_cols = ('name', 'type',", "col_names = caller.data_objects['col_names'] dbobj = caller.data_objects['dbobj'] dbobjs_xml = etree.Element('db_objects') all_dbobj = dbobj.select_many(where=[], order=[('seq',", "elem_xml[:] = (await body.getval('elem'))[:] form_vars = caller.data_objects['form_vars'] await form_vars.setval('body_xml', body_xml) #----------------------------------------------------------------------------- # body_elem", "method.init(display=False, init_vals=init_vals) await method.save() 
async def dump_methods(caller, xml): # called from setup_form_methods 'before_save'", "'action') async def load_methods(caller, xml): # called from setup_form_methods 'on_start_frame' form_vars = caller.data_objects['form_vars']", "memobj.iter('mem_col'): await col_names.init(init_vals={'obj_id': obj_row_id, 'name': memcol.get('col_name'), 'descr': memcol.get('short_descr')}) await col_names.save() \"\"\" body_xml =", "= etree.SubElement(buttonrow_xml, 'button') for col in button_cols: await set_if_not_none(button_xml, button, col) await form_vars.setval('buttonrow_xml',", "trick # we want to 'save' form_vars, to trigger on_clean() # however, inline_xml", "in input_cols: init_vals[col] = await inputs.get_val_from_xml(col, input_xml.get(col)) init_vals['seq'] = seq await inputs.init(display=False, init_vals=init_vals)", "# need independent connection for reading import os.path import __main__ schema_path = os.path.join(os.path.dirname(__main__.__file__),", "for _ in all_memcol: memcol_xml = etree.SubElement(memobj_xml, 'mem_col') for col in memcol_cols: await", "setup_form_body.grid_frame 'before_save' body = caller.data_objects['body'] elem_type = await body.getval('type') elem_xml = await body.getval('elem')", "init_vals['methods_xml'] = elem_xml.find('frame_methods') await treeframe_vars.init(init_vals=init_vals) elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await", "import db.api db_session = db.api.start_db_session() # need independent connection for reading import os.path", "await frame_vars.init() return init_vals={} init_vals['dbobj_xml'] = form_xml.find('db_objects') init_vals['memobj_xml'] = form_xml.find('mem_objects') init_vals['inputs_xml'] = form_xml.find('input_params')", "= etree.SubElement(dbobjs_xml, 'db_obj') for col in dbobj_cols: await set_if_not_none(dbobj_xml, dbobj, col) obj_name =", "all_col = col_names.select_many(where=[], order=[]) async for _ in all_col: print(col_names) print() \"\"\" \"\"\"", 
"table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': obj_name, 'descr': descr}) await obj_names.save()", "memobj.save() #set up memcols for this memobj for seq, memcol_xml in enumerate(obj_xml.iter('mem_col')): #", "'lookup', 'radio', 'before', 'form_dflt', 'validation', 'after', 'btn_id', 'btn_label', 'btn_enabled', 'btn_validate', 'action', 'help_msg', 'nb_label',", "{} for col in output_cols: init_vals[col] = await outputs.get_val_from_xml(col, output_xml.get(col)) init_vals['seq'] = seq", "}) await subtypes.save() async def dump_body_elem(caller, xml): # called from setup_form_body.grid_frame 'before_save' body", "await body.setval('lng', await subtype_vars.getval('lng')) if elem_xml is None: elem_xml = etree.Element(elem_type) subtypes =", "seq, memcol_xml in enumerate(obj_xml.iter('mem_col')): # init_vals = {col: memcol.get_val_from_xml(col, memcol_xml.get(col)) # for col", "'sequence', 'sub_types', 'tree_params', 'actions', 'clone_from') memcol_cols = ('col_name', 'col_type', 'data_type', 'short_descr', 'long_descr', 'col_head',", "def dump_inline(caller, xml): # called from setup_form_inlline grid_frame 'before_save' inline_vars = caller.data_objects['inline_vars'] frame_vars", "toolbar_xml is None: toolbar_xml = etree.Element('toolbar') await form_vars.setval('toolbar_xml', toolbar_xml) await form_vars.setval('tb_template', await form_vars.get_val_from_xml('tb_template',", "'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await treeframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] =", "None: if attr_name is None: # if not specified, use col_name attr_name =", "dbobj_xml.iter('db_obj'): async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql = ( \"SELECT", "col in memobj_cols: init_vals[col] = await memobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await 
memobj.init(display=False,", "form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] form_xml = etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) form_xml.set('title',", "treeframe_vars = caller.data_objects['treeframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['combo_type']", "obj_xml.get(col)) # for col in dbobj_cols} init_vals = {} for col in dbobj_cols:", "# \"\"\" for deleted_obj in orig_dbobj: # anything left has been deleted await", "frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) await inline_vars.setval('frame_xml', frame_xml) #----------------------------------------------------------------------------- #", "WHERE table_name = '{}'\" .format(caller.company, await dbobj.getval('table_name')) ) cur = await conn.exec_sql(sql) table_id,", "treeframe_vars.getval('main_object')) await body.setval('obj_descr', await treeframe_vars.getval('obj_descr')) await body.setval('combo_type', await treeframe_vars.getval('combo_type')) if elem_xml is None:", "setup_form_body.grid_frame 'on_start_frame' body = caller.data_objects['body'] # N.B. 
do not use this to store", "parent.data_objects['col_names'] # break # parent = parent.parent pass async def load_toolbar(caller, xml): #", "outputs.save() async def dump_ioparms(caller, xml): # called from setup_form_ioparams 'do_save' form_vars = caller.data_objects['form_vars']", "in all_cols: await col_names.init(init_vals={ 'name': await memcol.getval('col_name'), 'descr': await memcol.getval('short_descr'), 'seq': await memcol.getval('seq')})", "caller.data_objects['tool'] await tool.delete_all() for seq, tool_xml in enumerate(toolbar_xml): # init_vals = {col: tool.get_val_from_xml(col,", "= inline_vars.select_many(where=[], order=[]) async for _ in all_inline: inline_xml = etree.SubElement(form_xml, 'inline_form') inline_xml.set('name',", "is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'cur_columns') etree.SubElement(elem_xml, 'cur_filter') etree.SubElement(elem_xml, 'cur_sequence')", "body.setval('combo_type', await treeframe_vars.getval('combo_type')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml,", "set_if_not_none(output_xml, outputs, col) await form_vars.setval('outputs_xml', outputs_xml) #----------------------------------------------------------------------------- # inline forms #----------------------------------------------------------------------------- async def", "caller.data_objects['obj_names'] = parent.data_objects['obj_names'] # caller.data_objects['col_names'] = parent.data_objects['col_names'] # break # parent = parent.parent", "'row_id' col_name = col_defn.col_name init_vals[col_name] = await form_vars.getval(col_name) await form_vars.restore() await form_vars.init(init_vals=init_vals, display=False)", "'name': col_name, 'descr': descr}) await col_names.save() \"\"\" # \"\"\" table_name = await dbobj.getval('table_name')", "caller.data_objects['form_vars'] button = caller.data_objects['button'] 
buttonrow_xml = etree.Element('button_row') await set_if_not_none(buttonrow_xml, form_vars, 'btn_template', 'template') all_buttons", "= etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) form_xml.set('title', await form_defn.getval('title')) await set_if_not_none(form_xml, form_vars, 'before_start_form') await", "obj_names.delete() await form_vars.setval('dbobj_xml', dbobjs_xml) #----------------------------------------------------------------------------- # mem_obj #----------------------------------------------------------------------------- memobj_cols = ('name', 'descr', 'parent',", "elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] # await body.setval('data_object', await grid_vars.getval('data_object')) # await", "treeframe_vars.getval('combo_type')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml,", "inline_xml.find('frame'), } await inline_vars.init(init_vals=init_vals) await inline_vars.save() async def dump_form_xml(caller, xml): # called from", "etree.SubElement(inputs_xml, 'input_param') for col in input_cols: await set_if_not_none(input_xml, inputs, col) await form_vars.setval('inputs_xml', inputs_xml)", "in all_buttons: button_xml = etree.SubElement(buttonrow_xml, 'button') for col in button_cols: await set_if_not_none(button_xml, button,", "body.getval('type') elem_xml = await body.getval('elem') if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] #", "in enumerate(outputs_xml): # init_vals = {col: outputs.get_val_from_xml(col, output_xml.get(col)) # for col in output_cols}", "body.getval('elem'))[:] form_vars = caller.data_objects['form_vars'] await form_vars.setval('body_xml', body_xml) #----------------------------------------------------------------------------- # body_elem #----------------------------------------------------------------------------- async def", "await 
tool.delete_all() for seq, tool_xml in enumerate(toolbar_xml): # init_vals = {col: tool.get_val_from_xml(col, tool_xml.get(col))", "if inline_vars.exists: frame_xml = await inline_vars.getval('frame_xml') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body')", "triggers on_clean() # call form_vars.init() with init_vals, which puts back the values init_vals", "col in tool_cols: await set_if_not_none(tool_xml, tool, col) await form_vars.setval('toolbar_xml', toolbar_xml) #----------------------------------------------------------------------------- # buttonrow", "'after_start_form', form_xml.get('after_start_form')) init_vals['on_close_form'] = await form_vars.get_val_from_xml( 'on_close_form', form_xml.get('on_close_form')) await form_vars.init(init_vals=init_vals) obj_names = caller.data_objects['obj_names']", "for col in dbobj_cols: init_vals[col] = await dbobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await", "etree.SubElement(toolbar_xml, 'tool') for col in tool_cols: await set_if_not_none(tool_xml, tool, col) await form_vars.setval('toolbar_xml', toolbar_xml)", "for _ in all_cols: await col_names.init(init_vals={ 'name': await memcol.getval('col_name'), 'descr': await memcol.getval('short_descr'), 'seq':", "{col: memcol.get_val_from_xml(col, memcol_xml.get(col)) # for col in memcol_cols} init_vals = {} for col", "short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, await dbobj.getval('table_name')) ) cur =", "deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('dbobj_xml', dbobjs_xml) #----------------------------------------------------------------------------- # mem_obj #-----------------------------------------------------------------------------", "memobj_cols: await set_if_not_none(memobj_xml, memobj, col) all_memcol = memcol.select_many(where=[], order=[('seq', False)]) async for _", "( 
\"SELECT col_name, short_descr FROM {}.db_columns \" \"WHERE table_id = {} \" \"AND", "# called from setup_form_memobj 'before_save' form_vars = caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml') orig_memobj", "method.get_val_from_xml(col, method_xml.get(col)) init_vals['seq'] = seq await method.init(display=False, init_vals=init_vals) await method.save() async def dump_methods(caller,", "descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj_elem.get('name'), 'descr': descr}) await obj_names.save() sql", "form_vars.getval('toolbar_xml') if toolbar_xml is None: toolbar_xml = etree.Element('toolbar') await form_vars.setval('toolbar_xml', toolbar_xml) await form_vars.setval('tb_template',", "treeframe_vars.getval('methods_xml'))[:] elif elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await body.setval('subtype_obj', await subtype_vars.getval('subtype_obj')) await", "in enumerate(toolbar_xml): # init_vals = {col: tool.get_val_from_xml(col, tool_xml.get(col)) # for col in tool_cols}", "await treeframe_vars.getval('main_object')) await body.setval('obj_descr', await treeframe_vars.getval('obj_descr')) await body.setval('combo_type', await treeframe_vars.getval('combo_type')) if elem_xml is", "update form_definition with new form_xml await form_defn.setval('form_xml', form_xml) \"\"\" # the next bit", "False)]) async for _ in all_subtypes: subtype_xml = etree.SubElement(subtypes_xml, 'subtype_body') await set_if_not_none(subtype_xml, subtypes,", "dbobj_cols: init_vals[col] = await dbobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await dbobj.init(display=False, init_vals=init_vals) await", "memobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await memobj.init(display=False, init_vals=init_vals) await memobj.save() #set up memcols", "parent.data_objects: # caller.data_objects['obj_names'] = parent.data_objects['obj_names'] # 
caller.data_objects['col_names'] = parent.data_objects['col_names'] # break # parent", "form_xml.set('title', await form_defn.getval('title')) await set_if_not_none(form_xml, form_vars, 'before_start_form') await set_if_not_none(form_xml, form_vars, 'after_start_form') await set_if_not_none(form_xml,", "await db.objects.get_db_table( form_defn.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save()", "await col_names.save() \"\"\" body_xml = await form_vars.getval('body_xml') if body_xml is None: body_xml =", "'descr': descr}) await obj_names.save() sql = ( \"SELECT col_name, short_descr FROM {}.db_columns \"", "memobj.get_val_from_xml(col, obj_xml.get(col)) # for col in memobj_cols} init_vals = {} for col in", "await set_if_not_none(output_xml, outputs, col) await form_vars.setval('outputs_xml', outputs_xml) #----------------------------------------------------------------------------- # inline forms #----------------------------------------------------------------------------- async", "= caller.data_objects['grid_vars'] init_vals={} init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['columns_xml'] = elem_xml.find('cur_columns') init_vals['filter_xml'] = elem_xml.find('cur_filter') init_vals['sequence_xml']", "seq await outputs.init(display=False, init_vals=init_vals) await outputs.save() async def dump_ioparms(caller, xml): # called from", "'on_close_form') form_xml.append(await form_vars.getval('dbobj_xml')) form_xml.append(await form_vars.getval('memobj_xml')) form_xml.append(await form_vars.getval('inputs_xml')) form_xml.append(await form_vars.getval('outputs_xml')) frame_xml = etree.SubElement(form_xml, 'frame')", "{}.db_columns \" \"WHERE table_id = {} AND col_type != 'virt' \" \"AND col_name", "etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await gridframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await 
gridframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await", "'db_obj') for col in dbobj_cols: await set_if_not_none(dbobj_xml, dbobj, col) obj_name = await dbobj.getval('name')", "equal to default if xml_val is not None: if attr_name is None: #", "col) obj_name = await memobj.getval('name') if obj_name in orig_memobj: await obj_names.init(init_vals={'name': obj_name}) orig_memobj.remove(obj_name)", "= etree.SubElement(frame_xml, 'toolbar') init_vals['body_xml'] = etree.SubElement(frame_xml, 'body') init_vals['buttonrow_xml'] = etree.SubElement(frame_xml, 'button_row') init_vals['methods_xml'] =", "col) await form_vars.setval('buttonrow_xml', buttonrow_xml) #----------------------------------------------------------------------------- # methods #----------------------------------------------------------------------------- method_cols = ('name', 'obj_name', 'action')", "FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, await dbobj.getval('table_name')) ) cur = await", "from setup_form_frame.toolbar 'before_save' form_vars = caller.data_objects['form_vars'] tool = caller.data_objects['tool'] toolbar_xml = etree.Element('toolbar') await", "= caller.parent # while True: # if 'obj_names' in parent.data_objects: # caller.data_objects['obj_names'] =", "form_xml.find('input_params') init_vals['outputs_xml'] = form_xml.find('output_params') init_vals['before_start_form'] = await form_vars.get_val_from_xml( 'before_start_form', form_xml.get('before_start_form')) init_vals['after_start_form'] = await", "col_name_fld.foreign_key['tgt_field'].db_obj await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj in dbobj_xml.iter('db_obj'): async with", "await subtypes.save() async def dump_body_elem(caller, xml): # called from setup_form_body.grid_frame 'before_save' body =", "obj_names.delete_all() col_name_fld = await body.getfld('col_name') col_names = col_name_fld.foreign_key['tgt_field'].db_obj await 
col_names.delete_all() dbobj_xml = await", "method_cols: init_vals[col] = await method.get_val_from_xml(col, method_xml.get(col)) init_vals['seq'] = seq await method.init(display=False, init_vals=init_vals) await", "frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') else: frame_xml = etree.Element('frame') init_vals={} init_vals['toolbar_xml'] = etree.SubElement(frame_xml, 'toolbar')", "frame_vars.getval('methods_xml')) inline_vars = caller.data_objects['inline_vars'] all_inline = inline_vars.select_many(where=[], order=[]) async for _ in all_inline:", "all_cols: await col_names.init(init_vals={ 'name': await memcol.getval('col_name'), 'descr': await memcol.getval('short_descr'), 'seq': await memcol.getval('seq')}) await", "caller.data_objects['form_vars'] await form_vars.setval('body_xml', body_xml) #----------------------------------------------------------------------------- # body_elem #----------------------------------------------------------------------------- async def load_body_elem(caller, xml): #", "await grid_vars.getval('growable')) # await body.setval('num_grid_rows', await grid_vars.getval('num_grid_rows')) # await body.setval('cursor_name', await grid_vars.getval('cursor_name')) #", "all_memobj = memobj.select_many(where=[], order=[('seq', False)]) async for _ in all_memobj: memobj_xml = etree.SubElement(memobjs_xml,", "subtypes = caller.data_objects['subtypes'] subtypes_xml = etree.Element('subtypes_temp') all_subtypes = subtypes.select_many(where=[], order=[('subtype_id', False)]) async for", "method.select_many(where=[], order=[('seq', False)]) async for _ in all_methods: method_xml = etree.SubElement(methods_xml, 'method') for", "for name, frame_xml in inline_params: # inline_xml = etree.SubElement(form_xml, 'inline_form') # inline_xml.set('name', name)", "setup_form 'on_start_frame' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] 
inline_vars =", "await form_defn.setval('form_xml', form_xml) \"\"\" # the next bit is a trick # we", "col in memcol_cols: await set_if_not_none(memcol_xml, memcol, col) obj_name = await memobj.getval('name') if obj_name", "called from setup_form_methods 'on_start_frame' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] await method.delete_all() methods_xml", "# called from setup_form_body.grid_frame 'on_start_frame' body = caller.data_objects['body'] # N.B. do not use", "memcols for this memobj for seq, memcol_xml in enumerate(obj_xml.iter('mem_col')): # init_vals = {col:", "button_xml = etree.SubElement(buttonrow_xml, 'button') for col in button_cols: await set_if_not_none(button_xml, button, col) await", "setup_form_dbobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') dbobj = caller.data_objects['dbobj'] await", "grid_vars.getval('toolbar_xml'))[:] elem_xml.find('cur_columns')[:] = (await grid_vars.getval('columns_xml'))[:] elem_xml.find('cur_filter')[:] = (await grid_vars.getval('filter_xml'))[:] elem_xml.find('cur_sequence')[:] = (await grid_vars.getval('sequence_xml'))[:]", "await body.delete_all(from_upd_on_save=True) # a trick to prevent running 'on_clean' for seq, elem_xml in", "called from setup_form_toolbar 'before_start_form' # parent = caller.parent # while True: # if", "'is_formview_obj') async def load_db_obj(caller, xml): # called from setup_form_dbobj 'on_start_frame' form_vars = caller.data_objects['form_vars']", "attributes in separate mem_objects, # get the values from 'body' on loading, and", "'method_template', 'template') all_methods = method.select_many(where=[], order=[('seq', False)]) async for _ in all_methods: method_xml", "await memobj.delete_all() for seq, obj_xml in enumerate(memobj_xml): # init_vals = {col: memobj.get_val_from_xml(col, obj_xml.get(col))", "obj_row_id = await obj_names.getval('row_id') for memcol in memobj.iter('mem_col'): 
await col_names.init(init_vals={'obj_id': obj_row_id, 'name': memcol.get('col_name'),", "for subtype in elem_xml.iter('subtype_body'): await subtypes.init(init_vals={ 'subtype_id': subtype.get('subtype_id'), 'body_xml': subtype, }) await subtypes.save()", "subtypes.getval('body_xml'))[:] elem_xml[:] = subtypes_xml[:] elif elem_xml is None: elem_xml = etree.Element(elem_type) await body.setval('elem',", "output_xml = etree.SubElement(outputs_xml, 'output_param') for col in output_cols: await set_if_not_none(output_xml, outputs, col) await", "all_inline = inline_vars.select_many(where=[], order=[]) async for _ in all_inline: inline_xml = etree.SubElement(form_xml, 'inline_form')", "= caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] dbobj = caller.data_objects['dbobj'] dbobjs_xml = etree.Element('db_objects') all_dbobj =", "in memobj_cols} init_vals = {} for col in memobj_cols: init_vals[col] = await memobj.get_val_from_xml(col,", "await memcol.getval('seq')}) await col_names.save() for deleted_obj in orig_memobj: # anything left has been", "'short_descr', 'long_descr', 'col_head', 'key_field', 'data_source', 'condition', 'allow_null', 'allow_amend', 'max_len', 'db_scale', 'scale_ptr', 'dflt_val', 'dflt_rule',", "(await subtypes.getval('body_xml'))[:] elem_xml[:] = subtypes_xml[:] elif elem_xml is None: elem_xml = etree.Element(elem_type) await", "xml): # called from setup_form_body 'before_save' body = caller.data_objects['body'] body_xml = etree.Element('body') all_body", "from common import AibError from common import log, debug async def init_xml(caller, xml):", "dbobj_elem in dbobj_xml.iter('db_obj'): \"\"\" async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql", "# init_vals = {col: inputs.get_val_from_xml(col, input_xml.get(col)) # for col in input_cols} init_vals =", "elem_xml.find('toolbar') init_vals['columns_xml'] = elem_xml.find('cur_columns') init_vals['filter_xml'] = elem_xml.find('cur_filter') 
init_vals['sequence_xml'] = elem_xml.find('cur_sequence') init_vals['methods_xml'] = elem_xml.find('grid_methods')", "running 'on_clean' # could make an alias, without gui link (cleaner?) async def", "'table_name', 'parent', 'fkey', 'cursor', 'is_formview_obj') async def load_db_obj(caller, xml): # called from setup_form_dbobj", ") async for col_name, descr in await conn.exec_sql(sql): await col_names.init(init_vals={ #'obj_id': obj_row_id, 'name':", "etree.SubElement(form_xml, 'frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await", "= method.select_many(where=[], order=[('seq', False)]) async for _ in all_methods: method_xml = etree.SubElement(methods_xml, 'method')", "col_defn.col_name, 'descr': col_defn.short_descr}) await col_names.save() # \"\"\" for deleted_obj in orig_dbobj: # anything", "async def before_start_toolbar(caller, xml): # called from setup_form_toolbar 'before_start_form' # parent = caller.parent", "# validate result using schema try: etree.fromstring(etree.tostring(form_xml), parser=xsd_parser) except (etree.XMLSyntaxError, ValueError, TypeError) as", "'long_descr', 'col_head', 'key_field', 'data_source', 'condition', 'allow_null', 'allow_amend', 'max_len', 'db_scale', 'scale_ptr', 'dflt_val', 'dflt_rule', 'col_checks',", "import log, debug async def init_xml(caller, xml): # called from setup_form after form_name", "called from setup_form_buttonrow 'on_start_frame' form_vars = caller.data_objects['form_vars'] buttonrow_xml = await form_vars.getval('buttonrow_xml') if buttonrow_xml", "seq await memcol.init(display=False, init_vals=init_vals) await memcol.save() await memobj.init() await memobj.init() async def dump_mem_obj(caller,", "gridframe_vars.getval('main_object')) # await body.setval('obj_descr', await gridframe_vars.getval('obj_descr')) if elem_xml is None: elem_xml = 
etree.Element(elem_type)", "table_name = '{}'\" .format(caller.company, dbobj_elem.get('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr =", "dbobj_cols: await set_if_not_none(dbobj_xml, dbobj, col) obj_name = await dbobj.getval('name') if obj_name in orig_dbobj:", "await memobj.init() async def dump_mem_obj(caller, xml): # called from setup_form_memobj 'before_save' form_vars =", "cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': obj_name,", "elem_xml in enumerate(body_xml): init_vals = {} init_vals['elem'] = elem_xml init_vals['type'] = elem_xml.tag init_vals['seq']", "'descr': memcol.get('short_descr'), 'seq': seq}) await col_names.save() frame_xml = form_xml.find('frame') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar')", "with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql = ( \"SELECT row_id, short_descr", "orig_dbobj: orig_dbobj.remove(obj_name) else: \"\"\" async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db sql", "= {col: memcol.get_val_from_xml(col, memcol_xml.get(col)) # for col in memcol_cols} init_vals = {} for", "obj_names.getval('row_id') for seq, memcol in enumerate(memobj.iter('mem_col')): await col_names.init(init_vals={'name': memcol.get('col_name'), 'descr': memcol.get('short_descr'), 'seq': seq})", "await body.setval('elem', elem_xml) async def set_if_not_none(elem_xml, db_obj, col_name, attr_name=None): # create attribute on", "# called from setup_form_body.grid_frame 'before_save' body = caller.data_objects['body'] elem_type = await body.getval('type') elem_xml", "'tgt') async def before_start_body(caller, xml): # called from setup_form_body 'before_start_form' # parent =", "form_xml.append(await form_vars.getval('outputs_xml')) frame_xml = etree.SubElement(form_xml, 'frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars,", "conn = db_mem_conn.db 
sql = ( \"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name", "await set_if_not_none(toolbar_xml, form_vars, 'tb_template', 'template') await set_if_not_none(toolbar_xml, form_vars, 'tb_title', 'title') all_tools = tool.select_many(where=[],", "xml): # called from setup_form_inline grid_frame 'on_start_frame' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars']", "elem_xml is None: elem_xml = etree.Element(elem_type) subtypes = caller.data_objects['subtypes'] subtypes_xml = etree.Element('subtypes_temp') all_subtypes", "init_vals[col] = await memobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await memobj.init(display=False, init_vals=init_vals) await memobj.save()", "xml): # called from setup_form_buttonrow 'on_start_frame' form_vars = caller.data_objects['form_vars'] buttonrow_xml = await form_vars.getval('buttonrow_xml')", "body.setval('main_object', await treeframe_vars.getval('main_object')) await body.setval('obj_descr', await treeframe_vars.getval('obj_descr')) await body.setval('combo_type', await treeframe_vars.getval('combo_type')) if elem_xml", "memcol.delete_all() await memobj.delete_all() for seq, obj_xml in enumerate(memobj_xml): # init_vals = {col: memobj.get_val_from_xml(col,", "etree.SubElement(subtypes_xml, 'subtype_body') await set_if_not_none(subtype_xml, subtypes, 'subtype_id') subtype_xml[:] = (await subtypes.getval('body_xml'))[:] elem_xml[:] = subtypes_xml[:]", "TypeError) as e: raise AibError(head='XmlError', body=e.args[0]) # update form_definition with new form_xml await", "async def dump_ioparms(caller, xml): # called from setup_form_ioparams 'do_save' form_vars = caller.data_objects['form_vars'] inputs_xml", "def load_toolbar(caller, xml): # called from setup_form_frame.toolbar 'on_start_frame' form_vars = caller.data_objects['form_vars'] toolbar_xml =", "dbobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] dbobj = 
caller.data_objects['dbobj'] dbobjs_xml = etree.Element('db_objects')", "init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr']", "await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('dbobj_xml', dbobjs_xml) #----------------------------------------------------------------------------- # mem_obj #----------------------------------------------------------------------------- memobj_cols", "tool.init(display=False, init_vals=init_vals) await tool.save() async def dump_toolbar(caller, xml): # called from setup_form_frame.toolbar 'before_save'", "seq await inputs.init(display=False, init_vals=init_vals) await inputs.save() outputs = caller.data_objects['outputs'] await outputs.delete_all() for seq,", "seq await tool.init(display=False, init_vals=init_vals) await tool.save() async def dump_toolbar(caller, xml): # called from", "init_vals = {col: inputs.get_val_from_xml(col, input_xml.get(col)) # for col in input_cols} init_vals = {}", "init_vals['columns_xml'] = elem_xml.find('cur_columns') init_vals['filter_xml'] = elem_xml.find('cur_filter') init_vals['sequence_xml'] = elem_xml.find('cur_sequence') init_vals['methods_xml'] = elem_xml.find('grid_methods') await", "is not None: init_vals[fld.col_name] = val await body.init(display=False, init_vals=init_vals) await body.save(from_upd_on_save=True) # a", "outputs.get_val_from_xml(col, output_xml.get(col)) # for col in output_cols} init_vals = {} for col in", "all_body = body.select_many(where=[], order=[('seq', False)]) async for _ in all_body: elem_xml = etree.SubElement(body_xml,", "up memcols for this memobj for seq, memcol_xml in enumerate(obj_xml.iter('mem_col')): # init_vals =", "(await gridframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await 
gridframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await gridframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await", "_ in all_subtypes: subtype_xml = etree.SubElement(subtypes_xml, 'subtype_body') await set_if_not_none(subtype_xml, subtypes, 'subtype_id') subtype_xml[:] =", "to 'save' form_vars, to trigger on_clean() # however, inline_xml is a 'list' which", "name, frame_xml in inline_params: # inline_xml = etree.SubElement(form_xml, 'inline_form') # inline_xml.set('name', name) #", "= etree.SubElement(memobj_xml, 'mem_col') for col in memcol_cols: await set_if_not_none(memcol_xml, memcol, col) obj_name =", "all_outputs: output_xml = etree.SubElement(outputs_xml, 'output_param') for col in output_cols: await set_if_not_none(output_xml, outputs, col)", "form_vars.init_vals = {} \"\"\" #----------------------------------------------------------------------------- # db_obj #----------------------------------------------------------------------------- dbobj_cols = ('name', 'table_name', 'parent',", "{}.db_tables WHERE table_name = '{}'\" .format(caller.company, dbobj.get('table_name')) ) cur = await conn.exec_sql(sql) table_id,", "caller.data_objects['inline_vars'] all_inline = inline_vars.select_many(where=[], order=[]) async for _ in all_inline: inline_xml = etree.SubElement(form_xml,", "obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await obj_names.save() for seq, col_defn in enumerate(db_table.col_list): await", "form_vars.setval('buttonrow_xml', buttonrow_xml) await form_vars.setval('btn_template', await form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template'))) await form_vars.save() button = caller.data_objects['button'] await", "col in button_cols: await set_if_not_none(button_xml, button, col) await form_vars.setval('buttonrow_xml', buttonrow_xml) #----------------------------------------------------------------------------- # methods", "= await 
body.getval('obj_descr') init_vals['combo_type'] = await body.getval('combo_type') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body')", "output_cols = ('name', 'type', 'source') async def load_ioparms(caller, xml): # called from setup_form_ioparams", "for col in button_cols: init_vals[col] = await button.get_val_from_xml(col, button_xml.get(col)) init_vals['seq'] = seq await", "etree.Element('frame_methods') await form_vars.setval('methods_xml', methods_xml) await form_vars.setval('method_template', await form_vars.get_val_from_xml('method_template', methods_xml.get('template'))) await form_vars.save() for seq,", "\"\"\" # \"\"\" table_name = await dbobj.getval('table_name') db_table = await db.objects.get_db_table( form_vars.context, caller.company,", "'num_grid_rows', 'cursor_name', 'form_name', 'auto_start', 'auto_startrow', 'toolbar', 'combo_type', 'group_name', 'member_name', 'pyfunc', 'prev', 'align', 'src',", "'max_len', 'db_scale', 'scale_ptr', 'dflt_val', 'dflt_rule', 'col_checks', 'fkey', 'choices', 'sql') async def load_mem_obj(caller, xml):", "= await form_vars.getval('body_xml') if body_xml is None: body_xml = etree.Element('body') await form_vars.setval('body_xml', body_xml)", "all_body: elem_xml = etree.SubElement(body_xml, await body.getval('type')) for col in body_cols: await set_if_not_none(elem_xml, body,", "parent = caller.parent # while True: # if 'obj_names' in parent.data_objects: # caller.data_objects['obj_names']", "init_vals = { 'name': inline_xml.get('name'), 'title': inline_xml.get('title'), 'frame_xml': inline_xml.find('frame'), } await inline_vars.init(init_vals=init_vals) await", "to store copies of attributes in separate mem_objects, # get the values from", "'dflt_val', 'dflt_rule', 'col_checks', 'fkey', 'choices', 'sql') async def load_mem_obj(caller, xml): # called from", "for _ in all_subtypes: subtype_xml = etree.SubElement(subtypes_xml, 'subtype_body') await 
set_if_not_none(subtype_xml, subtypes, 'subtype_id') subtype_xml[:]", "'subtype_obj': await body.getval('subtype_obj'), 'subtype_col': await body.getval('subtype_col'), 'lng': await body.getval('lng'), }) subtypes = caller.data_objects['subtypes']", "= etree.Element('mem_objects') all_memobj = memobj.select_many(where=[], order=[('seq', False)]) async for _ in all_memobj: memobj_xml", "tool_cols: await set_if_not_none(tool_xml, tool, col) await form_vars.setval('toolbar_xml', toolbar_xml) #----------------------------------------------------------------------------- # buttonrow #----------------------------------------------------------------------------- button_cols", "= etree.Element('frame') init_vals={} init_vals['toolbar_xml'] = etree.SubElement(frame_xml, 'toolbar') init_vals['body_xml'] = etree.SubElement(frame_xml, 'body') init_vals['buttonrow_xml'] =", "# called from setup_form_dbobj 'do_save' form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') orig_dbobj", "obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() \"\"\" # \"\"\" table_name = await", "save fails # the trick is as follows - # save all values", "'toolbar') init_vals['body_xml'] = etree.SubElement(frame_xml, 'body') init_vals['buttonrow_xml'] = etree.SubElement(frame_xml, 'button_row') init_vals['methods_xml'] = etree.SubElement(frame_xml, 'frame_methods')", "FROM {}.db_columns \" \"WHERE table_id = {} \" \"AND col_name NOT IN ('row_id',", "= caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] if inline_vars.exists: frame_xml = await inline_vars.getval('frame_xml') init_vals={} init_vals['toolbar_xml']", "gridframe_vars = caller.data_objects['gridframe_vars'] # await body.setval('main_object', await gridframe_vars.getval('main_object')) # await body.setval('obj_descr', await gridframe_vars.getval('obj_descr'))", "order=[('seq', False)]) async for _ in all_body: elem_xml = etree.SubElement(body_xml, await 
body.getval('type')) for", "'rowspan', 'colspan', 'value', 'obj_name', 'col_name', 'lng', 'height', 'pwd', 'readonly', 'choice', 'lookup', 'radio', 'before',", "\" .format(caller.company, table_id) ) async for col_name, descr in await conn.exec_sql(sql): await col_names.init(init_vals={", "etree.Element('output_params') outputs = caller.data_objects['outputs'] all_outputs = outputs.select_many(where=[], order=[('seq', False)]) async for _ in", "\"SELECT row_id, short_descr FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, await dbobj.getval('table_name')) )", "= await button.get_val_from_xml(col, button_xml.get(col)) init_vals['seq'] = seq await button.init(display=False, init_vals=init_vals) await button.save() async", "# init_vals = {col: memobj.get_val_from_xml(col, obj_xml.get(col)) # for col in memobj_cols} init_vals =", "tool_xml.get(col)) init_vals['seq'] = seq await tool.init(display=False, init_vals=init_vals) await tool.save() async def dump_toolbar(caller, xml):", "= caller.data_objects['form_vars'] await form_vars.setval('body_xml', body_xml) #----------------------------------------------------------------------------- # body_elem #----------------------------------------------------------------------------- async def load_body_elem(caller, xml):", "'obj_descr', 'rowspan', 'colspan', 'value', 'obj_name', 'col_name', 'lng', 'height', 'pwd', 'readonly', 'choice', 'lookup', 'radio',", "break # parent = parent.parent pass async def load_body(caller, xml): # called from", "form_vars.getval('body_xml') if body_xml is None: body_xml = etree.Element('body') await form_vars.setval('body_xml', body_xml) await body.delete_all(from_upd_on_save=True)", "init_vals[col] = await tool.get_val_from_xml(col, tool_xml.get(col)) init_vals['seq'] = seq await tool.init(display=False, init_vals=init_vals) await tool.save()", "= etree.SubElement(toolbar_xml, 'tool') for col in tool_cols: await set_if_not_none(tool_xml, tool, col) await 
form_vars.setval('toolbar_xml',", "memcol.save() await memobj.init() await memobj.init() async def dump_mem_obj(caller, xml): # called from setup_form_memobj", "\" \"WHERE table_id = {} \" \"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id')", "db_obj then mem_obj, so n/a }) await obj_names.save() all_cols = memcol.select_many(where=[], order=[]) async", "dbobj_elem.get('name'), 'descr': descr}) await obj_names.save() sql = ( \"SELECT col_name, short_descr FROM {}.db_columns", "\"WHERE table_id = {} \" \"AND col_name NOT IN ('row_id', 'created_id', 'deleted_id') \"", "# create attribute on xml element, but only if not None or default", "os.path.join(os.path.dirname(__main__.__file__), 'schemas') xsd_parser = etree.XMLParser( schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')), attribute_defaults=True, remove_comments=True, remove_blank_text=True) from common import", "elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] =", "# parent = parent.parent pass async def load_toolbar(caller, xml): # called from setup_form_frame.toolbar", "('col_name', 'col_type', 'data_type', 'short_descr', 'long_descr', 'col_head', 'key_field', 'data_source', 'condition', 'allow_null', 'allow_amend', 'max_len', 'db_scale',", "set_if_not_none(button_xml, button, col) await form_vars.setval('buttonrow_xml', buttonrow_xml) #----------------------------------------------------------------------------- # methods #----------------------------------------------------------------------------- method_cols = ('name',", "sub_type columns instead # only use it to store sub_elements # P.S. 
it", "== 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await subtype_vars.init(init_vals={ 'subtype_obj': await body.getval('subtype_obj'), 'subtype_col': await body.getval('subtype_col'),", "{}.db_columns \" \"WHERE table_id = {} \" \"AND col_name NOT IN ('row_id', 'created_id',", "in method_cols: await set_if_not_none(method_xml, method, col) await form_vars.setval('methods_xml', methods_xml) #----------------------------------------------------------------------------- # body #-----------------------------------------------------------------------------", "form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') dbobj = caller.data_objects['dbobj'] await dbobj.delete_all() for", "init_xml(caller, xml): # called from setup_form after form_name if form does not exist", "= await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj.get('name'), 'descr':", "await obj_names.save() sql = ( \"SELECT col_name, short_descr FROM {}.db_columns \" \"WHERE table_id", "init_vals = {col: await dbobj.get_val_from_xml(col, obj_xml.get(col)) # for col in dbobj_cols} init_vals =", "# only use it to store sub_elements # P.S. 
it is ok to", "set_if_not_none(methods_xml, form_vars, 'method_template', 'template') all_methods = method.select_many(where=[], order=[('seq', False)]) async for _ in", "= etree.Element('toolbar') await set_if_not_none(toolbar_xml, form_vars, 'tb_template', 'template') await set_if_not_none(toolbar_xml, form_vars, 'tb_title', 'title') all_tools", "form_vars = caller.data_objects['form_vars'] buttonrow_xml = await form_vars.getval('buttonrow_xml') if buttonrow_xml is None: buttonrow_xml =", "from setup_form_inline grid_frame 'on_start_frame' inline_vars = caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] if inline_vars.exists: frame_xml", "etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) etree.SubElement(form_xml, 'db_objects') etree.SubElement(form_xml, 'mem_objects') etree.SubElement(form_xml, 'input_params') etree.SubElement(form_xml, 'output_params') frame", "init_vals['obj_descr'] = frame_xml.get('obj_descr') await frame_vars.init(init_vals=init_vals) for inline_xml in form_xml.iterchildren('inline_form'): # do not descend", "False)]) async for _ in all_outputs: output_xml = etree.SubElement(outputs_xml, 'output_param') for col in", "result using schema try: etree.fromstring(etree.tostring(form_xml), parser=xsd_parser) except (etree.XMLSyntaxError, ValueError, TypeError) as e: raise", "body.getfld('col_name') col_names = col_name_fld.foreign_key['tgt_field'].db_obj await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj in", "etree.SubElement(body_xml, await body.getval('type')) for col in body_cols: await set_if_not_none(elem_xml, body, col) elem_xml[:] =", "= {} for col in dbobj_cols: init_vals[col] = await dbobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] =", "tool.select_many(where=[], order=[('seq', False)]) async for _ in all_tools: tool_xml = etree.SubElement(toolbar_xml, 'tool') for", "call form_vars.init() with init_vals, which puts back the 
values init_vals = {} for", "in 'body' on dumping elem_type = await body.getval('type') elem_xml = await body.getval('elem') if", "db_table = await db.objects.get_db_table( form_defn.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr})", "inline_vars.init(init_vals=init_vals) await inline_vars.save() async def dump_form_xml(caller, xml): # called from setup_form 'before_save' form_defn", "'key_field', 'data_source', 'condition', 'allow_null', 'allow_amend', 'max_len', 'db_scale', 'scale_ptr', 'dflt_val', 'dflt_rule', 'col_checks', 'fkey', 'choices',", "'data_source', 'condition', 'allow_null', 'allow_amend', 'max_len', 'db_scale', 'scale_ptr', 'dflt_val', 'dflt_rule', 'col_checks', 'fkey', 'choices', 'sql')", "descr}) await obj_names.save() sql = ( \"SELECT col_name, short_descr FROM {}.db_columns \" \"WHERE", "obj_names.getval('row_id') for memcol in memobj.iter('mem_col'): await col_names.init(init_vals={'obj_id': obj_row_id, 'name': memcol.get('col_name'), 'descr': memcol.get('short_descr')}) await", "body_xml = etree.Element('body') all_body = body.select_many(where=[], order=[('seq', False)]) async for _ in all_body:", "attributes - use sub_type columns instead # only use it to store sub_elements", "for _ in all_inline: inline_xml = etree.SubElement(form_xml, 'inline_form') inline_xml.set('name', await inline_vars.getval('name')) inline_xml.set('title', await", "await conn.exec_sql(sql): await col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() \"\"\"", "async def load_mem_obj(caller, xml): # called from setup_form_memobj 'on_start_frame' form_vars = caller.data_objects['form_vars'] memobj_xml", "for col_defn in form_vars.db_table.col_list[1:]: # exclude 'row_id' col_name = col_defn.col_name init_vals[col_name] = await", "in enumerate(obj_xml.iter('mem_col')): # init_vals = {col: memcol.get_val_from_xml(col, memcol_xml.get(col)) # for col 
in memcol_cols}", "grid_vars.getval('cursor_name')) # await body.setval('form_name', await grid_vars.getval('form_name')) # await body.setval('auto_start', await grid_vars.getval('auto_start')) # await", "inline_vars.getval('frame_xml')) # inline_params = await form_vars.getval('inline_xml') # for name, frame_xml in inline_params: #", "form_vars, 'before_start_form') await set_if_not_none(form_xml, form_vars, 'after_start_form') await set_if_not_none(form_xml, form_vars, 'on_close_form') form_xml.append(await form_vars.getval('dbobj_xml')) form_xml.append(await", "= (await grid_vars.getval('filter_xml'))[:] elem_xml.find('cur_sequence')[:] = (await grid_vars.getval('sequence_xml'))[:] elem_xml.find('grid_methods')[:] = (await grid_vars.getval('methods_xml'))[:] elif elem_type", "frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) inline_vars = caller.data_objects['inline_vars'] all_inline =", "async def load_ioparms(caller, xml): # called from setup_form_ioparams 'on_start_frame' form_vars = caller.data_objects['form_vars'] inputs_xml", "init_vals=init_vals) await tool.save() async def dump_toolbar(caller, xml): # called from setup_form_frame.toolbar 'before_save' form_vars", "'btn_template', 'template') all_buttons = button.select_many(where=[], order=[('seq', False)]) async for _ in all_buttons: button_xml", "'descr': descr}) await col_names.save() \"\"\" # \"\"\" table_name = await dbobj.getval('table_name') db_table =", "etree.SubElement(form_xml, 'db_objects') etree.SubElement(form_xml, 'mem_objects') etree.SubElement(form_xml, 'input_params') etree.SubElement(form_xml, 'output_params') frame = etree.SubElement(form_xml, 'frame') etree.SubElement(frame,", "await col_names.save() # \"\"\" for deleted_obj in orig_dbobj: # anything left has been", "form_vars.get_val_from_xml( 
'before_start_form', form_xml.get('before_start_form')) init_vals['after_start_form'] = await form_vars.get_val_from_xml( 'after_start_form', form_xml.get('after_start_form')) init_vals['on_close_form'] = await form_vars.get_val_from_xml(", "# buttonrow #----------------------------------------------------------------------------- button_cols = ('btn_id', 'btn_label', 'lng', 'btn_default', 'btn_enabled', 'btn_validate', 'action', 'validation',", "parser = etree.XMLParser(remove_blank_text=True) import db.objects import db.api db_session = db.api.start_db_session() # need independent", "\" \"WHERE table_id = {} AND col_type != 'virt' \" \"AND col_name NOT", "'{}'\" .format(caller.company, await dbobj.getval('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr = await", "'src', 'op', 'tgt') async def before_start_body(caller, xml): # called from setup_form_body 'before_start_form' #", "= (await grid_vars.getval('sequence_xml'))[:] elem_xml.find('grid_methods')[:] = (await grid_vars.getval('methods_xml'))[:] elif elem_type == 'grid_frame': gridframe_vars =", "from setup_form 'on_start_frame' form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] inline_vars", "memobj.get('descr')}) await obj_names.save() obj_row_id = await obj_names.getval('row_id') for seq, memcol in enumerate(memobj.iter('mem_col')): await", "= caller.data_objects['frame_vars'] inline_vars = caller.data_objects['inline_vars'] await inline_vars.delete_all() form_xml = await form_defn.getval('form_xml') if form_xml", "memobjs_xml) #----------------------------------------------------------------------------- # io_parms #----------------------------------------------------------------------------- input_cols = ('name', 'type', 'target', 'required') output_cols =", "init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml'] = 
elem_xml.find('button_row') init_vals['methods_xml'] = elem_xml.find('frame_methods') await", "= await form_vars.getval('dbobj_xml') for dbobj in dbobj_xml.iter('db_obj'): async with db_session.get_connection() as db_mem_conn: conn", "db.api.start_db_session() # need independent connection for reading import os.path import __main__ schema_path =", "set_if_not_none(tool_xml, tool, col) await form_vars.setval('toolbar_xml', toolbar_xml) #----------------------------------------------------------------------------- # buttonrow #----------------------------------------------------------------------------- button_cols = ('btn_id',", "dbobj = caller.data_objects['dbobj'] dbobjs_xml = etree.Element('db_objects') all_dbobj = dbobj.select_many(where=[], order=[('seq', False)]) async for", "'on_start_frame' body = caller.data_objects['body'] # N.B. do not use this to store attributes", "#----------------------------------------------------------------------------- button_cols = ('btn_id', 'btn_label', 'lng', 'btn_default', 'btn_enabled', 'btn_validate', 'action', 'validation', 'help_msg') async", "await subtype_vars.getval('lng')) if elem_xml is None: elem_xml = etree.Element(elem_type) subtypes = caller.data_objects['subtypes'] subtypes_xml", "form_vars.setval('toolbar_xml', toolbar_xml) #----------------------------------------------------------------------------- # buttonrow #----------------------------------------------------------------------------- button_cols = ('btn_id', 'btn_label', 'lng', 'btn_default', 'btn_enabled',", "await memcol.delete_all() await memobj.delete_all() for seq, obj_xml in enumerate(memobj_xml): # init_vals = {col:", "in memobj_cols: init_vals[col] = await memobj.get_val_from_xml(col, obj_xml.get(col)) init_vals['seq'] = seq await memobj.init(display=False, init_vals=init_vals)", "'fkey', 'cursor', 'is_formview_obj') async def load_db_obj(caller, xml): # called from setup_form_dbobj 'on_start_frame' form_vars", "inline_xml is a 'list' which 
includes etree Elements # this cannot be serialised", "etree # parser = etree.XMLParser(remove_blank_text=True) import db.objects import db.api db_session = db.api.start_db_session() #", "'name': col_defn.col_name, 'descr': col_defn.short_descr}) await col_names.save() # \"\"\" for deleted_obj in orig_dbobj: #", "= frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') await frame_vars.init(init_vals=init_vals) for inline_xml in", "'dflt_rule', 'col_checks', 'fkey', 'choices', 'sql') async def load_mem_obj(caller, xml): # called from setup_form_memobj", "{col: outputs.get_val_from_xml(col, output_xml.get(col)) # for col in output_cols} init_vals = {} for col", "a trick to prevent running 'on_clean' # could make an alias, without gui", "form_vars = caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') orig_dbobj = set((dbobj.get('name') for dbobj in", "'before_save' form_vars = caller.data_objects['form_vars'] method = caller.data_objects['method'] methods_xml = etree.Element('frame_methods') await set_if_not_none(methods_xml, form_vars,", "init_vals[col] = await method.get_val_from_xml(col, method_xml.get(col)) init_vals['seq'] = seq await method.init(display=False, init_vals=init_vals) await method.save()", "await form_vars.init() await frame_vars.init() return init_vals={} init_vals['dbobj_xml'] = form_xml.find('db_objects') init_vals['memobj_xml'] = form_xml.find('mem_objects') init_vals['inputs_xml']", "replace the values # in 'body' on dumping elem_type = await body.getval('type') elem_xml", "form_xml.find('output_params') init_vals['before_start_form'] = await form_vars.get_val_from_xml( 'before_start_form', form_xml.get('before_start_form')) init_vals['after_start_form'] = await form_vars.get_val_from_xml( 'after_start_form', form_xml.get('after_start_form'))", "not None: if attr_name is None: # if not specified, use col_name attr_name", "= 
caller.data_objects['form_vars'] method = caller.data_objects['method'] methods_xml = etree.Element('frame_methods') await set_if_not_none(methods_xml, form_vars, 'method_template', 'template')", "validate result using schema try: etree.fromstring(etree.tostring(form_xml), parser=xsd_parser) except (etree.XMLSyntaxError, ValueError, TypeError) as e:", "seq, output_xml in enumerate(outputs_xml): # init_vals = {col: outputs.get_val_from_xml(col, output_xml.get(col)) # for col", "= await db.objects.get_db_table( form_vars.context, caller.company, table_name) await obj_names.init(init_vals={ 'name': obj_name, 'descr': db_table.short_descr}) await", "etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await treeframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await treeframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await", "etree.Element('body') all_body = body.select_many(where=[], order=[('seq', False)]) async for _ in all_body: elem_xml =", "if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] # await body.setval('data_object', await grid_vars.getval('data_object')) #", "= caller.data_objects['treeframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['combo_type'] =", "methods_xml.get('template'))) await form_vars.save() for seq, method_xml in enumerate(methods_xml): # init_vals = {col: method.get_val_from_xml(col,", "form_xml.set('name', await form_defn.getval('form_name')) form_xml.set('title', await form_defn.getval('title')) await set_if_not_none(form_xml, form_vars, 'before_start_form') await set_if_not_none(form_xml, form_vars,", "xml): # called from setup_form_memobj 'before_save' form_vars = caller.data_objects['form_vars'] memobj_xml = await form_vars.getval('memobj_xml')", "obj_name_fld.foreign_key['tgt_field'].db_obj await obj_names.delete_all() col_name_fld = await 
body.getfld('col_name') col_names = col_name_fld.foreign_key['tgt_field'].db_obj await col_names.delete_all() dbobj_xml", "frame_xml.append(await frame_vars.getval('methods_xml')) await inline_vars.setval('frame_xml', frame_xml) #----------------------------------------------------------------------------- # toolbar #----------------------------------------------------------------------------- tool_cols = ('type', 'label',", "init_vals['before_start_form'] = await form_vars.get_val_from_xml( 'before_start_form', form_xml.get('before_start_form')) init_vals['after_start_form'] = await form_vars.get_val_from_xml( 'after_start_form', form_xml.get('after_start_form')) init_vals['on_close_form']", "'validation', 'after', 'btn_id', 'btn_label', 'btn_enabled', 'btn_validate', 'action', 'help_msg', 'nb_label', 'subtype_obj', 'subtype_col', 'data_object', 'growable',", "elem_xml.find('cur_filter') init_vals['sequence_xml'] = elem_xml.find('cur_sequence') init_vals['methods_xml'] = elem_xml.find('grid_methods') await grid_vars.init(init_vals=init_vals) elif elem_type == 'grid_frame':", "col in body_cols: await set_if_not_none(elem_xml, body, col) elem_xml[:] = (await body.getval('elem'))[:] form_vars =", "await body.setval('combo_type', await treeframe_vars.getval('combo_type')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar')", "db_table.col_list: await col_names.init(init_vals={ 'name': col_defn.col_name, 'descr': col_defn.short_descr}) await col_names.save() # \"\"\" for deleted_obj", "order=[('seq', False)]) async for _ in all_memcol: memcol_xml = etree.SubElement(memobj_xml, 'mem_col') for col", "in memcol_cols: await set_if_not_none(memcol_xml, memcol, col) obj_name = await memobj.getval('name') if obj_name in", "for inline_xml in form_xml.iterchildren('inline_form'): # do not descend init_vals = { 'name': inline_xml.get('name'),", "button_xml.get(col)) # for col in button_cols} init_vals = {} for col in button_cols:", "to 
default if xml_val is not None: if attr_name is None: # if", "called from setup_form_frame.toolbar 'on_start_frame' form_vars = caller.data_objects['form_vars'] toolbar_xml = await form_vars.getval('toolbar_xml') if toolbar_xml", "in all_methods: method_xml = etree.SubElement(methods_xml, 'method') for col in method_cols: await set_if_not_none(method_xml, method,", "conn.exec_sql(sql): await col_names.init(init_vals={ #'obj_id': obj_row_id, 'name': col_name, 'descr': descr}) await col_names.save() \"\"\" #", "in all_memobj: memobj_xml = etree.SubElement(memobjs_xml, 'mem_obj') for col in memobj_cols: await set_if_not_none(memobj_xml, memobj,", "buttonrow_xml) await form_vars.setval('btn_template', await form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template'))) await form_vars.save() button = caller.data_objects['button'] await button.delete_all()", "form_name if form does not exist form_defn = caller.data_objects['form'] form_xml = etree.Element('form') form_xml.set('name',", "'descr': descr}) await col_names.save() \"\"\" # \"\"\" obj_name = dbobj_elem.get('name') table_name = dbobj_elem.get('table_name')", "col) await form_vars.setval('methods_xml', methods_xml) #----------------------------------------------------------------------------- # body #----------------------------------------------------------------------------- body_cols = ('main_object', 'obj_descr', 'rowspan',", "'after', 'btn_id', 'btn_label', 'btn_enabled', 'btn_validate', 'action', 'help_msg', 'nb_label', 'subtype_obj', 'subtype_col', 'data_object', 'growable', 'num_grid_rows',", "await tool.get_val_from_xml(col, tool_xml.get(col)) init_vals['seq'] = seq await tool.init(display=False, init_vals=init_vals) await tool.save() async def", "= await form_vars.get_val_from_xml( 'on_close_form', form_xml.get('on_close_form')) await form_vars.init(init_vals=init_vals) obj_names = caller.data_objects['obj_names'] await obj_names.delete_all() col_names", "frame_vars.getval('toolbar_xml')) 
frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) inline_vars = caller.data_objects['inline_vars'] all_inline = inline_vars.select_many(where=[],", "caller.data_objects['memcol'] memobjs_xml = etree.Element('mem_objects') all_memobj = memobj.select_many(where=[], order=[('seq', False)]) async for _ in", "descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': obj_name, 'descr': descr}) await obj_names.save() sql", "= await outputs.get_val_from_xml(col, output_xml.get(col)) init_vals['seq'] = seq await outputs.init(display=False, init_vals=init_vals) await outputs.save() async", "bit is a trick # we want to 'save' form_vars, to trigger on_clean()", "xml): # called from setup_form_frame.toolbar 'on_start_frame' form_vars = caller.data_objects['form_vars'] toolbar_xml = await form_vars.getval('toolbar_xml')", "await body.getval('lng'), }) subtypes = caller.data_objects['subtypes'] await subtypes.delete_all() for subtype in elem_xml.iter('subtype_body'): await", "obj_name, 'descr': db_table.short_descr}) await obj_names.save() for seq, col_defn in enumerate(db_table.col_list): await col_names.init(init_vals={'name': col_defn.col_name,", "all_subtypes: subtype_xml = etree.SubElement(subtypes_xml, 'subtype_body') await set_if_not_none(subtype_xml, subtypes, 'subtype_id') subtype_xml[:] = (await subtypes.getval('body_xml'))[:]", "save all values in init_vals # call form_vars.restore(), which triggers on_clean() # call", "elem_xml.find('cur_sequence')[:] = (await grid_vars.getval('sequence_xml'))[:] elem_xml.find('grid_methods')[:] = (await grid_vars.getval('methods_xml'))[:] elif elem_type == 'grid_frame': gridframe_vars", "FROM {}.db_tables WHERE table_name = '{}'\" .format(caller.company, dbobj_elem.get('table_name')) ) cur = await conn.exec_sql(sql)", "do not descend init_vals = { 'name': inline_xml.get('name'), 'title': 
inline_xml.get('title'), 'frame_xml': inline_xml.find('frame'), }", "col_names.save() frame_xml = form_xml.find('frame') init_vals={} init_vals['toolbar_xml'] = frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] =", "= caller.data_objects['dbobj'] await dbobj.delete_all() for seq, obj_xml in enumerate(dbobj_xml): # init_vals = {col:", "col) all_memcol = memcol.select_many(where=[], order=[('seq', False)]) async for _ in all_memcol: memcol_xml =", "'btn_validate', 'action', 'help_msg', 'nb_label', 'subtype_obj', 'subtype_col', 'data_object', 'growable', 'num_grid_rows', 'cursor_name', 'form_name', 'auto_start', 'auto_startrow',", "from setup_form after form_name if form does not exist form_defn = caller.data_objects['form'] form_xml", "await set_if_not_none(form_xml, form_vars, 'after_start_form') await set_if_not_none(form_xml, form_vars, 'on_close_form') form_xml.append(await form_vars.getval('dbobj_xml')) form_xml.append(await form_vars.getval('memobj_xml')) form_xml.append(await", "form_defn = caller.data_objects['form'] form_vars = caller.data_objects['form_vars'] frame_vars = caller.data_objects['frame_vars'] inline_vars = caller.data_objects['inline_vars'] await", "await subtypes.init(init_vals={ 'subtype_id': subtype.get('subtype_id'), 'body_xml': subtype, }) await subtypes.save() async def dump_body_elem(caller, xml):", "await form_vars.setval('methods_xml', methods_xml) await form_vars.setval('method_template', await form_vars.get_val_from_xml('method_template', methods_xml.get('template'))) await form_vars.save() for seq, method_xml", "'btn_id', 'btn_label', 'btn_enabled', 'btn_validate', 'action', 'help_msg', 'nb_label', 'subtype_obj', 'subtype_col', 'data_object', 'growable', 'num_grid_rows', 'cursor_name',", "# for col in memcol_cols} init_vals = {} for col in memcol_cols: init_vals[col]", "seq, input_xml in enumerate(inputs_xml): # init_vals = {col: inputs.get_val_from_xml(col, input_xml.get(col)) # for 
col", "BY col_type, seq\" .format(caller.company, table_id) ) async for col_name, descr in await conn.exec_sql(sql):", "'do_save' form_vars = caller.data_objects['form_vars'] inputs_xml = etree.Element('input_params') inputs = caller.data_objects['inputs'] all_inputs = inputs.select_many(where=[],", "body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['combo_type'] = await body.getval('combo_type') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml']", "await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['body_xml'] = elem_xml.find('body') init_vals['buttonrow_xml']", "output_cols: init_vals[col] = await outputs.get_val_from_xml(col, output_xml.get(col)) init_vals['seq'] = seq await outputs.init(display=False, init_vals=init_vals) await", "init_vals=init_vals) await body.save(from_upd_on_save=True) # a trick to prevent running 'on_clean' # could make", "form_vars.getval(col_name) await form_vars.restore() await form_vars.init(init_vals=init_vals, display=False) form_vars.init_vals = {} \"\"\" #----------------------------------------------------------------------------- # db_obj", "False)]) async for _ in all_dbobj: dbobj_xml = etree.SubElement(dbobjs_xml, 'db_obj') for col in", "elem_xml = etree.Element(elem_type) subtypes = caller.data_objects['subtypes'] subtypes_xml = etree.Element('subtypes_temp') all_subtypes = subtypes.select_many(where=[], order=[('subtype_id',", "col_names.save() \"\"\" # \"\"\" table_name = await dbobj.getval('table_name') db_table = await db.objects.get_db_table( form_vars.context,", "init_vals['main_object'] = frame_xml.get('main_object') init_vals['obj_descr'] = frame_xml.get('obj_descr') await frame_vars.init(init_vals=init_vals) for inline_xml in form_xml.iterchildren('inline_form'): #", "xml): # called from setup_form_frame.toolbar 'before_save' form_vars = 
caller.data_objects['form_vars'] tool = caller.data_objects['tool'] toolbar_xml", "'tool') for col in tool_cols: await set_if_not_none(tool_xml, tool, col) await form_vars.setval('toolbar_xml', toolbar_xml) #-----------------------------------------------------------------------------", "col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj_elem in dbobj_xml.iter('db_obj'): \"\"\" async with db_session.get_connection()", "= frame_xml.get('obj_descr') await frame_vars.init(init_vals=init_vals) for inline_xml in form_xml.iterchildren('inline_form'): # do not descend init_vals", "await form_vars.setval('body_xml', body_xml) await body.delete_all(from_upd_on_save=True) # a trick to prevent running 'on_clean' for", "= {} AND col_type != 'virt' \" \"AND col_name NOT IN ('row_id', 'created_id',", "alias, without gui link (cleaner?) async def dump_body(caller, xml): # called from setup_form_body", "= caller.data_objects['subtypes'] await subtypes.delete_all() for subtype in elem_xml.iter('subtype_body'): await subtypes.init(init_vals={ 'subtype_id': subtype.get('subtype_id'), 'body_xml':", "body.setval('num_grid_rows', await grid_vars.getval('num_grid_rows')) # await body.setval('cursor_name', await grid_vars.getval('cursor_name')) # await body.setval('form_name', await grid_vars.getval('form_name'))", "= caller.data_objects['col_names'] all_obj = obj_names.select_many(where=[], order=[]) async for _ in all_obj: print(obj_names) all_col", "for col in dbobj_cols} init_vals = {} for col in dbobj_cols: init_vals[col] =", "== 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] # await body.setval('main_object', await gridframe_vars.getval('main_object')) # await body.setval('obj_descr',", "cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={ 'name': dbobj_elem.get('name'),", "'on_start_frame' form_vars = caller.data_objects['form_vars'] memobj_xml = await 
form_vars.getval('memobj_xml') memobj = caller.data_objects['memobj'] memcol =", "await db_obj.get_val_for_xml(col_name) # returns None if None or equal to default if xml_val", "await form_vars.get_val_from_xml('method_template', methods_xml.get('template'))) await form_vars.save() for seq, method_xml in enumerate(methods_xml): # init_vals =", "all_inline: inline_xml = etree.SubElement(form_xml, 'inline_form') inline_xml.set('name', await inline_vars.getval('name')) inline_xml.set('title', await inline_vars.getval('title')) inline_xml.append(await inline_vars.getval('frame_xml'))", "(await grid_vars.getval('columns_xml'))[:] elem_xml.find('cur_filter')[:] = (await grid_vars.getval('filter_xml'))[:] elem_xml.find('cur_sequence')[:] = (await grid_vars.getval('sequence_xml'))[:] elem_xml.find('grid_methods')[:] = (await", "await obj_names.getval('row_id') for memcol in memobj.iter('mem_col'): await col_names.init(init_vals={'obj_id': obj_row_id, 'name': memcol.get('col_name'), 'descr': memcol.get('short_descr')})", "frame_vars.init() return init_vals={} init_vals['dbobj_xml'] = form_xml.find('db_objects') init_vals['memobj_xml'] = form_xml.find('mem_objects') init_vals['inputs_xml'] = form_xml.find('input_params') init_vals['outputs_xml']", "= elem_xml.find('grid_methods') await grid_vars.init(init_vals=init_vals) elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] init_vals={} init_vals['main_object']", "etree.fromstring(etree.tostring(form_xml), parser=xsd_parser) except (etree.XMLSyntaxError, ValueError, TypeError) as e: raise AibError(head='XmlError', body=e.args[0]) # update", "columns instead # only use it to store sub_elements # P.S. 
it is", "elem_type == 'subtype_frame': subtype_vars = caller.data_objects['subtype_vars'] await subtype_vars.init(init_vals={ 'subtype_obj': await body.getval('subtype_obj'), 'subtype_col': await", "'allow_null', 'allow_amend', 'max_len', 'db_scale', 'scale_ptr', 'dflt_val', 'dflt_rule', 'col_checks', 'fkey', 'choices', 'sql') async def", "= form_xml.find('input_params') init_vals['outputs_xml'] = form_xml.find('output_params') init_vals['before_start_form'] = await form_vars.get_val_from_xml( 'before_start_form', form_xml.get('before_start_form')) init_vals['after_start_form'] =", "await obj_names.init(init_vals={ 'name': dbobj.get('name'), 'descr': descr}) await obj_names.save() sql = ( \"SELECT col_name,", "# called from setup_form_body 'on_start_frame' form_vars = caller.data_objects['form_vars'] body = caller.data_objects['body'] \"\"\" obj_names", "'btn_validate', 'action', 'validation', 'help_msg') async def load_buttonrow(caller, xml): # called from setup_form_buttonrow 'on_start_frame'", "etree.SubElement(buttonrow_xml, 'button') for col in button_cols: await set_if_not_none(button_xml, button, col) await form_vars.setval('buttonrow_xml', buttonrow_xml)", "'obj_descr') frame_xml.append(await frame_vars.getval('toolbar_xml')) frame_xml.append(await frame_vars.getval('body_xml')) frame_xml.append(await frame_vars.getval('buttonrow_xml')) frame_xml.append(await frame_vars.getval('methods_xml')) inline_vars = caller.data_objects['inline_vars'] all_inline", "use sub_type columns instead # only use it to store sub_elements # P.S.", "for _ in all_inputs: input_xml = etree.SubElement(inputs_xml, 'input_param') for col in input_cols: await", "for col in input_cols: await set_if_not_none(input_xml, inputs, col) await form_vars.setval('inputs_xml', inputs_xml) outputs_xml =", "return init_vals={} init_vals['dbobj_xml'] = form_xml.find('db_objects') init_vals['memobj_xml'] = form_xml.find('mem_objects') init_vals['inputs_xml'] = form_xml.find('input_params') 
init_vals['outputs_xml'] =", "init_vals[col] = await inputs.get_val_from_xml(col, input_xml.get(col)) init_vals['seq'] = seq await inputs.init(display=False, init_vals=init_vals) await inputs.save()", "= caller.data_objects['form_vars'] dbobj_xml = await form_vars.getval('dbobj_xml') orig_dbobj = set((dbobj.get('name') for dbobj in dbobj_xml))", "def load_form_xml(caller, xml): # called from setup_form 'on_start_frame' form_defn = caller.data_objects['form'] form_vars =", "for _ in all_dbobj: dbobj_xml = etree.SubElement(dbobjs_xml, 'db_obj') for col in dbobj_cols: await", "= ('name', 'table_name', 'parent', 'fkey', 'cursor', 'is_formview_obj') async def load_db_obj(caller, xml): # called", "caller.data_objects['obj_names'] await obj_names.delete_all() col_names = caller.data_objects['col_names'] await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for", "async for _ in all_subtypes: subtype_xml = etree.SubElement(subtypes_xml, 'subtype_body') await set_if_not_none(subtype_xml, subtypes, 'subtype_id')", "caller.data_objects['gridframe_vars'] # await body.setval('main_object', await gridframe_vars.getval('main_object')) # await body.setval('obj_descr', await gridframe_vars.getval('obj_descr')) if elem_xml", "= caller.data_objects['method'] methods_xml = etree.Element('frame_methods') await set_if_not_none(methods_xml, form_vars, 'method_template', 'template') all_methods = method.select_many(where=[],", "'sql') async def load_mem_obj(caller, xml): # called from setup_form_memobj 'on_start_frame' form_vars = caller.data_objects['form_vars']", "# anything left has been deleted await obj_names.init(init_vals={'name': deleted_obj}) await obj_names.delete() await form_vars.setval('memobj_xml',", "in all_obj: print(obj_names) all_col = col_names.select_many(where=[], order=[]) async for _ in all_col: print(col_names)", "puts back the values init_vals = {} for col_defn in form_vars.db_table.col_list[1:]: # exclude", "'on_close_form', 
form_xml.get('on_close_form')) await form_vars.init(init_vals=init_vals) obj_names = caller.data_objects['obj_names'] await obj_names.delete_all() col_names = caller.data_objects['col_names'] await", "memcol_cols: init_vals[col] = await memcol.get_val_from_xml(col, memcol_xml.get(col)) init_vals['seq'] = seq await memcol.init(display=False, init_vals=init_vals) await", "await frame_vars.init(init_vals=init_vals) async def dump_inline(caller, xml): # called from setup_form_inlline grid_frame 'before_save' inline_vars", "await col_names.save() for deleted_obj in orig_memobj: # anything left has been deleted await", "= etree.Element('form') form_xml.set('name', await form_defn.getval('form_name')) etree.SubElement(form_xml, 'db_objects') etree.SubElement(form_xml, 'mem_objects') etree.SubElement(form_xml, 'input_params') etree.SubElement(form_xml, 'output_params')", "init_vals['elem'] = elem_xml init_vals['type'] = elem_xml.tag init_vals['seq'] = seq for fld in body.sub_types['type'][elem_xml.tag]:", "caller.data_objects['inline_vars'] frame_vars = caller.data_objects['frame_vars'] if inline_vars.exists: frame_xml = await inline_vars.getval('frame_xml') init_vals={} init_vals['toolbar_xml'] =", "('row_id', 'created_id', 'deleted_id') \" \"ORDER BY col_type, seq\" .format(caller.company, table_id) ) async for", "await obj_names.getval('row_id') for seq, memcol in enumerate(memobj.iter('mem_col')): await col_names.init(init_vals={'name': memcol.get('col_name'), 'descr': memcol.get('short_descr'), 'seq':", "is None: toolbar_xml = etree.Element('toolbar') await form_vars.setval('toolbar_xml', toolbar_xml) await form_vars.setval('tb_template', await form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template')))", "inline_xml.get('title'), 'frame_xml': inline_xml.find('frame'), } await inline_vars.init(init_vals=init_vals) await inline_vars.save() async def dump_form_xml(caller, xml): #", "gridframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = 
(await gridframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await gridframe_vars.getval('methods_xml'))[:] elif elem_type == 'tree_frame':", "from setup_form_buttonrow 'on_start_frame' form_vars = caller.data_objects['form_vars'] buttonrow_xml = await form_vars.getval('buttonrow_xml') if buttonrow_xml is", "order=[('subtype_id', False)]) async for _ in all_subtypes: subtype_xml = etree.SubElement(subtypes_xml, 'subtype_body') await set_if_not_none(subtype_xml,", "'readonly', 'choice', 'lookup', 'radio', 'before', 'form_dflt', 'validation', 'after', 'btn_id', 'btn_label', 'btn_enabled', 'btn_validate', 'action',", "'clone_from') memcol_cols = ('col_name', 'col_type', 'data_type', 'short_descr', 'long_descr', 'col_head', 'key_field', 'data_source', 'condition', 'allow_null',", "so n/a }) await obj_names.save() all_cols = memcol.select_many(where=[], order=[]) async for _ in", "# \"\"\" memobj_xml = await form_vars.getval('memobj_xml') for memobj in memobj_xml.iter('mem_obj'): await obj_names.init(init_vals={ 'name':", "frame_xml = etree.SubElement(form_xml, 'frame') await set_if_not_none(frame_xml, frame_vars, 'main_object') await set_if_not_none(frame_xml, frame_vars, 'obj_descr') frame_xml.append(await", "WHERE table_name = '{}'\" .format(caller.company, dbobj.get('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr", "etree.SubElement(frame_xml, 'toolbar') init_vals['body_xml'] = etree.SubElement(frame_xml, 'body') init_vals['buttonrow_xml'] = etree.SubElement(frame_xml, 'button_row') init_vals['methods_xml'] = etree.SubElement(frame_xml,", "caller.data_objects['form_vars'] tool = caller.data_objects['tool'] toolbar_xml = etree.Element('toolbar') await set_if_not_none(toolbar_xml, form_vars, 'tb_template', 'template') await", "while True: # if 'obj_names' in parent.data_objects: # caller.data_objects['obj_names'] = parent.data_objects['obj_names'] # caller.data_objects['col_names']", "= await form_vars.getval('toolbar_xml') if 
toolbar_xml is None: toolbar_xml = etree.Element('toolbar') await form_vars.setval('toolbar_xml', toolbar_xml)", "'group_name', 'member_name', 'pyfunc', 'prev', 'align', 'src', 'op', 'tgt') async def before_start_body(caller, xml): #", "dbobjs_xml) #----------------------------------------------------------------------------- # mem_obj #----------------------------------------------------------------------------- memobj_cols = ('name', 'descr', 'parent', 'sequence', 'sub_types', 'tree_params',", "elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars'] init_vals={} init_vals['main_object'] = await body.getval('main_object') init_vals['obj_descr']", "values init_vals = {} for col_defn in form_vars.db_table.col_list[1:]: # exclude 'row_id' col_name =", "col_names.init(init_vals={'name': memcol.get('col_name'), 'descr': memcol.get('short_descr'), 'seq': seq}) await col_names.save() frame_xml = form_xml.find('frame') init_vals={} init_vals['toolbar_xml']", "attr_name is None: # if not specified, use col_name attr_name = col_name elem_xml.set(attr_name,", "in method_cols} init_vals = {} for col in method_cols: init_vals[col] = await method.get_val_from_xml(col,", "frame_xml.find('toolbar') init_vals['body_xml'] = frame_xml.find('body') init_vals['buttonrow_xml'] = frame_xml.find('button_row') init_vals['methods_xml'] = frame_xml.find('frame_methods') init_vals['main_object'] = frame_xml.get('main_object')", "parent = parent.parent pass async def load_body(caller, xml): # called from setup_form_body 'on_start_frame'", "body.setval('auto_startrow', await grid_vars.getval('auto_startrow')) if elem_xml is None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml,", "# called from setup_form_frame.toolbar 'before_save' form_vars = caller.data_objects['form_vars'] tool = caller.data_objects['tool'] toolbar_xml =", "caller.data_objects['col_names'] await col_names.delete_all() dbobj_xml = await 
form_vars.getval('dbobj_xml') for dbobj_elem in dbobj_xml.iter('db_obj'): \"\"\" async", "for col in output_cols: await set_if_not_none(output_xml, outputs, col) await form_vars.setval('outputs_xml', outputs_xml) #----------------------------------------------------------------------------- #", "pass async def load_body(caller, xml): # called from setup_form_body 'on_start_frame' form_vars = caller.data_objects['form_vars']", "body.setval('growable', await grid_vars.getval('growable')) # await body.setval('num_grid_rows', await grid_vars.getval('num_grid_rows')) # await body.setval('cursor_name', await grid_vars.getval('cursor_name'))", "await inputs.save() outputs = caller.data_objects['outputs'] await outputs.delete_all() for seq, output_xml in enumerate(outputs_xml): #", "etree.XMLParser(remove_blank_text=True) import db.objects import db.api db_session = db.api.start_db_session() # need independent connection for", "await col_names.init(init_vals={'name': memcol.get('col_name'), 'descr': memcol.get('short_descr'), 'seq': seq}) await col_names.save() frame_xml = form_xml.find('frame') init_vals={}", "await outputs.delete_all() for seq, output_xml in enumerate(outputs_xml): # init_vals = {col: outputs.get_val_from_xml(col, output_xml.get(col))", "init_vals = {} for col in method_cols: init_vals[col] = await method.get_val_from_xml(col, method_xml.get(col)) init_vals['seq']", "= etree.SubElement(body_xml, await body.getval('type')) for col in body_cols: await set_if_not_none(elem_xml, body, col) elem_xml[:]", "False)]) async for _ in all_body: elem_xml = etree.SubElement(body_xml, await body.getval('type')) for col", "but only if not None or default xml_val = await db_obj.get_val_for_xml(col_name) # returns", "= await body.getval('type') elem_xml = await body.getval('elem') if elem_type == 'grid': grid_vars =", "= await body.getval('main_object') init_vals['obj_descr'] = await body.getval('obj_descr') init_vals['combo_type'] = await body.getval('combo_type') 
init_vals['toolbar_xml'] =", "- # save all values in init_vals # call form_vars.restore(), which triggers on_clean()", "new form_xml await form_defn.setval('form_xml', form_xml) \"\"\" # the next bit is a trick", "col_names.save() \"\"\" body_xml = await form_vars.getval('body_xml') if body_xml is None: body_xml = etree.Element('body')", "'obj_name', 'col_name', 'lng', 'height', 'pwd', 'readonly', 'choice', 'lookup', 'radio', 'before', 'form_dflt', 'validation', 'after',", "await form_vars.setval('buttonrow_xml', buttonrow_xml) #----------------------------------------------------------------------------- # methods #----------------------------------------------------------------------------- method_cols = ('name', 'obj_name', 'action') async", "form_vars.getval('memobj_xml') memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] await memcol.delete_all() await memobj.delete_all() for seq,", "is None: buttonrow_xml = etree.Element('button_row') await form_vars.setval('buttonrow_xml', buttonrow_xml) await form_vars.setval('btn_template', await form_vars.get_val_from_xml('btn_template', buttonrow_xml.get('template')))", "'seq': await memobj.getval('seq'), # seq is db_obj then mem_obj, so n/a }) await", "elem_xml = await body.getval('elem') if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] init_vals={} init_vals['toolbar_xml']", "elem_xml.find('cur_sequence') init_vals['methods_xml'] = elem_xml.find('grid_methods') await grid_vars.init(init_vals=init_vals) elif elem_type == 'grid_frame': gridframe_vars = caller.data_objects['gridframe_vars']", "None: elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'cur_columns') etree.SubElement(elem_xml, 'cur_filter') etree.SubElement(elem_xml, 'cur_sequence') etree.SubElement(elem_xml,", "from collections import OrderedDict as OD from lxml import etree # parser =", "'form_dflt', 'validation', 'after', 'btn_id', 'btn_label', 'btn_enabled', 
'btn_validate', 'action', 'help_msg', 'nb_label', 'subtype_obj', 'subtype_col', 'data_object',", "'created_id', 'deleted_id') \" \"ORDER BY col_type, seq\" .format(caller.company, table_id) ) async for col_name,", "print(obj_names) all_col = col_names.select_many(where=[], order=[]) async for _ in all_col: print(col_names) print() \"\"\"", "caller.data_objects['subtypes'] await subtypes.delete_all() for subtype in elem_xml.iter('subtype_body'): await subtypes.init(init_vals={ 'subtype_id': subtype.get('subtype_id'), 'body_xml': subtype,", "for col in method_cols} init_vals = {} for col in method_cols: init_vals[col] =", "= caller.data_objects['col_names'] memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] memobjs_xml = etree.Element('mem_objects') all_memobj =", "form_defn.getval('form_name')) form_xml.set('title', await form_defn.getval('title')) await set_if_not_none(form_xml, form_vars, 'before_start_form') await set_if_not_none(form_xml, form_vars, 'after_start_form') await", "inline_vars.getval('title')) inline_xml.append(await inline_vars.getval('frame_xml')) # inline_params = await form_vars.getval('inline_xml') # for name, frame_xml in", "etree.XMLParser( schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')), attribute_defaults=True, remove_comments=True, remove_blank_text=True) from common import AibError from common import", "init_vals['toolbar_xml'] = etree.SubElement(frame_xml, 'toolbar') init_vals['body_xml'] = etree.SubElement(frame_xml, 'body') init_vals['buttonrow_xml'] = etree.SubElement(frame_xml, 'button_row') init_vals['methods_xml']", "#----------------------------------------------------------------------------- # body_elem #----------------------------------------------------------------------------- async def load_body_elem(caller, xml): # called from setup_form_body.grid_frame 'on_start_frame'", "dbobj.get_val_from_xml(col, obj_xml.get(col)) # for col in dbobj_cols} init_vals = {} for col in", 
"remove_comments=True, remove_blank_text=True) from common import AibError from common import log, debug async def", "# init_vals = {col: memcol.get_val_from_xml(col, memcol_xml.get(col)) # for col in memcol_cols} init_vals =", "in enumerate(memobj.iter('mem_col')): await col_names.init(init_vals={'name': memcol.get('col_name'), 'descr': memcol.get('short_descr'), 'seq': seq}) await col_names.save() frame_xml =", "# however, inline_xml is a 'list' which includes etree Elements # this cannot", "etree.Element('frame_methods') await set_if_not_none(methods_xml, form_vars, 'method_template', 'template') all_methods = method.select_many(where=[], order=[('seq', False)]) async for", "'parent', 'fkey', 'cursor', 'is_formview_obj') async def load_db_obj(caller, xml): # called from setup_form_dbobj 'on_start_frame'", "init_vals['memobj_xml'] = form_xml.find('mem_objects') init_vals['inputs_xml'] = form_xml.find('input_params') init_vals['outputs_xml'] = form_xml.find('output_params') init_vals['before_start_form'] = await form_vars.get_val_from_xml(", "elem_xml.find('cur_columns') init_vals['filter_xml'] = elem_xml.find('cur_filter') init_vals['sequence_xml'] = elem_xml.find('cur_sequence') init_vals['methods_xml'] = elem_xml.find('grid_methods') await grid_vars.init(init_vals=init_vals) elif", "'toolbar') etree.SubElement(elem_xml, 'cur_columns') etree.SubElement(elem_xml, 'cur_filter') etree.SubElement(elem_xml, 'cur_sequence') etree.SubElement(elem_xml, 'grid_methods') elem_xml.find('toolbar')[:] = (await grid_vars.getval('toolbar_xml'))[:]", "for deleted_obj in orig_dbobj: # anything left has been deleted await obj_names.init(init_vals={'name': deleted_obj})", "for memcol in memobj.iter('mem_col'): await col_names.init(init_vals={'obj_id': obj_row_id, 'name': memcol.get('col_name'), 'descr': memcol.get('short_descr')}) await col_names.save()", "await form_vars.getval('body_xml') if body_xml is None: body_xml = etree.Element('body') await form_vars.setval('body_xml', 
body_xml) await", "= {col: outputs.get_val_from_xml(col, output_xml.get(col)) # for col in output_cols} init_vals = {} for", "form_xml.find('db_objects') init_vals['memobj_xml'] = form_xml.find('mem_objects') init_vals['inputs_xml'] = form_xml.find('input_params') init_vals['outputs_xml'] = form_xml.find('output_params') init_vals['before_start_form'] = await", "etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await treeframe_vars.getval('toolbar_xml'))[:] elem_xml.find('body')[:] = (await", "elem_xml.find('frame_methods') await gridframe_vars.init(init_vals=init_vals) elif elem_type == 'tree_frame': treeframe_vars = caller.data_objects['treeframe_vars'] init_vals={} init_vals['main_object'] =", "= await obj_names.getval('row_id') for seq, memcol in enumerate(memobj.iter('mem_col')): await col_names.init(init_vals={'name': memcol.get('col_name'), 'descr': memcol.get('short_descr'),", "= await form_vars.get_val_from_xml( 'after_start_form', form_xml.get('after_start_form')) init_vals['on_close_form'] = await form_vars.get_val_from_xml( 'on_close_form', form_xml.get('on_close_form')) await form_vars.init(init_vals=init_vals)", "def dump_ioparms(caller, xml): # called from setup_form_ioparams 'do_save' form_vars = caller.data_objects['form_vars'] inputs_xml =", "'cur_sequence') etree.SubElement(elem_xml, 'grid_methods') elem_xml.find('toolbar')[:] = (await grid_vars.getval('toolbar_xml'))[:] elem_xml.find('cur_columns')[:] = (await grid_vars.getval('columns_xml'))[:] elem_xml.find('cur_filter')[:] =", "a trick # we want to 'save' form_vars, to trigger on_clean() # however,", "enumerate(obj_xml.iter('mem_col')): # init_vals = {col: memcol.get_val_from_xml(col, memcol_xml.get(col)) # for col in memcol_cols} init_vals", "= subtypes.select_many(where=[], order=[('subtype_id', False)]) async for _ in all_subtypes: subtype_xml = etree.SubElement(subtypes_xml, 
'subtype_body')", "all_inputs = inputs.select_many(where=[], order=[('seq', False)]) async for _ in all_inputs: input_xml = etree.SubElement(inputs_xml,", "await col_names.delete_all() dbobj_xml = await form_vars.getval('dbobj_xml') for dbobj_elem in dbobj_xml.iter('db_obj'): \"\"\" async with", "in orig_dbobj: orig_dbobj.remove(obj_name) else: \"\"\" async with db_session.get_connection() as db_mem_conn: conn = db_mem_conn.db", "= set((dbobj.get('name') for dbobj in dbobj_xml)) obj_names = caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] dbobj", "'mem_objects') etree.SubElement(form_xml, 'input_params') etree.SubElement(form_xml, 'output_params') frame = etree.SubElement(form_xml, 'frame') etree.SubElement(frame, 'toolbar') etree.SubElement(frame, 'body')", "enumerate(db_table.col_list): await col_names.init(init_vals={'name': col_defn.col_name, 'descr': col_defn.short_descr, 'seq': seq}) await col_names.save() # \"\"\" memobj_xml", "from setup_form_body 'before_save' body = caller.data_objects['body'] body_xml = etree.Element('body') all_body = body.select_many(where=[], order=[('seq',", "body.getval('elem') if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] init_vals={} init_vals['toolbar_xml'] = elem_xml.find('toolbar') init_vals['columns_xml']", "db_table.short_descr}) await obj_names.save() for col_defn in db_table.col_list: await col_names.init(init_vals={ 'name': col_defn.col_name, 'descr': col_defn.short_descr})", "frame_xml.get('obj_descr') else: frame_xml = etree.Element('frame') init_vals={} init_vals['toolbar_xml'] = etree.SubElement(frame_xml, 'toolbar') init_vals['body_xml'] = etree.SubElement(frame_xml,", "# returns None if None or equal to default if xml_val is not", "memobj_cols} init_vals = {} for col in memobj_cols: init_vals[col] = await memobj.get_val_from_xml(col, obj_xml.get(col))", "dumping elem_type = await body.getval('type') elem_xml = await body.getval('elem') if elem_type == 'grid':", "# 
inline_xml.set('name', name) # inline_xml.append(frame_xml) # validate result using schema try: etree.fromstring(etree.tostring(form_xml), parser=xsd_parser)", "elem_xml.find('body')[:] = (await treeframe_vars.getval('body_xml'))[:] elem_xml.find('button_row')[:] = (await treeframe_vars.getval('buttonrow_xml'))[:] elem_xml.find('frame_methods')[:] = (await treeframe_vars.getval('methods_xml'))[:] elif", "def dump_body_elem(caller, xml): # called from setup_form_body.grid_frame 'before_save' body = caller.data_objects['body'] elem_type =", "await body.getval('elem') if elem_type == 'grid': grid_vars = caller.data_objects['grid_vars'] # await body.setval('data_object', await", "elem_xml = etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'cur_columns') etree.SubElement(elem_xml, 'cur_filter') etree.SubElement(elem_xml, 'cur_sequence') etree.SubElement(elem_xml, 'grid_methods')", "form_vars.save() tool = caller.data_objects['tool'] await tool.delete_all() for seq, tool_xml in enumerate(toolbar_xml): # init_vals", "form_vars.get_val_from_xml( 'on_close_form', form_xml.get('on_close_form')) await form_vars.init(init_vals=init_vals) obj_names = caller.data_objects['obj_names'] await obj_names.delete_all() col_names = caller.data_objects['col_names']", "= caller.data_objects['obj_names'] col_names = caller.data_objects['col_names'] memobj = caller.data_objects['memobj'] memcol = caller.data_objects['memcol'] memobjs_xml =", "form_vars.setval('toolbar_xml', toolbar_xml) await form_vars.setval('tb_template', await form_vars.get_val_from_xml('tb_template', toolbar_xml.get('template'))) await form_vars.setval('tb_title', await form_vars.get_val_from_xml('tb_title', toolbar_xml.get('title'))) await", "'type', 'source') async def load_ioparms(caller, xml): # called from setup_form_ioparams 'on_start_frame' form_vars =", "None: methods_xml = etree.Element('frame_methods') await form_vars.setval('methods_xml', methods_xml) await 
form_vars.setval('method_template', await form_vars.get_val_from_xml('method_template', methods_xml.get('template'))) await", "this to store attributes - use sub_type columns instead # only use it", "inline_xml.set('title', await inline_vars.getval('title')) inline_xml.append(await inline_vars.getval('frame_xml')) # inline_params = await form_vars.getval('inline_xml') # for name,", "await method.save() async def dump_methods(caller, xml): # called from setup_form_methods 'before_save' form_vars =", "= etree.Element(elem_type) etree.SubElement(elem_xml, 'toolbar') etree.SubElement(elem_xml, 'body') etree.SubElement(elem_xml, 'button_row') etree.SubElement(elem_xml, 'frame_methods') elem_xml.find('toolbar')[:] = (await", "dbobj_elem.get('table_name')) ) cur = await conn.exec_sql(sql) table_id, descr = await cur.__anext__() await obj_names.init(init_vals={", "== 'grid': grid_vars = caller.data_objects['grid_vars'] # await body.setval('data_object', await grid_vars.getval('data_object')) # await body.setval('obj_descr'," ]
[ "pathlib import Path def __create_intensity_heatmap_plot(self): fig, ax = plt.subplots(figsize=(30, 30), dpi=300) # Sample", "import Path def __create_intensity_heatmap_plot(self): fig, ax = plt.subplots(figsize=(30, 30), dpi=300) # Sample figsize", "# ax.fig.suptitle(\"Single file\") plt.title(\"Single File\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\")) elif self.train_file is None: plt.title(\"Multi", "fig.savefig(Path(f\"results/lr/marker_heatmap.png\"), bbox_inches='tight') plt.close() def __create_r2_accuracy_plot(self): \"\"\" Creates a bar plot showing the accuracy", "__create_r2_accuracy_plot(self): \"\"\" Creates a bar plot showing the accuracy of the model for", "Score\", \"Marker\") ax.set(xlim=(0, 1)) if self.test_file is None: # ax.fig.suptitle(\"Single file\") plt.title(\"Single File\",", "def __create_r2_accuracy_plot(self): \"\"\" Creates a bar plot showing the accuracy of the model", "\"\"\" Creates a bar plot showing the accuracy of the model for each", "bbox_inches='tight') plt.close() def __create_r2_accuracy_plot(self): \"\"\" Creates a bar plot showing the accuracy of", "= plt.subplots(figsize=(30, 30), dpi=300) # Sample figsize in inches sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers) ax.set_title(\"Marker intensities\")", "palette=\"dark\", alpha=.6, height=6 ) ax.despine(left=True) ax.set_axis_labels(\"R2 Score\", \"Marker\") ax.set(xlim=(0, 1)) if self.test_file is", "each marker :return: \"\"\" ax = sns.catplot( data=self.prediction_scores, kind=\"bar\", x=\"Score\", y=\"Marker\", hue=\"Model\", ci=\"sd\",", "accuracy of the model for each marker :return: \"\"\" ax = sns.catplot( data=self.prediction_scores,", "file\") plt.title(\"Single File\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\")) elif self.train_file is None: plt.title(\"Multi Files\", y=1.02)", "is None: # 
ax.fig.suptitle(\"Single file\") plt.title(\"Single File\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\")) elif self.train_file is", "fig.tight_layout() fig.savefig(Path(f\"results/lr/marker_heatmap.png\"), bbox_inches='tight') plt.close() def __create_r2_accuracy_plot(self): \"\"\" Creates a bar plot showing the", "model for each marker :return: \"\"\" ax = sns.catplot( data=self.prediction_scores, kind=\"bar\", x=\"Score\", y=\"Marker\",", "seaborn as sns from pathlib import Path def __create_intensity_heatmap_plot(self): fig, ax = plt.subplots(figsize=(30,", "the accuracy of the model for each marker :return: \"\"\" ax = sns.catplot(", "import seaborn as sns from pathlib import Path def __create_intensity_heatmap_plot(self): fig, ax =", "if self.test_file is None: # ax.fig.suptitle(\"Single file\") plt.title(\"Single File\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\")) elif", "sns from pathlib import Path def __create_intensity_heatmap_plot(self): fig, ax = plt.subplots(figsize=(30, 30), dpi=300)", "a bar plot showing the accuracy of the model for each marker :return:", "self.test_file is None: # ax.fig.suptitle(\"Single file\") plt.title(\"Single File\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\")) elif self.train_file", "y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\")) elif self.train_file is None: plt.title(\"Multi Files\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.test_file_name}_multi_score_predictions.png\")) else:", "\"\"\" ax = sns.catplot( data=self.prediction_scores, kind=\"bar\", x=\"Score\", y=\"Marker\", hue=\"Model\", ci=\"sd\", palette=\"dark\", alpha=.6, height=6", "None: plt.title(\"Multi Files\", y=1.02) 
ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.test_file_name}_multi_score_predictions.png\")) else: plt.title(\"Train Test File\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_{self.test_file_name}_score_predictions.png\"))", "plt.title(\"Multi Files\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.test_file_name}_multi_score_predictions.png\")) else: plt.title(\"Train Test File\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_{self.test_file_name}_score_predictions.png\")) plt.close()", "Creates a bar plot showing the accuracy of the model for each marker", ") ax.despine(left=True) ax.set_axis_labels(\"R2 Score\", \"Marker\") ax.set(xlim=(0, 1)) if self.test_file is None: # ax.fig.suptitle(\"Single", "is None: plt.title(\"Multi Files\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.test_file_name}_multi_score_predictions.png\")) else: plt.title(\"Train Test File\", y=1.02) ax.legend.set_title(\"Model\")", "plt import seaborn as sns from pathlib import Path def __create_intensity_heatmap_plot(self): fig, ax", "= ax.get_figure() fig.tight_layout() fig.savefig(Path(f\"results/lr/marker_heatmap.png\"), bbox_inches='tight') plt.close() def __create_r2_accuracy_plot(self): \"\"\" Creates a bar plot", "kind=\"bar\", x=\"Score\", y=\"Marker\", hue=\"Model\", ci=\"sd\", palette=\"dark\", alpha=.6, height=6 ) ax.despine(left=True) ax.set_axis_labels(\"R2 Score\", \"Marker\")", "as sns from pathlib import Path def __create_intensity_heatmap_plot(self): fig, ax = plt.subplots(figsize=(30, 30),", "figsize in inches sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers) ax.set_title(\"Marker intensities\") fig = ax.get_figure() fig.tight_layout() fig.savefig(Path(f\"results/lr/marker_heatmap.png\"), bbox_inches='tight')", "ax.set_title(\"Marker intensities\") fig = ax.get_figure() 
fig.tight_layout() fig.savefig(Path(f\"results/lr/marker_heatmap.png\"), bbox_inches='tight') plt.close() def __create_r2_accuracy_plot(self): \"\"\" Creates", "intensities\") fig = ax.get_figure() fig.tight_layout() fig.savefig(Path(f\"results/lr/marker_heatmap.png\"), bbox_inches='tight') plt.close() def __create_r2_accuracy_plot(self): \"\"\" Creates a", "ax.despine(left=True) ax.set_axis_labels(\"R2 Score\", \"Marker\") ax.set(xlim=(0, 1)) if self.test_file is None: # ax.fig.suptitle(\"Single file\")", "height=6 ) ax.despine(left=True) ax.set_axis_labels(\"R2 Score\", \"Marker\") ax.set(xlim=(0, 1)) if self.test_file is None: #", "File\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\")) elif self.train_file is None: plt.title(\"Multi Files\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.test_file_name}_multi_score_predictions.png\"))", "x=\"Score\", y=\"Marker\", hue=\"Model\", ci=\"sd\", palette=\"dark\", alpha=.6, height=6 ) ax.despine(left=True) ax.set_axis_labels(\"R2 Score\", \"Marker\") ax.set(xlim=(0,", "Sample figsize in inches sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers) ax.set_title(\"Marker intensities\") fig = ax.get_figure() fig.tight_layout() fig.savefig(Path(f\"results/lr/marker_heatmap.png\"),", "for each marker :return: \"\"\" ax = sns.catplot( data=self.prediction_scores, kind=\"bar\", x=\"Score\", y=\"Marker\", hue=\"Model\",", "data=self.prediction_scores, kind=\"bar\", x=\"Score\", y=\"Marker\", hue=\"Model\", ci=\"sd\", palette=\"dark\", alpha=.6, height=6 ) ax.despine(left=True) ax.set_axis_labels(\"R2 Score\",", "\"Marker\") ax.set(xlim=(0, 1)) if self.test_file is None: # ax.fig.suptitle(\"Single file\") plt.title(\"Single File\", y=1.02)", "ax = plt.subplots(figsize=(30, 30), dpi=300) # Sample figsize in inches sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers) ax.set_title(\"Marker", 
"plot showing the accuracy of the model for each marker :return: \"\"\" ax", "ax.set(xlim=(0, 1)) if self.test_file is None: # ax.fig.suptitle(\"Single file\") plt.title(\"Single File\", y=1.02) ax.legend.set_title(\"Model\")", "inches sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers) ax.set_title(\"Marker intensities\") fig = ax.get_figure() fig.tight_layout() fig.savefig(Path(f\"results/lr/marker_heatmap.png\"), bbox_inches='tight') plt.close() def", "30), dpi=300) # Sample figsize in inches sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers) ax.set_title(\"Marker intensities\") fig =", "showing the accuracy of the model for each marker :return: \"\"\" ax =", "of the model for each marker :return: \"\"\" ax = sns.catplot( data=self.prediction_scores, kind=\"bar\",", "plt.subplots(figsize=(30, 30), dpi=300) # Sample figsize in inches sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers) ax.set_title(\"Marker intensities\") fig", "self.train_file is None: plt.title(\"Multi Files\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.test_file_name}_multi_score_predictions.png\")) else: plt.title(\"Train Test File\", y=1.02)", "ax = sns.catplot( data=self.prediction_scores, kind=\"bar\", x=\"Score\", y=\"Marker\", hue=\"Model\", ci=\"sd\", palette=\"dark\", alpha=.6, height=6 )", "sns.catplot( data=self.prediction_scores, kind=\"bar\", x=\"Score\", y=\"Marker\", hue=\"Model\", ci=\"sd\", palette=\"dark\", alpha=.6, height=6 ) ax.despine(left=True) ax.set_axis_labels(\"R2", "ax.get_figure() fig.tight_layout() fig.savefig(Path(f\"results/lr/marker_heatmap.png\"), bbox_inches='tight') plt.close() def __create_r2_accuracy_plot(self): \"\"\" Creates a bar plot showing", "hue=\"Model\", ci=\"sd\", palette=\"dark\", alpha=.6, height=6 ) ax.despine(left=True) ax.set_axis_labels(\"R2 Score\", \"Marker\") ax.set(xlim=(0, 1)) if", "dpi=300) # Sample figsize in inches 
sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers) ax.set_title(\"Marker intensities\") fig = ax.get_figure()", "1)) if self.test_file is None: # ax.fig.suptitle(\"Single file\") plt.title(\"Single File\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\"))", "__create_intensity_heatmap_plot(self): fig, ax = plt.subplots(figsize=(30, 30), dpi=300) # Sample figsize in inches sns.heatmap(self.train_data.X_train,", "plt.title(\"Single File\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\")) elif self.train_file is None: plt.title(\"Multi Files\", y=1.02) ax.legend.set_title(\"Model\")", "fig, ax = plt.subplots(figsize=(30, 30), dpi=300) # Sample figsize in inches sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers)", "Path def __create_intensity_heatmap_plot(self): fig, ax = plt.subplots(figsize=(30, 30), dpi=300) # Sample figsize in", "as plt import seaborn as sns from pathlib import Path def __create_intensity_heatmap_plot(self): fig,", "ax.fig.suptitle(\"Single file\") plt.title(\"Single File\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\")) elif self.train_file is None: plt.title(\"Multi Files\",", "elif self.train_file is None: plt.title(\"Multi Files\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.test_file_name}_multi_score_predictions.png\")) else: plt.title(\"Train Test File\",", "in inches sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers) ax.set_title(\"Marker intensities\") fig = ax.get_figure() fig.tight_layout() fig.savefig(Path(f\"results/lr/marker_heatmap.png\"), bbox_inches='tight') plt.close()", "y=\"Marker\", hue=\"Model\", ci=\"sd\", palette=\"dark\", alpha=.6, height=6 ) ax.despine(left=True) ax.set_axis_labels(\"R2 Score\", \"Marker\") 
ax.set(xlim=(0, 1))", "= sns.catplot( data=self.prediction_scores, kind=\"bar\", x=\"Score\", y=\"Marker\", hue=\"Model\", ci=\"sd\", palette=\"dark\", alpha=.6, height=6 ) ax.despine(left=True)", "sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers) ax.set_title(\"Marker intensities\") fig = ax.get_figure() fig.tight_layout() fig.savefig(Path(f\"results/lr/marker_heatmap.png\"), bbox_inches='tight') plt.close() def __create_r2_accuracy_plot(self):", "ci=\"sd\", palette=\"dark\", alpha=.6, height=6 ) ax.despine(left=True) ax.set_axis_labels(\"R2 Score\", \"Marker\") ax.set(xlim=(0, 1)) if self.test_file", "alpha=.6, height=6 ) ax.despine(left=True) ax.set_axis_labels(\"R2 Score\", \"Marker\") ax.set(xlim=(0, 1)) if self.test_file is None:", "matplotlib.pyplot as plt import seaborn as sns from pathlib import Path def __create_intensity_heatmap_plot(self):", "ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\")) elif self.train_file is None: plt.title(\"Multi Files\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.test_file_name}_multi_score_predictions.png\")) else: plt.title(\"Train", "None: # ax.fig.suptitle(\"Single file\") plt.title(\"Single File\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\")) elif self.train_file is None:", "ax.savefig(Path(f\"results/lr/{self.train_file_name}_score_predictions.png\")) elif self.train_file is None: plt.title(\"Multi Files\", y=1.02) ax.legend.set_title(\"Model\") ax.savefig(Path(f\"results/lr/{self.test_file_name}_multi_score_predictions.png\")) else: plt.title(\"Train Test", "marker :return: \"\"\" ax = sns.catplot( data=self.prediction_scores, kind=\"bar\", x=\"Score\", y=\"Marker\", hue=\"Model\", ci=\"sd\", palette=\"dark\",", "<filename>src/Plotting/linear_plots.py<gh_stars>1-10 import matplotlib.pyplot as plt import seaborn as sns from pathlib 
import Path", ":return: \"\"\" ax = sns.catplot( data=self.prediction_scores, kind=\"bar\", x=\"Score\", y=\"Marker\", hue=\"Model\", ci=\"sd\", palette=\"dark\", alpha=.6,", "import matplotlib.pyplot as plt import seaborn as sns from pathlib import Path def", "fig = ax.get_figure() fig.tight_layout() fig.savefig(Path(f\"results/lr/marker_heatmap.png\"), bbox_inches='tight') plt.close() def __create_r2_accuracy_plot(self): \"\"\" Creates a bar", "plt.close() def __create_r2_accuracy_plot(self): \"\"\" Creates a bar plot showing the accuracy of the", "bar plot showing the accuracy of the model for each marker :return: \"\"\"", "# Sample figsize in inches sns.heatmap(self.train_data.X_train, xticklabels=self.train_data.markers) ax.set_title(\"Marker intensities\") fig = ax.get_figure() fig.tight_layout()", "def __create_intensity_heatmap_plot(self): fig, ax = plt.subplots(figsize=(30, 30), dpi=300) # Sample figsize in inches", "xticklabels=self.train_data.markers) ax.set_title(\"Marker intensities\") fig = ax.get_figure() fig.tight_layout() fig.savefig(Path(f\"results/lr/marker_heatmap.png\"), bbox_inches='tight') plt.close() def __create_r2_accuracy_plot(self): \"\"\"", "from pathlib import Path def __create_intensity_heatmap_plot(self): fig, ax = plt.subplots(figsize=(30, 30), dpi=300) #", "ax.set_axis_labels(\"R2 Score\", \"Marker\") ax.set(xlim=(0, 1)) if self.test_file is None: # ax.fig.suptitle(\"Single file\") plt.title(\"Single", "the model for each marker :return: \"\"\" ax = sns.catplot( data=self.prediction_scores, kind=\"bar\", x=\"Score\"," ]
[ "= output self.drive_msg.drive.speed = self.drive_speed print(\"Distance from wall: %s\" % cur_dist) print(\"Error: %s\"", "- self.goal_distance)) print(\"State: %s\" % self.state) self.drive_publisher.publish(self.drive_msg) if __name__ == '__main__': init() s", "LaserScan, self.laser_callback, queue_size=10) self.drive_publisher = rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped, queue_size=10) self.steering_controller = PIDController(kP=1.2, kD=0) self.drive_msg", "queue_size=10) self.steering_controller = PIDController(kP=1.2, kD=0) self.drive_msg = AckermannDriveStamped() self.cv_bridge = CvBridge() self.goal_distance =", "self.drive_msg = AckermannDriveStamped() self.cv_bridge = CvBridge() self.goal_distance = 0.8 self.drive_speed = 1.2 def", "= CvBridge() self.goal_distance = 0.8 self.drive_speed = 1.2 def laser_callback(self, msg): cur_dist =", "= 1.2 self.drive_msg.drive.steering_angle = output self.drive_msg.drive.speed = self.drive_speed print(\"Distance from wall: %s\" %", "self.steering_controller.output(cur_dist, self.goal_distance) if front_dist < 2.0: self.drive_speed = 0.7 else: self.drive_speed = 1.2", "front_dist < 2.0: self.drive_speed = 0.7 else: self.drive_speed = 1.2 self.drive_msg.drive.steering_angle = output", "queue_size=10) self.drive_publisher = rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped, queue_size=10) self.steering_controller = PIDController(kP=1.2, kD=0) self.drive_msg = AckermannDriveStamped()", "< 2.0: self.drive_speed = 0.7 else: self.drive_speed = 1.2 self.drive_msg.drive.steering_angle = output self.drive_msg.drive.speed", "#!/usr/bin/env python \"\"\" YaBoyWonder LICENSE: Apache-2.0 \"\"\" import os import rospy from enum", "Enum from ackermann_msgs.msg import AckermannDriveStamped from sensor_msgs.msg import LaserScan from controllers import PIDController", "= 0.8 self.drive_speed = 1.2 def laser_callback(self, msg): cur_dist = 
min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))]) front_dist =", "1.2 def laser_callback(self, msg): cur_dist = min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))]) front_dist = msg.ranges(len(msg.ranges)/2) steering_output = self.steering_controller.output(cur_dist,", "\"\"\" import os import rospy from enum import Enum from ackermann_msgs.msg import AckermannDriveStamped", "self.laser_subscriber = rospy.Subscriber('/scan', LaserScan, self.laser_callback, queue_size=10) self.drive_publisher = rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped, queue_size=10) self.steering_controller =", "msg): cur_dist = min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))]) front_dist = msg.ranges(len(msg.ranges)/2) steering_output = self.steering_controller.output(cur_dist, self.goal_distance) if front_dist", "% cur_dist) print(\"Error: %s\" % (cur_dist - self.goal_distance)) print(\"State: %s\" % self.state) self.drive_publisher.publish(self.drive_msg)", "if front_dist < 2.0: self.drive_speed = 0.7 else: self.drive_speed = 1.2 self.drive_msg.drive.steering_angle =", "self.laser_callback, queue_size=10) self.drive_publisher = rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped, queue_size=10) self.steering_controller = PIDController(kP=1.2, kD=0) self.drive_msg =", "AckermannDriveStamped from sensor_msgs.msg import LaserScan from controllers import PIDController class WallFollowerNode: def __init__(self):", "else: self.drive_speed = 1.2 self.drive_msg.drive.steering_angle = output self.drive_msg.drive.speed = self.drive_speed print(\"Distance from wall:", "Apache-2.0 \"\"\" import os import rospy from enum import Enum from ackermann_msgs.msg import", "print(\"Error: %s\" % (cur_dist - self.goal_distance)) print(\"State: %s\" % self.state) self.drive_publisher.publish(self.drive_msg) if __name__", "cur_dist) print(\"Error: %s\" % (cur_dist - self.goal_distance)) 
print(\"State: %s\" % self.state) self.drive_publisher.publish(self.drive_msg) if", "%s\" % self.state) self.drive_publisher.publish(self.drive_msg) if __name__ == '__main__': init() s = WallFollowerNode() rospy.spin()", "LaserScan from controllers import PIDController class WallFollowerNode: def __init__(self): rospy.init_node(STATE_MACHINE_NODE_NAME) self.laser_subscriber = rospy.Subscriber('/scan',", "CvBridge() self.goal_distance = 0.8 self.drive_speed = 1.2 def laser_callback(self, msg): cur_dist = min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))])", "laser_callback(self, msg): cur_dist = min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))]) front_dist = msg.ranges(len(msg.ranges)/2) steering_output = self.steering_controller.output(cur_dist, self.goal_distance) if", "msg.ranges(len(msg.ranges)/2) steering_output = self.steering_controller.output(cur_dist, self.goal_distance) if front_dist < 2.0: self.drive_speed = 0.7 else:", "= AckermannDriveStamped() self.cv_bridge = CvBridge() self.goal_distance = 0.8 self.drive_speed = 1.2 def laser_callback(self,", "= 1.2 def laser_callback(self, msg): cur_dist = min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))]) front_dist = msg.ranges(len(msg.ranges)/2) steering_output =", "rospy.init_node(STATE_MACHINE_NODE_NAME) self.laser_subscriber = rospy.Subscriber('/scan', LaserScan, self.laser_callback, queue_size=10) self.drive_publisher = rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped, queue_size=10) self.steering_controller", "1.2 self.drive_msg.drive.steering_angle = output self.drive_msg.drive.speed = self.drive_speed print(\"Distance from wall: %s\" % cur_dist)", "WallFollowerNode: def __init__(self): rospy.init_node(STATE_MACHINE_NODE_NAME) self.laser_subscriber = rospy.Subscriber('/scan', LaserScan, self.laser_callback, queue_size=10) self.drive_publisher = rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0',", "= 
msg.ranges(len(msg.ranges)/2) steering_output = self.steering_controller.output(cur_dist, self.goal_distance) if front_dist < 2.0: self.drive_speed = 0.7", "% (cur_dist - self.goal_distance)) print(\"State: %s\" % self.state) self.drive_publisher.publish(self.drive_msg) if __name__ == '__main__':", "import PIDController class WallFollowerNode: def __init__(self): rospy.init_node(STATE_MACHINE_NODE_NAME) self.laser_subscriber = rospy.Subscriber('/scan', LaserScan, self.laser_callback, queue_size=10)", "YaBoyWonder LICENSE: Apache-2.0 \"\"\" import os import rospy from enum import Enum from", "front_dist = msg.ranges(len(msg.ranges)/2) steering_output = self.steering_controller.output(cur_dist, self.goal_distance) if front_dist < 2.0: self.drive_speed =", "from sensor_msgs.msg import LaserScan from controllers import PIDController class WallFollowerNode: def __init__(self): rospy.init_node(STATE_MACHINE_NODE_NAME)", "python \"\"\" YaBoyWonder LICENSE: Apache-2.0 \"\"\" import os import rospy from enum import", "<reponame>YaBoyWonder/Racecar<gh_stars>1-10 #!/usr/bin/env python \"\"\" YaBoyWonder LICENSE: Apache-2.0 \"\"\" import os import rospy from", "enum import Enum from ackermann_msgs.msg import AckermannDriveStamped from sensor_msgs.msg import LaserScan from controllers", "print(\"Distance from wall: %s\" % cur_dist) print(\"Error: %s\" % (cur_dist - self.goal_distance)) print(\"State:", "self.drive_speed = 1.2 def laser_callback(self, msg): cur_dist = min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))]) front_dist = msg.ranges(len(msg.ranges)/2) steering_output", "= 0.7 else: self.drive_speed = 1.2 self.drive_msg.drive.steering_angle = output self.drive_msg.drive.speed = self.drive_speed print(\"Distance", "rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped, queue_size=10) self.steering_controller = PIDController(kP=1.2, kD=0) self.drive_msg = AckermannDriveStamped() self.cv_bridge = CvBridge()", "import 
AckermannDriveStamped from sensor_msgs.msg import LaserScan from controllers import PIDController class WallFollowerNode: def", "= self.drive_speed print(\"Distance from wall: %s\" % cur_dist) print(\"Error: %s\" % (cur_dist -", "sensor_msgs.msg import LaserScan from controllers import PIDController class WallFollowerNode: def __init__(self): rospy.init_node(STATE_MACHINE_NODE_NAME) self.laser_subscriber", "PIDController(kP=1.2, kD=0) self.drive_msg = AckermannDriveStamped() self.cv_bridge = CvBridge() self.goal_distance = 0.8 self.drive_speed =", "self.drive_msg.drive.speed = self.drive_speed print(\"Distance from wall: %s\" % cur_dist) print(\"Error: %s\" % (cur_dist", "from controllers import PIDController class WallFollowerNode: def __init__(self): rospy.init_node(STATE_MACHINE_NODE_NAME) self.laser_subscriber = rospy.Subscriber('/scan', LaserScan,", "self.cv_bridge = CvBridge() self.goal_distance = 0.8 self.drive_speed = 1.2 def laser_callback(self, msg): cur_dist", "import os import rospy from enum import Enum from ackermann_msgs.msg import AckermannDriveStamped from", "output self.drive_msg.drive.speed = self.drive_speed print(\"Distance from wall: %s\" % cur_dist) print(\"Error: %s\" %", "\"\"\" YaBoyWonder LICENSE: Apache-2.0 \"\"\" import os import rospy from enum import Enum", "import rospy from enum import Enum from ackermann_msgs.msg import AckermannDriveStamped from sensor_msgs.msg import", "= self.steering_controller.output(cur_dist, self.goal_distance) if front_dist < 2.0: self.drive_speed = 0.7 else: self.drive_speed =", "def __init__(self): rospy.init_node(STATE_MACHINE_NODE_NAME) self.laser_subscriber = rospy.Subscriber('/scan', LaserScan, self.laser_callback, queue_size=10) self.drive_publisher = rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped,", "import Enum from ackermann_msgs.msg import AckermannDriveStamped from sensor_msgs.msg import LaserScan from controllers import", "from wall: %s\" % cur_dist) 
print(\"Error: %s\" % (cur_dist - self.goal_distance)) print(\"State: %s\"", "self.drive_speed print(\"Distance from wall: %s\" % cur_dist) print(\"Error: %s\" % (cur_dist - self.goal_distance))", "LICENSE: Apache-2.0 \"\"\" import os import rospy from enum import Enum from ackermann_msgs.msg", "(cur_dist - self.goal_distance)) print(\"State: %s\" % self.state) self.drive_publisher.publish(self.drive_msg) if __name__ == '__main__': init()", "PIDController class WallFollowerNode: def __init__(self): rospy.init_node(STATE_MACHINE_NODE_NAME) self.laser_subscriber = rospy.Subscriber('/scan', LaserScan, self.laser_callback, queue_size=10) self.drive_publisher", "%s\" % cur_dist) print(\"Error: %s\" % (cur_dist - self.goal_distance)) print(\"State: %s\" % self.state)", "self.steering_controller = PIDController(kP=1.2, kD=0) self.drive_msg = AckermannDriveStamped() self.cv_bridge = CvBridge() self.goal_distance = 0.8", "2.0: self.drive_speed = 0.7 else: self.drive_speed = 1.2 self.drive_msg.drive.steering_angle = output self.drive_msg.drive.speed =", "self.goal_distance = 0.8 self.drive_speed = 1.2 def laser_callback(self, msg): cur_dist = min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))]) front_dist", "= PIDController(kP=1.2, kD=0) self.drive_msg = AckermannDriveStamped() self.cv_bridge = CvBridge() self.goal_distance = 0.8 self.drive_speed", "steering_output = self.steering_controller.output(cur_dist, self.goal_distance) if front_dist < 2.0: self.drive_speed = 0.7 else: self.drive_speed", "def laser_callback(self, msg): cur_dist = min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))]) front_dist = msg.ranges(len(msg.ranges)/2) steering_output = self.steering_controller.output(cur_dist, self.goal_distance)", "from enum import Enum from ackermann_msgs.msg import AckermannDriveStamped from sensor_msgs.msg import LaserScan from", "self.drive_msg.drive.steering_angle = output self.drive_msg.drive.speed = self.drive_speed print(\"Distance from 
wall: %s\" % cur_dist) print(\"Error:", "0.7 else: self.drive_speed = 1.2 self.drive_msg.drive.steering_angle = output self.drive_msg.drive.speed = self.drive_speed print(\"Distance from", "__init__(self): rospy.init_node(STATE_MACHINE_NODE_NAME) self.laser_subscriber = rospy.Subscriber('/scan', LaserScan, self.laser_callback, queue_size=10) self.drive_publisher = rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped, queue_size=10)", "cur_dist = min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))]) front_dist = msg.ranges(len(msg.ranges)/2) steering_output = self.steering_controller.output(cur_dist, self.goal_distance) if front_dist <", "os import rospy from enum import Enum from ackermann_msgs.msg import AckermannDriveStamped from sensor_msgs.msg", "controllers import PIDController class WallFollowerNode: def __init__(self): rospy.init_node(STATE_MACHINE_NODE_NAME) self.laser_subscriber = rospy.Subscriber('/scan', LaserScan, self.laser_callback,", "= rospy.Subscriber('/scan', LaserScan, self.laser_callback, queue_size=10) self.drive_publisher = rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped, queue_size=10) self.steering_controller = PIDController(kP=1.2,", "rospy from enum import Enum from ackermann_msgs.msg import AckermannDriveStamped from sensor_msgs.msg import LaserScan", "rospy.Subscriber('/scan', LaserScan, self.laser_callback, queue_size=10) self.drive_publisher = rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped, queue_size=10) self.steering_controller = PIDController(kP=1.2, kD=0)", "self.drive_publisher = rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped, queue_size=10) self.steering_controller = PIDController(kP=1.2, kD=0) self.drive_msg = AckermannDriveStamped() self.cv_bridge", "import LaserScan from controllers import PIDController class WallFollowerNode: def __init__(self): 
rospy.init_node(STATE_MACHINE_NODE_NAME) self.laser_subscriber =", "0.8 self.drive_speed = 1.2 def laser_callback(self, msg): cur_dist = min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))]) front_dist = msg.ranges(len(msg.ranges)/2)", "class WallFollowerNode: def __init__(self): rospy.init_node(STATE_MACHINE_NODE_NAME) self.laser_subscriber = rospy.Subscriber('/scan', LaserScan, self.laser_callback, queue_size=10) self.drive_publisher =", "kD=0) self.drive_msg = AckermannDriveStamped() self.cv_bridge = CvBridge() self.goal_distance = 0.8 self.drive_speed = 1.2", "from ackermann_msgs.msg import AckermannDriveStamped from sensor_msgs.msg import LaserScan from controllers import PIDController class", "self.goal_distance) if front_dist < 2.0: self.drive_speed = 0.7 else: self.drive_speed = 1.2 self.drive_msg.drive.steering_angle", "AckermannDriveStamped() self.cv_bridge = CvBridge() self.goal_distance = 0.8 self.drive_speed = 1.2 def laser_callback(self, msg):", "print(\"State: %s\" % self.state) self.drive_publisher.publish(self.drive_msg) if __name__ == '__main__': init() s = WallFollowerNode()", "= rospy.Publisher('/vesc/high_level/ackermann_cmd_mux/input/nav_0', AckermannDriveStamped, queue_size=10) self.steering_controller = PIDController(kP=1.2, kD=0) self.drive_msg = AckermannDriveStamped() self.cv_bridge =", "self.drive_speed = 1.2 self.drive_msg.drive.steering_angle = output self.drive_msg.drive.speed = self.drive_speed print(\"Distance from wall: %s\"", "%s\" % (cur_dist - self.goal_distance)) print(\"State: %s\" % self.state) self.drive_publisher.publish(self.drive_msg) if __name__ ==", "ackermann_msgs.msg import AckermannDriveStamped from sensor_msgs.msg import LaserScan from controllers import PIDController class WallFollowerNode:", "self.drive_speed = 0.7 else: self.drive_speed = 1.2 self.drive_msg.drive.steering_angle = output self.drive_msg.drive.speed = self.drive_speed", "self.goal_distance)) print(\"State: %s\" % self.state) 
self.drive_publisher.publish(self.drive_msg) if __name__ == '__main__': init() s =", "AckermannDriveStamped, queue_size=10) self.steering_controller = PIDController(kP=1.2, kD=0) self.drive_msg = AckermannDriveStamped() self.cv_bridge = CvBridge() self.goal_distance", "= min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))]) front_dist = msg.ranges(len(msg.ranges)/2) steering_output = self.steering_controller.output(cur_dist, self.goal_distance) if front_dist < 2.0:", "min(msg.ranges[int(0.4*len(msg.ranges)):int(0.6*len(msg.ranges))]) front_dist = msg.ranges(len(msg.ranges)/2) steering_output = self.steering_controller.output(cur_dist, self.goal_distance) if front_dist < 2.0: self.drive_speed", "wall: %s\" % cur_dist) print(\"Error: %s\" % (cur_dist - self.goal_distance)) print(\"State: %s\" %" ]
[ "from .command import Command from .connection import Connection from .client_base import ClientBase from", "int = 5250): if not self.connection: self.connection = Connection(host, port) def send(self, command:", "connect(self, host: str = \"127.0.0.1\", port: int = 5250): if not self.connection: self.connection", "send(self, command: Command) -> ResponseBase: return self.send_raw(bytes(command)) def send_raw(self, data: bytes) -> ResponseBase:", "port: int = 5250): if not self.connection: self.connection = Connection(host, port) def send(self,", "-> ResponseBase: self.connection.send(data) return self.process_response() def process_response(self) -> ResponseBase: data = self.connection.receive() return", ".client_base import ClientBase from amcp_pylib.response import ResponseBase, ResponseFactory class Client(ClientBase): \"\"\" Simple connection", "\"\"\" Simple connection client class. \"\"\" def connect(self, host: str = \"127.0.0.1\", port:", "ClientBase from amcp_pylib.response import ResponseBase, ResponseFactory class Client(ClientBase): \"\"\" Simple connection client class.", "= \"127.0.0.1\", port: int = 5250): if not self.connection: self.connection = Connection(host, port)", "5250): if not self.connection: self.connection = Connection(host, port) def send(self, command: Command) ->", "port) def send(self, command: Command) -> ResponseBase: return self.send_raw(bytes(command)) def send_raw(self, data: bytes)", "return self.send_raw(bytes(command)) def send_raw(self, data: bytes) -> ResponseBase: self.connection.send(data) return self.process_response() def process_response(self)", "import ResponseBase, ResponseFactory class Client(ClientBase): \"\"\" Simple connection client class. 
\"\"\" def connect(self,", "Connection from .client_base import ClientBase from amcp_pylib.response import ResponseBase, ResponseFactory class Client(ClientBase): \"\"\"", "\"\"\" def connect(self, host: str = \"127.0.0.1\", port: int = 5250): if not", "= Connection(host, port) def send(self, command: Command) -> ResponseBase: return self.send_raw(bytes(command)) def send_raw(self,", "command: Command) -> ResponseBase: return self.send_raw(bytes(command)) def send_raw(self, data: bytes) -> ResponseBase: self.connection.send(data)", "= 5250): if not self.connection: self.connection = Connection(host, port) def send(self, command: Command)", "Command) -> ResponseBase: return self.send_raw(bytes(command)) def send_raw(self, data: bytes) -> ResponseBase: self.connection.send(data) return", "self.connection = Connection(host, port) def send(self, command: Command) -> ResponseBase: return self.send_raw(bytes(command)) def", "ResponseFactory class Client(ClientBase): \"\"\" Simple connection client class. \"\"\" def connect(self, host: str", "connection client class. 
\"\"\" def connect(self, host: str = \"127.0.0.1\", port: int =", "-> ResponseBase: return self.send_raw(bytes(command)) def send_raw(self, data: bytes) -> ResponseBase: self.connection.send(data) return self.process_response()", ".connection import Connection from .client_base import ClientBase from amcp_pylib.response import ResponseBase, ResponseFactory class", "def connect(self, host: str = \"127.0.0.1\", port: int = 5250): if not self.connection:", "def send_raw(self, data: bytes) -> ResponseBase: self.connection.send(data) return self.process_response() def process_response(self) -> ResponseBase:", "import ClientBase from amcp_pylib.response import ResponseBase, ResponseFactory class Client(ClientBase): \"\"\" Simple connection client", "send_raw(self, data: bytes) -> ResponseBase: self.connection.send(data) return self.process_response() def process_response(self) -> ResponseBase: data", "data: bytes) -> ResponseBase: self.connection.send(data) return self.process_response() def process_response(self) -> ResponseBase: data =", "ResponseBase: self.connection.send(data) return self.process_response() def process_response(self) -> ResponseBase: data = self.connection.receive() return ResponseFactory.create_from_bytes(data)", "amcp_pylib.response import ResponseBase, ResponseFactory class Client(ClientBase): \"\"\" Simple connection client class. 
\"\"\" def", "self.send_raw(bytes(command)) def send_raw(self, data: bytes) -> ResponseBase: self.connection.send(data) return self.process_response() def process_response(self) ->", "from .client_base import ClientBase from amcp_pylib.response import ResponseBase, ResponseFactory class Client(ClientBase): \"\"\" Simple", "from .connection import Connection from .client_base import ClientBase from amcp_pylib.response import ResponseBase, ResponseFactory", "bytes) -> ResponseBase: self.connection.send(data) return self.process_response() def process_response(self) -> ResponseBase: data = self.connection.receive()", "ResponseBase, ResponseFactory class Client(ClientBase): \"\"\" Simple connection client class. \"\"\" def connect(self, host:", "host: str = \"127.0.0.1\", port: int = 5250): if not self.connection: self.connection =", "class. \"\"\" def connect(self, host: str = \"127.0.0.1\", port: int = 5250): if", "\"127.0.0.1\", port: int = 5250): if not self.connection: self.connection = Connection(host, port) def", "import Command from .connection import Connection from .client_base import ClientBase from amcp_pylib.response import", "client class. \"\"\" def connect(self, host: str = \"127.0.0.1\", port: int = 5250):", "not self.connection: self.connection = Connection(host, port) def send(self, command: Command) -> ResponseBase: return", "self.connection: self.connection = Connection(host, port) def send(self, command: Command) -> ResponseBase: return self.send_raw(bytes(command))", "ResponseBase: return self.send_raw(bytes(command)) def send_raw(self, data: bytes) -> ResponseBase: self.connection.send(data) return self.process_response() def", "Client(ClientBase): \"\"\" Simple connection client class. 
\"\"\" def connect(self, host: str = \"127.0.0.1\",", "if not self.connection: self.connection = Connection(host, port) def send(self, command: Command) -> ResponseBase:", "Command from .connection import Connection from .client_base import ClientBase from amcp_pylib.response import ResponseBase,", "import Connection from .client_base import ClientBase from amcp_pylib.response import ResponseBase, ResponseFactory class Client(ClientBase):", "from amcp_pylib.response import ResponseBase, ResponseFactory class Client(ClientBase): \"\"\" Simple connection client class. \"\"\"", "Simple connection client class. \"\"\" def connect(self, host: str = \"127.0.0.1\", port: int", "def send(self, command: Command) -> ResponseBase: return self.send_raw(bytes(command)) def send_raw(self, data: bytes) ->", "str = \"127.0.0.1\", port: int = 5250): if not self.connection: self.connection = Connection(host,", "class Client(ClientBase): \"\"\" Simple connection client class. \"\"\" def connect(self, host: str =", "Connection(host, port) def send(self, command: Command) -> ResponseBase: return self.send_raw(bytes(command)) def send_raw(self, data:", ".command import Command from .connection import Connection from .client_base import ClientBase from amcp_pylib.response" ]
[ "np.sqrt( variances.numpy().mean(axis=0) # type:ignore ) def get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact): return center_weights( causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"], # type:", "output.trained_model.p_value < max_p_value_cut ] relative_effects = [ output.results_summary.loc[\"rel_effect\", \"cumulative\"] for output in experiment_outputs", "sns Fig_and_Axes = Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot] def center_weights(values, scales, variances): return (values.numpy().mean(axis=0) - scales.numpy().mean(axis=0))", "def get_diagnostic_values_per_experiment(): \"\"\" Gets diagnostic values per experiment, such as R^2 fit in", "effect size TODO: filter also relative effects by minimum absolute effect size \"\"\"", "the most important variables \"\"\" return {\"id1\": [\"var2\", \"var3\"], \"id2\": [\"var3\", \"var8\"]} def", "experiment_outputs if output.trained_model.p_value < max_p_value_cut ] relative_effects = [ output.results_summary.loc[\"rel_effect\", \"cumulative\"] for output", "significance_cutoff is the minimum value to select the most important variables \"\"\" return", ") -> Fig_and_Axes: \"\"\" Plots distribution of absolute and relative effects Filters by", "get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact): return center_weights( causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"], # type: ignore causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"], # type: ignore causal_impact_object.model_samples[", "most important variables \"\"\" return {\"id1\": [\"var2\", \"var3\"], \"id2\": [\"var3\", \"var8\"]} def plot_most_important_control_time_series(experiment_dict):", "important variables \"\"\" return {\"id1\": [\"var2\", \"var3\"], \"id2\": [\"var3\", \"var8\"]} def plot_most_important_control_time_series(experiment_dict): \"\"\"", "float, 
percentiles=[0.1, 0.25, 0.5, 0.75, 0.9], ) -> pd.DataFrame: \"\"\" Returns a table", "variables \"\"\" return {\"id1\": [\"var2\", \"var3\"], \"id2\": [\"var3\", \"var8\"]} def plot_most_important_control_time_series(experiment_dict): \"\"\" Plots", "Optional[float] = None, figsize=(12, 6), ) -> Fig_and_Axes: \"\"\" Plots distribution of absolute", "ExperimentOutput import causalimpact import matplotlib.pyplot as plt import matplotlib import seaborn as sns", "center_weights( causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"], # type: ignore causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"], # type: ignore causal_impact_object.model_samples[ \"SparseLinearRegression/_local_scale_variances\" ], #", "percentiles=[0.1, 0.25, 0.5, 0.75, 0.9], ) -> pd.DataFrame: \"\"\" Returns a table with", "for col_name, coeff in zip( causal_impact_object.pre_data.columns[1::], standardized_coefficients ) } except Exception as e:", "\"\"\" try: standardized_coefficients = get_standardized_coefficients(causal_impact_object) return { col_name: coeff for col_name, coeff in", "-> Dict[str, List[str]]: \"\"\" Per experiment id, gets a list of the most", "in zip( causal_impact_object.pre_data.columns[1::], standardized_coefficients ) } except Exception as e: print(e) def plot_distribution_of_results(", "def plot_most_important_control_time_series(experiment_dict): \"\"\" Plots the most important control time series \"\"\" pass def", "(values.numpy().mean(axis=0) - scales.numpy().mean(axis=0)) / np.sqrt( variances.numpy().mean(axis=0) # type:ignore ) def get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact): return", "dictionary with the name of the columns and the standardized coefficients \"\"\" try:", "relative effects\") return fig, axes def summary_of_results( results_df: pd.DataFrame, max_p_value_cut: float, min_abs_effect_size_cut: float,", "average and percentiles of the results \"\"\" 
return pd.DataFrame({\"x\": [1], \"y\": [3]}) def", "# type: ignore ) def get_dictionary_of_standardized_coefficients( causal_impact_object: Union[causalimpact.CausalImpact], ) -> dict: # type:", "Dict[str, List[str]]: \"\"\" Per experiment id, gets a list of the most important", "by a max_p_value and a minimum absolute effect size TODO: filter also relative", "standardized coefficients \"\"\" try: standardized_coefficients = get_standardized_coefficients(causal_impact_object) return { col_name: coeff for col_name,", "with the name of the columns and the standardized coefficients \"\"\" try: standardized_coefficients", "as plt import matplotlib import seaborn as sns Fig_and_Axes = Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot] def", "return pd.DataFrame({\"x\": [1], \"y\": [3]}) def get_most_important_control_time_series( significance_cutoff: float, max_number_of_columns: int ) ->", "-> Fig_and_Axes: \"\"\" Plots distribution of absolute and relative effects Filters by a", "minimum value to select the most important variables \"\"\" return {\"id1\": [\"var2\", \"var3\"],", "as e: print(e) def plot_distribution_of_results( experiment_results: Dict[str, ExperimentOutput], max_p_value_cut: float = 1.0, min_abs_effect_size_cut:", "<filename>CausalImpactExplainer/reporting.py ############ # # Helper functions to report results of experiments # #", "the columns and the standardized coefficients \"\"\" try: standardized_coefficients = get_standardized_coefficients(causal_impact_object) return {", "import matplotlib import seaborn as sns Fig_and_Axes = Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot] def center_weights(values, scales,", "min_abs_effect_size_cut: float, percentiles=[0.1, 0.25, 0.5, 0.75, 0.9], ) -> pd.DataFrame: \"\"\" Returns a", "of the most important variables significance_cutoff is the minimum value to select the", "scales, variances): return (values.numpy().mean(axis=0) - scales.numpy().mean(axis=0)) / np.sqrt( 
variances.numpy().mean(axis=0) # type:ignore ) def", "# type: ignore \"\"\" Returns a dictionary with the name of the columns", "coefficients \"\"\" try: standardized_coefficients = get_standardized_coefficients(causal_impact_object) return { col_name: coeff for col_name, coeff", "= Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot] def center_weights(values, scales, variances): return (values.numpy().mean(axis=0) - scales.numpy().mean(axis=0)) / np.sqrt(", ") def get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact): return center_weights( causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"], # type: ignore causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"], # type:", "< max_p_value_cut ] relative_effects = [ output.results_summary.loc[\"rel_effect\", \"cumulative\"] for output in experiment_outputs if", "= absolute_effects[absolute_effects >= min_abs_effect_size_cut] fig, axes = plt.subplots(1, 2, figsize=figsize) sns.distplot(absolute_effects, ax=axes[0]) axes[0].set_title(\"Distribution", "results \"\"\" return pd.DataFrame({\"x\": [1], \"y\": [3]}) def get_most_important_control_time_series( significance_cutoff: float, max_number_of_columns: int", "Optional, Tuple import numpy as np from .utils import ExperimentOutput import causalimpact import", "axes[0].set_title(\"Distribution of absolute effects\") sns.distplot(relative_effects, ax=axes[1]) axes[1].set_title(\"Distribution of relative effects\") return fig, axes", "import matplotlib.pyplot as plt import matplotlib import seaborn as sns Fig_and_Axes = Tuple[matplotlib.figure.Figure,", "# Helper functions to report results of experiments # # ############ from os", "of absolute and relative effects Filters by a max_p_value and a minimum absolute", "Plots the most important control time series \"\"\" pass def get_diagnostic_values_per_experiment(): \"\"\" Gets", "############ from os import error 
import pandas as pd from typing import Dict,", "get_most_important_control_time_series( significance_cutoff: float, max_number_of_columns: int ) -> Dict[str, List[str]]: \"\"\" Per experiment id,", "ignore causal_impact_object.model_samples[ \"SparseLinearRegression/_local_scale_variances\" ], # type: ignore ) def get_dictionary_of_standardized_coefficients( causal_impact_object: Union[causalimpact.CausalImpact], )", "Per experiment id, gets a list of the most important variables significance_cutoff is", "max_number_of_columns: int ) -> Dict[str, List[str]]: \"\"\" Per experiment id, gets a list", "col_name, coeff in zip( causal_impact_object.pre_data.columns[1::], standardized_coefficients ) } except Exception as e: print(e)", "and relative effects Filters by a max_p_value and a minimum absolute effect size", "{\"id1\": [\"var2\", \"var3\"], \"id2\": [\"var3\", \"var8\"]} def plot_most_important_control_time_series(experiment_dict): \"\"\" Plots the most important", "sum, median, average and percentiles of the results \"\"\" return pd.DataFrame({\"x\": [1], \"y\":", "matplotlib import seaborn as sns Fig_and_Axes = Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot] def center_weights(values, scales, variances):", "[ output.results_summary.loc[\"rel_effect\", \"cumulative\"] for output in experiment_outputs if output.trained_model.p_value < max_p_value_cut ] #", "pass def get_diagnostic_values_per_experiment(): \"\"\" Gets diagnostic values per experiment, such as R^2 fit", "causal_impact_object: Union[causalimpact.CausalImpact], ) -> dict: # type: ignore \"\"\" Returns a dictionary with", "causal_impact_object.model_samples[ \"SparseLinearRegression/_local_scale_variances\" ], # type: ignore ) def get_dictionary_of_standardized_coefficients( causal_impact_object: Union[causalimpact.CausalImpact], ) ->", "\"\"\" Returns a dictionary with the name of the columns and the standardized", "from .utils import ExperimentOutput import causalimpact import 
matplotlib.pyplot as plt import matplotlib import", "by minimum absolute effect size \"\"\" experiment_outputs = experiment_results.values() absolute_effects = [ output.results_summary.loc[\"abs_effect\",", "= get_standardized_coefficients(causal_impact_object) return { col_name: coeff for col_name, coeff in zip( causal_impact_object.pre_data.columns[1::], standardized_coefficients", ") -> dict: # type: ignore \"\"\" Returns a dictionary with the name", "0.75, 0.9], ) -> pd.DataFrame: \"\"\" Returns a table with the sum, median,", "type: ignore ) def get_dictionary_of_standardized_coefficients( causal_impact_object: Union[causalimpact.CausalImpact], ) -> dict: # type: ignore", "] relative_effects = [ output.results_summary.loc[\"rel_effect\", \"cumulative\"] for output in experiment_outputs if output.trained_model.p_value <", "\"\"\" experiment_outputs = experiment_results.values() absolute_effects = [ output.results_summary.loc[\"abs_effect\", \"cumulative\"] for output in experiment_outputs", "e: print(e) def plot_distribution_of_results( experiment_results: Dict[str, ExperimentOutput], max_p_value_cut: float = 1.0, min_abs_effect_size_cut: Optional[float]", "a minimum absolute effect size TODO: filter also relative effects by minimum absolute", "and a minimum absolute effect size TODO: filter also relative effects by minimum", "also relative effects by minimum absolute effect size \"\"\" experiment_outputs = experiment_results.values() absolute_effects", "the most important variables significance_cutoff is the minimum value to select the most", "variances.numpy().mean(axis=0) # type:ignore ) def get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact): return center_weights( causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"], # type: ignore", "\"id2\": [\"var3\", \"var8\"]} def plot_most_important_control_time_series(experiment_dict): \"\"\" Plots the most important control time series", ") } except 
Exception as e: print(e) def plot_distribution_of_results( experiment_results: Dict[str, ExperimentOutput], max_p_value_cut:", "Tuple import numpy as np from .utils import ExperimentOutput import causalimpact import matplotlib.pyplot", "effect size \"\"\" experiment_outputs = experiment_results.values() absolute_effects = [ output.results_summary.loc[\"abs_effect\", \"cumulative\"] for output", "min_abs_effect_size_cut] fig, axes = plt.subplots(1, 2, figsize=figsize) sns.distplot(absolute_effects, ax=axes[0]) axes[0].set_title(\"Distribution of absolute effects\")", "plt.subplots(1, 2, figsize=figsize) sns.distplot(absolute_effects, ax=axes[0]) axes[0].set_title(\"Distribution of absolute effects\") sns.distplot(relative_effects, ax=axes[1]) axes[1].set_title(\"Distribution of", "standardized_coefficients = get_standardized_coefficients(causal_impact_object) return { col_name: coeff for col_name, coeff in zip( causal_impact_object.pre_data.columns[1::],", "axes = plt.subplots(1, 2, figsize=figsize) sns.distplot(absolute_effects, ax=axes[0]) axes[0].set_title(\"Distribution of absolute effects\") sns.distplot(relative_effects, ax=axes[1])", "pd from typing import Dict, List, Union, Optional, Tuple import numpy as np", "0.9], ) -> pd.DataFrame: \"\"\" Returns a table with the sum, median, average", "and the standardized coefficients \"\"\" try: standardized_coefficients = get_standardized_coefficients(causal_impact_object) return { col_name: coeff", "control time series \"\"\" pass def get_diagnostic_values_per_experiment(): \"\"\" Gets diagnostic values per experiment,", "figsize=figsize) sns.distplot(absolute_effects, ax=axes[0]) axes[0].set_title(\"Distribution of absolute effects\") sns.distplot(relative_effects, ax=axes[1]) axes[1].set_title(\"Distribution of relative effects\")", "0.5, 0.75, 0.9], ) -> pd.DataFrame: \"\"\" Returns a table with the sum,", "a list of the most important variables significance_cutoff is the minimum value to", "return {\"id1\": 
[\"var2\", \"var3\"], \"id2\": [\"var3\", \"var8\"]} def plot_most_important_control_time_series(experiment_dict): \"\"\" Plots the most", "filter also relative effects by minimum absolute effect size \"\"\" experiment_outputs = experiment_results.values()", "type: ignore causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"], # type: ignore causal_impact_object.model_samples[ \"SparseLinearRegression/_local_scale_variances\" ], # type: ignore )", "minimum absolute effect size \"\"\" experiment_outputs = experiment_results.values() absolute_effects = [ output.results_summary.loc[\"abs_effect\", \"cumulative\"]", "np from .utils import ExperimentOutput import causalimpact import matplotlib.pyplot as plt import matplotlib", "0.25, 0.5, 0.75, 0.9], ) -> pd.DataFrame: \"\"\" Returns a table with the", "[3]}) def get_most_important_control_time_series( significance_cutoff: float, max_number_of_columns: int ) -> Dict[str, List[str]]: \"\"\" Per", "ExperimentOutput], max_p_value_cut: float = 1.0, min_abs_effect_size_cut: Optional[float] = None, figsize=(12, 6), ) ->", "output.trained_model.p_value < max_p_value_cut ] # filter by min_abs_effect_size if min_abs_effect_size_cut is not None:", "return center_weights( causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"], # type: ignore causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"], # type: ignore causal_impact_object.model_samples[ \"SparseLinearRegression/_local_scale_variances\" ],", "experiment_results: Dict[str, ExperimentOutput], max_p_value_cut: float = 1.0, min_abs_effect_size_cut: Optional[float] = None, figsize=(12, 6),", "type:ignore ) def get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact): return center_weights( causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"], # type: ignore 
causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"], #", "ignore \"\"\" Returns a dictionary with the name of the columns and the", "most important control time series \"\"\" pass def get_diagnostic_values_per_experiment(): \"\"\" Gets diagnostic values", "experiment_results.values() absolute_effects = [ output.results_summary.loc[\"abs_effect\", \"cumulative\"] for output in experiment_outputs if output.trained_model.p_value <", "[ output.results_summary.loc[\"abs_effect\", \"cumulative\"] for output in experiment_outputs if output.trained_model.p_value < max_p_value_cut ] relative_effects", "seaborn as sns Fig_and_Axes = Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot] def center_weights(values, scales, variances): return (values.numpy().mean(axis=0)", "axes[1].set_title(\"Distribution of relative effects\") return fig, axes def summary_of_results( results_df: pd.DataFrame, max_p_value_cut: float,", "plt import matplotlib import seaborn as sns Fig_and_Axes = Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot] def center_weights(values,", "max_p_value and a minimum absolute effect size TODO: filter also relative effects by", "ignore ) def get_dictionary_of_standardized_coefficients( causal_impact_object: Union[causalimpact.CausalImpact], ) -> dict: # type: ignore \"\"\"", "min_abs_effect_size if min_abs_effect_size_cut is not None: absolute_effects = absolute_effects[absolute_effects >= min_abs_effect_size_cut] fig, axes", "figsize=(12, 6), ) -> Fig_and_Axes: \"\"\" Plots distribution of absolute and relative effects", "/ np.sqrt( variances.numpy().mean(axis=0) # type:ignore ) def get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact): return center_weights( causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"], #", "absolute effect size \"\"\" experiment_outputs = experiment_results.values() absolute_effects = [ 
output.results_summary.loc[\"abs_effect\", \"cumulative\"] for", "import pandas as pd from typing import Dict, List, Union, Optional, Tuple import", "1.0, min_abs_effect_size_cut: Optional[float] = None, figsize=(12, 6), ) -> Fig_and_Axes: \"\"\" Plots distribution", "the sum, median, average and percentiles of the results \"\"\" return pd.DataFrame({\"x\": [1],", "select the most important variables \"\"\" return {\"id1\": [\"var2\", \"var3\"], \"id2\": [\"var3\", \"var8\"]}", "results of experiments # # ############ from os import error import pandas as", "causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"], # type: ignore causal_impact_object.model_samples[ \"SparseLinearRegression/_local_scale_variances\" ], # type: ignore ) def get_dictionary_of_standardized_coefficients(", "pandas as pd from typing import Dict, List, Union, Optional, Tuple import numpy", "def plot_distribution_of_results( experiment_results: Dict[str, ExperimentOutput], max_p_value_cut: float = 1.0, min_abs_effect_size_cut: Optional[float] = None,", "experiment_outputs if output.trained_model.p_value < max_p_value_cut ] # filter by min_abs_effect_size if min_abs_effect_size_cut is", "to select the most important variables \"\"\" return {\"id1\": [\"var2\", \"var3\"], \"id2\": [\"var3\",", "get_diagnostic_values_per_experiment(): \"\"\" Gets diagnostic values per experiment, such as R^2 fit in the", "output.results_summary.loc[\"abs_effect\", \"cumulative\"] for output in experiment_outputs if output.trained_model.p_value < max_p_value_cut ] relative_effects =", "causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"], # type: ignore causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"], # type: ignore causal_impact_object.model_samples[ \"SparseLinearRegression/_local_scale_variances\" ], # type:", "matplotlib.axes._subplots.Subplot] def center_weights(values, scales, variances): return 
(values.numpy().mean(axis=0) - scales.numpy().mean(axis=0)) / np.sqrt( variances.numpy().mean(axis=0) #", "the name of the columns and the standardized coefficients \"\"\" try: standardized_coefficients =", "Helper functions to report results of experiments # # ############ from os import", "def get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact): return center_weights( causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"], # type: ignore causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"], # type: ignore", "Gets diagnostic values per experiment, such as R^2 fit in the training period", "report results of experiments # # ############ from os import error import pandas", "for output in experiment_outputs if output.trained_model.p_value < max_p_value_cut ] relative_effects = [ output.results_summary.loc[\"rel_effect\",", "= [ output.results_summary.loc[\"rel_effect\", \"cumulative\"] for output in experiment_outputs if output.trained_model.p_value < max_p_value_cut ]", "is not None: absolute_effects = absolute_effects[absolute_effects >= min_abs_effect_size_cut] fig, axes = plt.subplots(1, 2,", "value to select the most important variables \"\"\" return {\"id1\": [\"var2\", \"var3\"], \"id2\":", "the minimum value to select the most important variables \"\"\" return {\"id1\": [\"var2\",", ".utils import ExperimentOutput import causalimpact import matplotlib.pyplot as plt import matplotlib import seaborn", "try: standardized_coefficients = get_standardized_coefficients(causal_impact_object) return { col_name: coeff for col_name, coeff in zip(", "[\"var2\", \"var3\"], \"id2\": [\"var3\", \"var8\"]} def plot_most_important_control_time_series(experiment_dict): \"\"\" Plots the most important control", "import numpy as np from .utils import ExperimentOutput import causalimpact import matplotlib.pyplot as", "Union[causalimpact.CausalImpact], ) -> dict: # type: ignore \"\"\" 
Returns a dictionary with the", "\"cumulative\"] for output in experiment_outputs if output.trained_model.p_value < max_p_value_cut ] relative_effects = [", "ax=axes[1]) axes[1].set_title(\"Distribution of relative effects\") return fig, axes def summary_of_results( results_df: pd.DataFrame, max_p_value_cut:", "= None, figsize=(12, 6), ) -> Fig_and_Axes: \"\"\" Plots distribution of absolute and", "= plt.subplots(1, 2, figsize=figsize) sns.distplot(absolute_effects, ax=axes[0]) axes[0].set_title(\"Distribution of absolute effects\") sns.distplot(relative_effects, ax=axes[1]) axes[1].set_title(\"Distribution", "{ col_name: coeff for col_name, coeff in zip( causal_impact_object.pre_data.columns[1::], standardized_coefficients ) } except", "type: ignore \"\"\" Returns a dictionary with the name of the columns and", "absolute and relative effects Filters by a max_p_value and a minimum absolute effect", "], # type: ignore ) def get_dictionary_of_standardized_coefficients( causal_impact_object: Union[causalimpact.CausalImpact], ) -> dict: #", "effects\") return fig, axes def summary_of_results( results_df: pd.DataFrame, max_p_value_cut: float, min_abs_effect_size_cut: float, percentiles=[0.1,", "causal_impact_object.pre_data.columns[1::], standardized_coefficients ) } except Exception as e: print(e) def plot_distribution_of_results( experiment_results: Dict[str,", "experiment id, gets a list of the most important variables significance_cutoff is the", "relative_effects = [ output.results_summary.loc[\"rel_effect\", \"cumulative\"] for output in experiment_outputs if output.trained_model.p_value < max_p_value_cut", "in experiment_outputs if output.trained_model.p_value < max_p_value_cut ] relative_effects = [ output.results_summary.loc[\"rel_effect\", \"cumulative\"] for", "effects Filters by a max_p_value and a minimum absolute effect size TODO: filter", "is the minimum value to select the most important variables \"\"\" return {\"id1\":", "median, average and 
percentiles of the results \"\"\" return pd.DataFrame({\"x\": [1], \"y\": [3]})", "\"SparseLinearRegression/_local_scale_variances\" ], # type: ignore ) def get_dictionary_of_standardized_coefficients( causal_impact_object: Union[causalimpact.CausalImpact], ) -> dict:", "table with the sum, median, average and percentiles of the results \"\"\" return", "\"\"\" return {\"id1\": [\"var2\", \"var3\"], \"id2\": [\"var3\", \"var8\"]} def plot_most_important_control_time_series(experiment_dict): \"\"\" Plots the", "Filters by a max_p_value and a minimum absolute effect size TODO: filter also", "\"\"\" Gets diagnostic values per experiment, such as R^2 fit in the training", "import ExperimentOutput import causalimpact import matplotlib.pyplot as plt import matplotlib import seaborn as", "plot_most_important_control_time_series(experiment_dict): \"\"\" Plots the most important control time series \"\"\" pass def get_diagnostic_values_per_experiment():", "Returns a table with the sum, median, average and percentiles of the results", "standardized_coefficients ) } except Exception as e: print(e) def plot_distribution_of_results( experiment_results: Dict[str, ExperimentOutput],", ") def get_dictionary_of_standardized_coefficients( causal_impact_object: Union[causalimpact.CausalImpact], ) -> dict: # type: ignore \"\"\" Returns", "# # ############ from os import error import pandas as pd from typing", "important control time series \"\"\" pass def get_diagnostic_values_per_experiment(): \"\"\" Gets diagnostic values per", "as np from .utils import ExperimentOutput import causalimpact import matplotlib.pyplot as plt import", "6), ) -> Fig_and_Axes: \"\"\" Plots distribution of absolute and relative effects Filters", "the standardized coefficients \"\"\" try: standardized_coefficients = get_standardized_coefficients(causal_impact_object) return { col_name: coeff for", "max_p_value_cut: float = 1.0, min_abs_effect_size_cut: Optional[float] = None, figsize=(12, 6), ) -> 
Fig_and_Axes:", "return { col_name: coeff for col_name, coeff in zip( causal_impact_object.pre_data.columns[1::], standardized_coefficients ) }", "diagnostic values per experiment, such as R^2 fit in the training period \"\"\"", "with the sum, median, average and percentiles of the results \"\"\" return pd.DataFrame({\"x\":", "and percentiles of the results \"\"\" return pd.DataFrame({\"x\": [1], \"y\": [3]}) def get_most_important_control_time_series(", "gets a list of the most important variables significance_cutoff is the minimum value", "TODO: filter also relative effects by minimum absolute effect size \"\"\" experiment_outputs =", "sns.distplot(absolute_effects, ax=axes[0]) axes[0].set_title(\"Distribution of absolute effects\") sns.distplot(relative_effects, ax=axes[1]) axes[1].set_title(\"Distribution of relative effects\") return", "variances): return (values.numpy().mean(axis=0) - scales.numpy().mean(axis=0)) / np.sqrt( variances.numpy().mean(axis=0) # type:ignore ) def get_standardized_coefficients(causal_impact_object:", "coeff in zip( causal_impact_object.pre_data.columns[1::], standardized_coefficients ) } except Exception as e: print(e) def", "List[str]]: \"\"\" Per experiment id, gets a list of the most important variables", "None, figsize=(12, 6), ) -> Fig_and_Axes: \"\"\" Plots distribution of absolute and relative", "= [ output.results_summary.loc[\"abs_effect\", \"cumulative\"] for output in experiment_outputs if output.trained_model.p_value < max_p_value_cut ]", ") -> pd.DataFrame: \"\"\" Returns a table with the sum, median, average and", "not None: absolute_effects = absolute_effects[absolute_effects >= min_abs_effect_size_cut] fig, axes = plt.subplots(1, 2, figsize=figsize)", "of absolute effects\") sns.distplot(relative_effects, ax=axes[1]) axes[1].set_title(\"Distribution of relative effects\") return fig, axes def", "time series \"\"\" pass def get_diagnostic_values_per_experiment(): \"\"\" Gets diagnostic values per experiment, such", 
"\"\"\" return pd.DataFrame({\"x\": [1], \"y\": [3]}) def get_most_important_control_time_series( significance_cutoff: float, max_number_of_columns: int )", "of the columns and the standardized coefficients \"\"\" try: standardized_coefficients = get_standardized_coefficients(causal_impact_object) return", "get_standardized_coefficients(causal_impact_object) return { col_name: coeff for col_name, coeff in zip( causal_impact_object.pre_data.columns[1::], standardized_coefficients )", "sns.distplot(relative_effects, ax=axes[1]) axes[1].set_title(\"Distribution of relative effects\") return fig, axes def summary_of_results( results_df: pd.DataFrame,", "ax=axes[0]) axes[0].set_title(\"Distribution of absolute effects\") sns.distplot(relative_effects, ax=axes[1]) axes[1].set_title(\"Distribution of relative effects\") return fig,", "min_abs_effect_size_cut is not None: absolute_effects = absolute_effects[absolute_effects >= min_abs_effect_size_cut] fig, axes = plt.subplots(1,", "distribution of absolute and relative effects Filters by a max_p_value and a minimum", "effects\") sns.distplot(relative_effects, ax=axes[1]) axes[1].set_title(\"Distribution of relative effects\") return fig, axes def summary_of_results( results_df:", "2, figsize=figsize) sns.distplot(absolute_effects, ax=axes[0]) axes[0].set_title(\"Distribution of absolute effects\") sns.distplot(relative_effects, ax=axes[1]) axes[1].set_title(\"Distribution of relative", "def get_dictionary_of_standardized_coefficients( causal_impact_object: Union[causalimpact.CausalImpact], ) -> dict: # type: ignore \"\"\" Returns a", "Fig_and_Axes = Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot] def center_weights(values, scales, variances): return (values.numpy().mean(axis=0) - scales.numpy().mean(axis=0)) /", "list of the most important variables significance_cutoff is the minimum value to select", "most important variables significance_cutoff is the minimum value to select the most important", 
"relative effects Filters by a max_p_value and a minimum absolute effect size TODO:", "output in experiment_outputs if output.trained_model.p_value < max_p_value_cut ] # filter by min_abs_effect_size if", "absolute effect size TODO: filter also relative effects by minimum absolute effect size", "typing import Dict, List, Union, Optional, Tuple import numpy as np from .utils", "\"\"\" pass def get_diagnostic_values_per_experiment(): \"\"\" Gets diagnostic values per experiment, such as R^2", ") -> Dict[str, List[str]]: \"\"\" Per experiment id, gets a list of the", "\"var3\"], \"id2\": [\"var3\", \"var8\"]} def plot_most_important_control_time_series(experiment_dict): \"\"\" Plots the most important control time", "max_p_value_cut ] relative_effects = [ output.results_summary.loc[\"rel_effect\", \"cumulative\"] for output in experiment_outputs if output.trained_model.p_value", "experiment_outputs = experiment_results.values() absolute_effects = [ output.results_summary.loc[\"abs_effect\", \"cumulative\"] for output in experiment_outputs if", "summary_of_results( results_df: pd.DataFrame, max_p_value_cut: float, min_abs_effect_size_cut: float, percentiles=[0.1, 0.25, 0.5, 0.75, 0.9], )", "of experiments # # ############ from os import error import pandas as pd", "# ############ from os import error import pandas as pd from typing import", "fig, axes = plt.subplots(1, 2, figsize=figsize) sns.distplot(absolute_effects, ax=axes[0]) axes[0].set_title(\"Distribution of absolute effects\") sns.distplot(relative_effects,", "effects by minimum absolute effect size \"\"\" experiment_outputs = experiment_results.values() absolute_effects = [", "] # filter by min_abs_effect_size if min_abs_effect_size_cut is not None: absolute_effects = absolute_effects[absolute_effects", "a dictionary with the name of the columns and the standardized coefficients \"\"\"", "causalimpact.CausalImpact): return center_weights( 
causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"], # type: ignore causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"], # type: ignore causal_impact_object.model_samples[ \"SparseLinearRegression/_local_scale_variances\"", "\"\"\" Plots distribution of absolute and relative effects Filters by a max_p_value and", "def get_most_important_control_time_series( significance_cutoff: float, max_number_of_columns: int ) -> Dict[str, List[str]]: \"\"\" Per experiment", "# type: ignore causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"], # type: ignore causal_impact_object.model_samples[ \"SparseLinearRegression/_local_scale_variances\" ], # type: ignore", "minimum absolute effect size TODO: filter also relative effects by minimum absolute effect", "fig, axes def summary_of_results( results_df: pd.DataFrame, max_p_value_cut: float, min_abs_effect_size_cut: float, percentiles=[0.1, 0.25, 0.5,", "columns and the standardized coefficients \"\"\" try: standardized_coefficients = get_standardized_coefficients(causal_impact_object) return { col_name:", "int ) -> Dict[str, List[str]]: \"\"\" Per experiment id, gets a list of", "None: absolute_effects = absolute_effects[absolute_effects >= min_abs_effect_size_cut] fig, axes = plt.subplots(1, 2, figsize=figsize) sns.distplot(absolute_effects,", "plot_distribution_of_results( experiment_results: Dict[str, ExperimentOutput], max_p_value_cut: float = 1.0, min_abs_effect_size_cut: Optional[float] = None, figsize=(12,", "[\"var3\", \"var8\"]} def plot_most_important_control_time_series(experiment_dict): \"\"\" Plots the most important control time series \"\"\"", "of the results \"\"\" return pd.DataFrame({\"x\": [1], \"y\": [3]}) def get_most_important_control_time_series( significance_cutoff: float,", "-> dict: # type: ignore \"\"\" Returns a dictionary with the name of", "Exception as e: print(e) def plot_distribution_of_results( 
experiment_results: Dict[str, ExperimentOutput], max_p_value_cut: float = 1.0,", "[1], \"y\": [3]}) def get_most_important_control_time_series( significance_cutoff: float, max_number_of_columns: int ) -> Dict[str, List[str]]:", "-> pd.DataFrame: \"\"\" Returns a table with the sum, median, average and percentiles", "values per experiment, such as R^2 fit in the training period \"\"\" pass", "print(e) def plot_distribution_of_results( experiment_results: Dict[str, ExperimentOutput], max_p_value_cut: float = 1.0, min_abs_effect_size_cut: Optional[float] =", "return fig, axes def summary_of_results( results_df: pd.DataFrame, max_p_value_cut: float, min_abs_effect_size_cut: float, percentiles=[0.1, 0.25,", "pd.DataFrame({\"x\": [1], \"y\": [3]}) def get_most_important_control_time_series( significance_cutoff: float, max_number_of_columns: int ) -> Dict[str,", "\"var8\"]} def plot_most_important_control_time_series(experiment_dict): \"\"\" Plots the most important control time series \"\"\" pass", "percentiles of the results \"\"\" return pd.DataFrame({\"x\": [1], \"y\": [3]}) def get_most_important_control_time_series( significance_cutoff:", "filter by min_abs_effect_size if min_abs_effect_size_cut is not None: absolute_effects = absolute_effects[absolute_effects >= min_abs_effect_size_cut]", "Dict, List, Union, Optional, Tuple import numpy as np from .utils import ExperimentOutput", "Union, Optional, Tuple import numpy as np from .utils import ExperimentOutput import causalimpact", "\"\"\" Returns a table with the sum, median, average and percentiles of the", "return (values.numpy().mean(axis=0) - scales.numpy().mean(axis=0)) / np.sqrt( variances.numpy().mean(axis=0) # type:ignore ) def get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact):", "# type:ignore ) def get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact): return center_weights( 
causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"], # type: ignore causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"],", "import Dict, List, Union, Optional, Tuple import numpy as np from .utils import", "matplotlib.pyplot as plt import matplotlib import seaborn as sns Fig_and_Axes = Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot]", "id, gets a list of the most important variables significance_cutoff is the minimum", "= 1.0, min_abs_effect_size_cut: Optional[float] = None, figsize=(12, 6), ) -> Fig_and_Axes: \"\"\" Plots", "\"\"\" Per experiment id, gets a list of the most important variables significance_cutoff", "relative effects by minimum absolute effect size \"\"\" experiment_outputs = experiment_results.values() absolute_effects =", "if min_abs_effect_size_cut is not None: absolute_effects = absolute_effects[absolute_effects >= min_abs_effect_size_cut] fig, axes =", "ignore causal_impact_object.model_samples[\"SparseLinearRegression/_local_scales_noncentered\"], # type: ignore causal_impact_object.model_samples[ \"SparseLinearRegression/_local_scale_variances\" ], # type: ignore ) def", "from os import error import pandas as pd from typing import Dict, List,", "float = 1.0, min_abs_effect_size_cut: Optional[float] = None, figsize=(12, 6), ) -> Fig_and_Axes: \"\"\"", "############ # # Helper functions to report results of experiments # # ############", "min_abs_effect_size_cut: Optional[float] = None, figsize=(12, 6), ) -> Fig_and_Axes: \"\"\" Plots distribution of", "of relative effects\") return fig, axes def summary_of_results( results_df: pd.DataFrame, max_p_value_cut: float, min_abs_effect_size_cut:", ">= min_abs_effect_size_cut] fig, axes = plt.subplots(1, 2, figsize=figsize) sns.distplot(absolute_effects, ax=axes[0]) axes[0].set_title(\"Distribution of absolute", "- scales.numpy().mean(axis=0)) / np.sqrt( variances.numpy().mean(axis=0) # type:ignore ) def 
get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact): return center_weights(", "type: ignore causal_impact_object.model_samples[ \"SparseLinearRegression/_local_scale_variances\" ], # type: ignore ) def get_dictionary_of_standardized_coefficients( causal_impact_object: Union[causalimpact.CausalImpact],", "< max_p_value_cut ] # filter by min_abs_effect_size if min_abs_effect_size_cut is not None: absolute_effects", "size \"\"\" experiment_outputs = experiment_results.values() absolute_effects = [ output.results_summary.loc[\"abs_effect\", \"cumulative\"] for output in", "numpy as np from .utils import ExperimentOutput import causalimpact import matplotlib.pyplot as plt", "Returns a dictionary with the name of the columns and the standardized coefficients", "import seaborn as sns Fig_and_Axes = Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot] def center_weights(values, scales, variances): return", "results_df: pd.DataFrame, max_p_value_cut: float, min_abs_effect_size_cut: float, percentiles=[0.1, 0.25, 0.5, 0.75, 0.9], ) ->", "Dict[str, ExperimentOutput], max_p_value_cut: float = 1.0, min_abs_effect_size_cut: Optional[float] = None, figsize=(12, 6), )", "output in experiment_outputs if output.trained_model.p_value < max_p_value_cut ] relative_effects = [ output.results_summary.loc[\"rel_effect\", \"cumulative\"]", "as sns Fig_and_Axes = Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot] def center_weights(values, scales, variances): return (values.numpy().mean(axis=0) -", "causalimpact import matplotlib.pyplot as plt import matplotlib import seaborn as sns Fig_and_Axes =", "max_p_value_cut ] # filter by min_abs_effect_size if min_abs_effect_size_cut is not None: absolute_effects =", "\"y\": [3]}) def get_most_important_control_time_series( significance_cutoff: float, max_number_of_columns: int ) -> Dict[str, List[str]]: \"\"\"", "center_weights(values, scales, variances): return (values.numpy().mean(axis=0) 
- scales.numpy().mean(axis=0)) / np.sqrt( variances.numpy().mean(axis=0) # type:ignore )", "import causalimpact import matplotlib.pyplot as plt import matplotlib import seaborn as sns Fig_and_Axes", "if output.trained_model.p_value < max_p_value_cut ] relative_effects = [ output.results_summary.loc[\"rel_effect\", \"cumulative\"] for output in", "# type: ignore causal_impact_object.model_samples[ \"SparseLinearRegression/_local_scale_variances\" ], # type: ignore ) def get_dictionary_of_standardized_coefficients( causal_impact_object:", "float, min_abs_effect_size_cut: float, percentiles=[0.1, 0.25, 0.5, 0.75, 0.9], ) -> pd.DataFrame: \"\"\" Returns", "to report results of experiments # # ############ from os import error import", "absolute effects\") sns.distplot(relative_effects, ax=axes[1]) axes[1].set_title(\"Distribution of relative effects\") return fig, axes def summary_of_results(", "absolute_effects = [ output.results_summary.loc[\"abs_effect\", \"cumulative\"] for output in experiment_outputs if output.trained_model.p_value < max_p_value_cut", "coeff for col_name, coeff in zip( causal_impact_object.pre_data.columns[1::], standardized_coefficients ) } except Exception as", "get_dictionary_of_standardized_coefficients( causal_impact_object: Union[causalimpact.CausalImpact], ) -> dict: # type: ignore \"\"\" Returns a dictionary", "= experiment_results.values() absolute_effects = [ output.results_summary.loc[\"abs_effect\", \"cumulative\"] for output in experiment_outputs if output.trained_model.p_value", "important variables significance_cutoff is the minimum value to select the most important variables", "dict: # type: ignore \"\"\" Returns a dictionary with the name of the", "size TODO: filter also relative effects by minimum absolute effect size \"\"\" experiment_outputs", "def summary_of_results( results_df: pd.DataFrame, max_p_value_cut: float, min_abs_effect_size_cut: float, percentiles=[0.1, 0.25, 0.5, 0.75, 0.9],", "def center_weights(values, scales, 
variances): return (values.numpy().mean(axis=0) - scales.numpy().mean(axis=0)) / np.sqrt( variances.numpy().mean(axis=0) # type:ignore", "Plots distribution of absolute and relative effects Filters by a max_p_value and a", "List, Union, Optional, Tuple import numpy as np from .utils import ExperimentOutput import", "name of the columns and the standardized coefficients \"\"\" try: standardized_coefficients = get_standardized_coefficients(causal_impact_object)", "error import pandas as pd from typing import Dict, List, Union, Optional, Tuple", "from typing import Dict, List, Union, Optional, Tuple import numpy as np from", "\"cumulative\"] for output in experiment_outputs if output.trained_model.p_value < max_p_value_cut ] # filter by", "os import error import pandas as pd from typing import Dict, List, Union,", "variables significance_cutoff is the minimum value to select the most important variables \"\"\"", "the results \"\"\" return pd.DataFrame({\"x\": [1], \"y\": [3]}) def get_most_important_control_time_series( significance_cutoff: float, max_number_of_columns:", "absolute_effects[absolute_effects >= min_abs_effect_size_cut] fig, axes = plt.subplots(1, 2, figsize=figsize) sns.distplot(absolute_effects, ax=axes[0]) axes[0].set_title(\"Distribution of", "float, max_number_of_columns: int ) -> Dict[str, List[str]]: \"\"\" Per experiment id, gets a", "experiments # # ############ from os import error import pandas as pd from", "as pd from typing import Dict, List, Union, Optional, Tuple import numpy as", "# # Helper functions to report results of experiments # # ############ from", "if output.trained_model.p_value < max_p_value_cut ] # filter by min_abs_effect_size if min_abs_effect_size_cut is not", "Tuple[matplotlib.figure.Figure, matplotlib.axes._subplots.Subplot] def center_weights(values, scales, variances): return (values.numpy().mean(axis=0) - scales.numpy().mean(axis=0)) / np.sqrt( variances.numpy().mean(axis=0)", "zip( 
causal_impact_object.pre_data.columns[1::], standardized_coefficients ) } except Exception as e: print(e) def plot_distribution_of_results( experiment_results:", "col_name: coeff for col_name, coeff in zip( causal_impact_object.pre_data.columns[1::], standardized_coefficients ) } except Exception", "in experiment_outputs if output.trained_model.p_value < max_p_value_cut ] # filter by min_abs_effect_size if min_abs_effect_size_cut", "} except Exception as e: print(e) def plot_distribution_of_results( experiment_results: Dict[str, ExperimentOutput], max_p_value_cut: float", "for output in experiment_outputs if output.trained_model.p_value < max_p_value_cut ] # filter by min_abs_effect_size", "output.results_summary.loc[\"rel_effect\", \"cumulative\"] for output in experiment_outputs if output.trained_model.p_value < max_p_value_cut ] # filter", "by min_abs_effect_size if min_abs_effect_size_cut is not None: absolute_effects = absolute_effects[absolute_effects >= min_abs_effect_size_cut] fig,", "# filter by min_abs_effect_size if min_abs_effect_size_cut is not None: absolute_effects = absolute_effects[absolute_effects >=", "a max_p_value and a minimum absolute effect size TODO: filter also relative effects", "max_p_value_cut: float, min_abs_effect_size_cut: float, percentiles=[0.1, 0.25, 0.5, 0.75, 0.9], ) -> pd.DataFrame: \"\"\"", "a table with the sum, median, average and percentiles of the results \"\"\"", "import error import pandas as pd from typing import Dict, List, Union, Optional,", "scales.numpy().mean(axis=0)) / np.sqrt( variances.numpy().mean(axis=0) # type:ignore ) def get_standardized_coefficients(causal_impact_object: causalimpact.CausalImpact): return center_weights( causal_impact_object.model_samples[\"SparseLinearRegression/_weights_noncentered\"],", "absolute_effects = absolute_effects[absolute_effects >= min_abs_effect_size_cut] fig, axes = plt.subplots(1, 2, figsize=figsize) sns.distplot(absolute_effects, ax=axes[0])", "pd.DataFrame: \"\"\" 
Returns a table with the sum, median, average and percentiles of", "Fig_and_Axes: \"\"\" Plots distribution of absolute and relative effects Filters by a max_p_value", "\"\"\" Plots the most important control time series \"\"\" pass def get_diagnostic_values_per_experiment(): \"\"\"", "the most important control time series \"\"\" pass def get_diagnostic_values_per_experiment(): \"\"\" Gets diagnostic", "pd.DataFrame, max_p_value_cut: float, min_abs_effect_size_cut: float, percentiles=[0.1, 0.25, 0.5, 0.75, 0.9], ) -> pd.DataFrame:", "functions to report results of experiments # # ############ from os import error", "except Exception as e: print(e) def plot_distribution_of_results( experiment_results: Dict[str, ExperimentOutput], max_p_value_cut: float =", "significance_cutoff: float, max_number_of_columns: int ) -> Dict[str, List[str]]: \"\"\" Per experiment id, gets", "axes def summary_of_results( results_df: pd.DataFrame, max_p_value_cut: float, min_abs_effect_size_cut: float, percentiles=[0.1, 0.25, 0.5, 0.75,", "series \"\"\" pass def get_diagnostic_values_per_experiment(): \"\"\" Gets diagnostic values per experiment, such as" ]
[ "mul a a mod a 5 snd a set a 0 rcv a", "solution_part_one, solution_part_two class TestPartOne(unittest.TestCase): TEST_INPUT = \"\"\" set a 1 add a 2", "TestPartOne(unittest.TestCase): TEST_INPUT = \"\"\" set a 1 add a 2 mul a a", "1 add a 2 mul a a mod a 5 snd a set", "self.assertEqual(solution_part_one(self.TEST_INPUT), 4) # class TestPartTwo(unittest.TestCase): # def test_one(self): # self.assertEqual(solution_part_two(3), 1222153) if __name__", "\"\"\" def test_one(self): self.assertEqual(solution_part_one(self.TEST_INPUT), 4) # class TestPartTwo(unittest.TestCase): # def test_one(self): # self.assertEqual(solution_part_two(3),", "1 jgz a -2 \"\"\" def test_one(self): self.assertEqual(solution_part_one(self.TEST_INPUT), 4) # class TestPartTwo(unittest.TestCase): #", "a jgz a -1 set a 1 jgz a -2 \"\"\" def test_one(self):", "\"\"\" set a 1 add a 2 mul a a mod a 5", "2 mul a a mod a 5 snd a set a 0 rcv", "set a 0 rcv a jgz a -1 set a 1 jgz a", "a 1 jgz a -2 \"\"\" def test_one(self): self.assertEqual(solution_part_one(self.TEST_INPUT), 4) # class TestPartTwo(unittest.TestCase):", "def test_one(self): self.assertEqual(solution_part_one(self.TEST_INPUT), 4) # class TestPartTwo(unittest.TestCase): # def test_one(self): # self.assertEqual(solution_part_two(3), 1222153)", "unittest from solution import solution_part_one, solution_part_two class TestPartOne(unittest.TestCase): TEST_INPUT = \"\"\" set a", "class TestPartOne(unittest.TestCase): TEST_INPUT = \"\"\" set a 1 add a 2 mul a", "from solution import solution_part_one, solution_part_two class TestPartOne(unittest.TestCase): TEST_INPUT = \"\"\" set a 1", "jgz a -1 set a 1 jgz a -2 \"\"\" def test_one(self): self.assertEqual(solution_part_one(self.TEST_INPUT),", "import solution_part_one, solution_part_two class TestPartOne(unittest.TestCase): TEST_INPUT = \"\"\" set a 1 add a", "a mod a 5 snd a set a 0 rcv a jgz a", "5 snd a set a 0 rcv a jgz a -1 set a", "import unittest from solution import 
solution_part_one, solution_part_two class TestPartOne(unittest.TestCase): TEST_INPUT = \"\"\" set", "TEST_INPUT = \"\"\" set a 1 add a 2 mul a a mod", "a 1 add a 2 mul a a mod a 5 snd a", "a 5 snd a set a 0 rcv a jgz a -1 set", "0 rcv a jgz a -1 set a 1 jgz a -2 \"\"\"", "mod a 5 snd a set a 0 rcv a jgz a -1", "4) # class TestPartTwo(unittest.TestCase): # def test_one(self): # self.assertEqual(solution_part_two(3), 1222153) if __name__ ==", "# class TestPartTwo(unittest.TestCase): # def test_one(self): # self.assertEqual(solution_part_two(3), 1222153) if __name__ == \"__main__\":", "set a 1 jgz a -2 \"\"\" def test_one(self): self.assertEqual(solution_part_one(self.TEST_INPUT), 4) # class", "test_one(self): self.assertEqual(solution_part_one(self.TEST_INPUT), 4) # class TestPartTwo(unittest.TestCase): # def test_one(self): # self.assertEqual(solution_part_two(3), 1222153) if", "a 0 rcv a jgz a -1 set a 1 jgz a -2", "rcv a jgz a -1 set a 1 jgz a -2 \"\"\" def", "solution_part_two class TestPartOne(unittest.TestCase): TEST_INPUT = \"\"\" set a 1 add a 2 mul", "<reponame>AlecRosenbaum/adventofcode2017<gh_stars>0 import unittest from solution import solution_part_one, solution_part_two class TestPartOne(unittest.TestCase): TEST_INPUT = \"\"\"", "a 2 mul a a mod a 5 snd a set a 0", "add a 2 mul a a mod a 5 snd a set a", "-1 set a 1 jgz a -2 \"\"\" def test_one(self): self.assertEqual(solution_part_one(self.TEST_INPUT), 4) #", "class TestPartTwo(unittest.TestCase): # def test_one(self): # self.assertEqual(solution_part_two(3), 1222153) if __name__ == \"__main__\": unittest.main()", "a a mod a 5 snd a set a 0 rcv a jgz", "solution import solution_part_one, solution_part_two class TestPartOne(unittest.TestCase): TEST_INPUT = \"\"\" set a 1 add", "jgz a -2 \"\"\" def test_one(self): self.assertEqual(solution_part_one(self.TEST_INPUT), 4) # class TestPartTwo(unittest.TestCase): # def", "a -2 \"\"\" def test_one(self): self.assertEqual(solution_part_one(self.TEST_INPUT), 4) 
# class TestPartTwo(unittest.TestCase): # def test_one(self):", "-2 \"\"\" def test_one(self): self.assertEqual(solution_part_one(self.TEST_INPUT), 4) # class TestPartTwo(unittest.TestCase): # def test_one(self): #", "snd a set a 0 rcv a jgz a -1 set a 1", "a set a 0 rcv a jgz a -1 set a 1 jgz", "a -1 set a 1 jgz a -2 \"\"\" def test_one(self): self.assertEqual(solution_part_one(self.TEST_INPUT), 4)", "set a 1 add a 2 mul a a mod a 5 snd", "= \"\"\" set a 1 add a 2 mul a a mod a" ]
[ "TransformerTransform transform = TransformerTransform(max_seq_length=128) for x, y in transform.file_to_inputs(MSRA_NER_TRAIN): assert len(x) == len(y)", "= TransformerTransform(max_seq_length=128) for x, y in transform.file_to_inputs(MSRA_NER_TRAIN): assert len(x) == len(y) if not", "y in transform.file_to_inputs(MSRA_NER_TRAIN): assert len(x) == len(y) if not len(x) or len(x) >", "hanlp.datasets.ner.msra import MSRA_NER_TRAIN from hanlp.components.taggers.transformers.transformer_transform import TransformerTransform transform = TransformerTransform(max_seq_length=128) for x, y", "<reponame>v-smwang/HanLP # -*- coding:utf-8 -*- # Author: hankcs # Date: 2020-01-11 18:37 from", "from hanlp.datasets.ner.msra import MSRA_NER_TRAIN from hanlp.components.taggers.transformers.transformer_transform import TransformerTransform transform = TransformerTransform(max_seq_length=128) for x,", "coding:utf-8 -*- # Author: hankcs # Date: 2020-01-11 18:37 from hanlp.datasets.ner.msra import MSRA_NER_TRAIN", "transform = TransformerTransform(max_seq_length=128) for x, y in transform.file_to_inputs(MSRA_NER_TRAIN): assert len(x) == len(y) if", "Author: hankcs # Date: 2020-01-11 18:37 from hanlp.datasets.ner.msra import MSRA_NER_TRAIN from hanlp.components.taggers.transformers.transformer_transform import", "for x, y in transform.file_to_inputs(MSRA_NER_TRAIN): assert len(x) == len(y) if not len(x) or", "MSRA_NER_TRAIN from hanlp.components.taggers.transformers.transformer_transform import TransformerTransform transform = TransformerTransform(max_seq_length=128) for x, y in transform.file_to_inputs(MSRA_NER_TRAIN):", "-*- # Author: hankcs # Date: 2020-01-11 18:37 from hanlp.datasets.ner.msra import MSRA_NER_TRAIN from", "x, y in transform.file_to_inputs(MSRA_NER_TRAIN): assert len(x) == len(y) if not len(x) or len(x)", "import MSRA_NER_TRAIN from hanlp.components.taggers.transformers.transformer_transform import TransformerTransform transform = 
TransformerTransform(max_seq_length=128) for x, y in", "import TransformerTransform transform = TransformerTransform(max_seq_length=128) for x, y in transform.file_to_inputs(MSRA_NER_TRAIN): assert len(x) ==", "18:37 from hanlp.datasets.ner.msra import MSRA_NER_TRAIN from hanlp.components.taggers.transformers.transformer_transform import TransformerTransform transform = TransformerTransform(max_seq_length=128) for", "TransformerTransform(max_seq_length=128) for x, y in transform.file_to_inputs(MSRA_NER_TRAIN): assert len(x) == len(y) if not len(x)", "transform.file_to_inputs(MSRA_NER_TRAIN): assert len(x) == len(y) if not len(x) or len(x) > 126: print(x)", "in transform.file_to_inputs(MSRA_NER_TRAIN): assert len(x) == len(y) if not len(x) or len(x) > 126:", "from hanlp.components.taggers.transformers.transformer_transform import TransformerTransform transform = TransformerTransform(max_seq_length=128) for x, y in transform.file_to_inputs(MSRA_NER_TRAIN): assert", "# -*- coding:utf-8 -*- # Author: hankcs # Date: 2020-01-11 18:37 from hanlp.datasets.ner.msra", "hankcs # Date: 2020-01-11 18:37 from hanlp.datasets.ner.msra import MSRA_NER_TRAIN from hanlp.components.taggers.transformers.transformer_transform import TransformerTransform", "# Author: hankcs # Date: 2020-01-11 18:37 from hanlp.datasets.ner.msra import MSRA_NER_TRAIN from hanlp.components.taggers.transformers.transformer_transform", "Date: 2020-01-11 18:37 from hanlp.datasets.ner.msra import MSRA_NER_TRAIN from hanlp.components.taggers.transformers.transformer_transform import TransformerTransform transform =", "hanlp.components.taggers.transformers.transformer_transform import TransformerTransform transform = TransformerTransform(max_seq_length=128) for x, y in transform.file_to_inputs(MSRA_NER_TRAIN): assert len(x)", "# Date: 2020-01-11 18:37 from hanlp.datasets.ner.msra import MSRA_NER_TRAIN from hanlp.components.taggers.transformers.transformer_transform import TransformerTransform transform", "-*- 
coding:utf-8 -*- # Author: hankcs # Date: 2020-01-11 18:37 from hanlp.datasets.ner.msra import", "2020-01-11 18:37 from hanlp.datasets.ner.msra import MSRA_NER_TRAIN from hanlp.components.taggers.transformers.transformer_transform import TransformerTransform transform = TransformerTransform(max_seq_length=128)" ]
[ "odd=s[i-maxlen//2:i+maxlen//2+1] even=s[i-maxlen//2:i+maxlen//2+2] if (odd==odd[::-1]) and len(odd)>maxlen: rev=odd maxlen=len(odd) continue if(even==even[::-1] and len(even)>maxlen): maxlen=len(even)", "str) -> str: if s==s[::-1]:return s maxlen,middle=1,0 i=1 len_s=len(s) rev=s[0] if(s[0]==s[1]): rev=s[0:2] maxlen=2", "(odd==odd[::-1]) and len(odd)>maxlen: rev=odd maxlen=len(odd) continue if(even==even[::-1] and len(even)>maxlen): maxlen=len(even) rev=even continue i+=1", "if(s[0]==s[1]): rev=s[0:2] maxlen=2 while i<len_s: if(maxlen%2!=0): odd=s[i-maxlen//2-1:i+maxlen//2+2] even=s[i-maxlen//2:i+maxlen//2+2] else: odd=s[i-maxlen//2:i+maxlen//2+1] even=s[i-maxlen//2:i+maxlen//2+2] if (odd==odd[::-1])", "def XXX(self, s: str) -> str: if s==s[::-1]:return s maxlen,middle=1,0 i=1 len_s=len(s) rev=s[0]", "maxlen=2 while i<len_s: if(maxlen%2!=0): odd=s[i-maxlen//2-1:i+maxlen//2+2] even=s[i-maxlen//2:i+maxlen//2+2] else: odd=s[i-maxlen//2:i+maxlen//2+1] even=s[i-maxlen//2:i+maxlen//2+2] if (odd==odd[::-1]) and len(odd)>maxlen:", "len(odd)>maxlen: rev=odd maxlen=len(odd) continue if(even==even[::-1] and len(even)>maxlen): maxlen=len(even) rev=even continue i+=1 return rev", "<filename>Dataset/Leetcode/train/5/479.py class Solution: def XXX(self, s: str) -> str: if s==s[::-1]:return s maxlen,middle=1,0", "and len(odd)>maxlen: rev=odd maxlen=len(odd) continue if(even==even[::-1] and len(even)>maxlen): maxlen=len(even) rev=even continue i+=1 return", "len_s=len(s) rev=s[0] if(s[0]==s[1]): rev=s[0:2] maxlen=2 while i<len_s: if(maxlen%2!=0): odd=s[i-maxlen//2-1:i+maxlen//2+2] even=s[i-maxlen//2:i+maxlen//2+2] else: odd=s[i-maxlen//2:i+maxlen//2+1] even=s[i-maxlen//2:i+maxlen//2+2]", "Solution: def XXX(self, s: str) -> str: if s==s[::-1]:return s maxlen,middle=1,0 i=1 len_s=len(s)", "if(maxlen%2!=0): odd=s[i-maxlen//2-1:i+maxlen//2+2] even=s[i-maxlen//2:i+maxlen//2+2] else: odd=s[i-maxlen//2:i+maxlen//2+1] even=s[i-maxlen//2:i+maxlen//2+2] if (odd==odd[::-1]) and 
len(odd)>maxlen: rev=odd maxlen=len(odd) continue", "s: str) -> str: if s==s[::-1]:return s maxlen,middle=1,0 i=1 len_s=len(s) rev=s[0] if(s[0]==s[1]): rev=s[0:2]", "XXX(self, s: str) -> str: if s==s[::-1]:return s maxlen,middle=1,0 i=1 len_s=len(s) rev=s[0] if(s[0]==s[1]):", "if s==s[::-1]:return s maxlen,middle=1,0 i=1 len_s=len(s) rev=s[0] if(s[0]==s[1]): rev=s[0:2] maxlen=2 while i<len_s: if(maxlen%2!=0):", "i=1 len_s=len(s) rev=s[0] if(s[0]==s[1]): rev=s[0:2] maxlen=2 while i<len_s: if(maxlen%2!=0): odd=s[i-maxlen//2-1:i+maxlen//2+2] even=s[i-maxlen//2:i+maxlen//2+2] else: odd=s[i-maxlen//2:i+maxlen//2+1]", "even=s[i-maxlen//2:i+maxlen//2+2] else: odd=s[i-maxlen//2:i+maxlen//2+1] even=s[i-maxlen//2:i+maxlen//2+2] if (odd==odd[::-1]) and len(odd)>maxlen: rev=odd maxlen=len(odd) continue if(even==even[::-1] and", "even=s[i-maxlen//2:i+maxlen//2+2] if (odd==odd[::-1]) and len(odd)>maxlen: rev=odd maxlen=len(odd) continue if(even==even[::-1] and len(even)>maxlen): maxlen=len(even) rev=even", "while i<len_s: if(maxlen%2!=0): odd=s[i-maxlen//2-1:i+maxlen//2+2] even=s[i-maxlen//2:i+maxlen//2+2] else: odd=s[i-maxlen//2:i+maxlen//2+1] even=s[i-maxlen//2:i+maxlen//2+2] if (odd==odd[::-1]) and len(odd)>maxlen: rev=odd", "i<len_s: if(maxlen%2!=0): odd=s[i-maxlen//2-1:i+maxlen//2+2] even=s[i-maxlen//2:i+maxlen//2+2] else: odd=s[i-maxlen//2:i+maxlen//2+1] even=s[i-maxlen//2:i+maxlen//2+2] if (odd==odd[::-1]) and len(odd)>maxlen: rev=odd maxlen=len(odd)", "if (odd==odd[::-1]) and len(odd)>maxlen: rev=odd maxlen=len(odd) continue if(even==even[::-1] and len(even)>maxlen): maxlen=len(even) rev=even continue", "maxlen,middle=1,0 i=1 len_s=len(s) rev=s[0] if(s[0]==s[1]): rev=s[0:2] maxlen=2 while i<len_s: if(maxlen%2!=0): odd=s[i-maxlen//2-1:i+maxlen//2+2] even=s[i-maxlen//2:i+maxlen//2+2] else:", "str: if s==s[::-1]:return s maxlen,middle=1,0 i=1 len_s=len(s) rev=s[0] if(s[0]==s[1]): rev=s[0:2] maxlen=2 while i<len_s:", "rev=s[0] if(s[0]==s[1]): rev=s[0:2] maxlen=2 while 
i<len_s: if(maxlen%2!=0): odd=s[i-maxlen//2-1:i+maxlen//2+2] even=s[i-maxlen//2:i+maxlen//2+2] else: odd=s[i-maxlen//2:i+maxlen//2+1] even=s[i-maxlen//2:i+maxlen//2+2] if", "class Solution: def XXX(self, s: str) -> str: if s==s[::-1]:return s maxlen,middle=1,0 i=1", "s==s[::-1]:return s maxlen,middle=1,0 i=1 len_s=len(s) rev=s[0] if(s[0]==s[1]): rev=s[0:2] maxlen=2 while i<len_s: if(maxlen%2!=0): odd=s[i-maxlen//2-1:i+maxlen//2+2]", "odd=s[i-maxlen//2-1:i+maxlen//2+2] even=s[i-maxlen//2:i+maxlen//2+2] else: odd=s[i-maxlen//2:i+maxlen//2+1] even=s[i-maxlen//2:i+maxlen//2+2] if (odd==odd[::-1]) and len(odd)>maxlen: rev=odd maxlen=len(odd) continue if(even==even[::-1]", "-> str: if s==s[::-1]:return s maxlen,middle=1,0 i=1 len_s=len(s) rev=s[0] if(s[0]==s[1]): rev=s[0:2] maxlen=2 while", "else: odd=s[i-maxlen//2:i+maxlen//2+1] even=s[i-maxlen//2:i+maxlen//2+2] if (odd==odd[::-1]) and len(odd)>maxlen: rev=odd maxlen=len(odd) continue if(even==even[::-1] and len(even)>maxlen):", "s maxlen,middle=1,0 i=1 len_s=len(s) rev=s[0] if(s[0]==s[1]): rev=s[0:2] maxlen=2 while i<len_s: if(maxlen%2!=0): odd=s[i-maxlen//2-1:i+maxlen//2+2] even=s[i-maxlen//2:i+maxlen//2+2]", "rev=s[0:2] maxlen=2 while i<len_s: if(maxlen%2!=0): odd=s[i-maxlen//2-1:i+maxlen//2+2] even=s[i-maxlen//2:i+maxlen//2+2] else: odd=s[i-maxlen//2:i+maxlen//2+1] even=s[i-maxlen//2:i+maxlen//2+2] if (odd==odd[::-1]) and" ]
[ "sol(arr, n, k): f = None for i in range(n): if arr[i] <=", "<reponame>vikas-t/DS-Algo #!/usr/bin/python #https://practice.geeksforgeeks.org/problems/floor-in-a-sorted-array/0 def sol(arr, n, k): f = None for i in", "def sol(arr, n, k): f = None for i in range(n): if arr[i]", "k): f = None for i in range(n): if arr[i] <= k: f", "#https://practice.geeksforgeeks.org/problems/floor-in-a-sorted-array/0 def sol(arr, n, k): f = None for i in range(n): if", "in range(n): if arr[i] <= k: f = i if not f: return", "None for i in range(n): if arr[i] <= k: f = i if", "range(n): if arr[i] <= k: f = i if not f: return -1", "for i in range(n): if arr[i] <= k: f = i if not", "if arr[i] <= k: f = i if not f: return -1 return", "= None for i in range(n): if arr[i] <= k: f = i", "arr[i] <= k: f = i if not f: return -1 return f", "n, k): f = None for i in range(n): if arr[i] <= k:", "f = None for i in range(n): if arr[i] <= k: f =", "#!/usr/bin/python #https://practice.geeksforgeeks.org/problems/floor-in-a-sorted-array/0 def sol(arr, n, k): f = None for i in range(n):", "i in range(n): if arr[i] <= k: f = i if not f:" ]
[ "image.append((x, y)) return { 'key_id': key_id, 'country_code': country_code, 'recognized': recognized, 'timestamp': timestamp, 'image':", "'country_code': country_code, 'recognized': recognized, 'timestamp': timestamp, 'image': image } def unpack_drawings(filename): with open(filename,", "from struct import unpack def unpack_drawing(file_handle): key_id, = unpack('Q', file_handle.read(8)) country_code, = unpack('2s',", "str(n_points) + 'B' x = unpack(fmt, file_handle.read(n_points)) y = unpack(fmt, file_handle.read(n_points)) image.append((x, y))", "recognized, 'timestamp': timestamp, 'image': image } def unpack_drawings(filename): with open(filename, 'rb') as f:", "= unpack('2s', file_handle.read(2)) recognized, = unpack('b', file_handle.read(1)) timestamp, = unpack('I', file_handle.read(4)) n_strokes, =", "'recognized': recognized, 'timestamp': timestamp, 'image': image } def unpack_drawings(filename): with open(filename, 'rb') as", "i in range(n_strokes): n_points, = unpack('H', file_handle.read(2)) fmt = str(n_points) + 'B' x", "[] for i in range(n_strokes): n_points, = unpack('H', file_handle.read(2)) fmt = str(n_points) +", "file_handle.read(n_points)) y = unpack(fmt, file_handle.read(n_points)) image.append((x, y)) return { 'key_id': key_id, 'country_code': country_code,", "return { 'key_id': key_id, 'country_code': country_code, 'recognized': recognized, 'timestamp': timestamp, 'image': image }", "unpack('I', file_handle.read(4)) n_strokes, = unpack('H', file_handle.read(2)) image = [] for i in range(n_strokes):", "file_handle.read(4)) n_strokes, = unpack('H', file_handle.read(2)) image = [] for i in range(n_strokes): n_points,", "fmt = str(n_points) + 'B' x = unpack(fmt, file_handle.read(n_points)) y = unpack(fmt, file_handle.read(n_points))", "struct.error: break for drawing in unpack_drawings('test_image2.jpeg'): # do something with the drawing print(drawing['image'])", "except struct.error: break for drawing in unpack_drawings('test_image2.jpeg'): # 
do something with the drawing", "yield unpack_drawing(f) except struct.error: break for drawing in unpack_drawings('test_image2.jpeg'): # do something with", "x = unpack(fmt, file_handle.read(n_points)) y = unpack(fmt, file_handle.read(n_points)) image.append((x, y)) return { 'key_id':", "= unpack('H', file_handle.read(2)) fmt = str(n_points) + 'B' x = unpack(fmt, file_handle.read(n_points)) y", "'timestamp': timestamp, 'image': image } def unpack_drawings(filename): with open(filename, 'rb') as f: while", "unpack_drawings(filename): with open(filename, 'rb') as f: while True: try: yield unpack_drawing(f) except struct.error:", "= unpack(fmt, file_handle.read(n_points)) y = unpack(fmt, file_handle.read(n_points)) image.append((x, y)) return { 'key_id': key_id,", "file_handle.read(2)) recognized, = unpack('b', file_handle.read(1)) timestamp, = unpack('I', file_handle.read(4)) n_strokes, = unpack('H', file_handle.read(2))", "try: yield unpack_drawing(f) except struct.error: break for drawing in unpack_drawings('test_image2.jpeg'): # do something", "as f: while True: try: yield unpack_drawing(f) except struct.error: break for drawing in", "'rb') as f: while True: try: yield unpack_drawing(f) except struct.error: break for drawing", "unpack('b', file_handle.read(1)) timestamp, = unpack('I', file_handle.read(4)) n_strokes, = unpack('H', file_handle.read(2)) image = []", "y = unpack(fmt, file_handle.read(n_points)) image.append((x, y)) return { 'key_id': key_id, 'country_code': country_code, 'recognized':", "country_code, = unpack('2s', file_handle.read(2)) recognized, = unpack('b', file_handle.read(1)) timestamp, = unpack('I', file_handle.read(4)) n_strokes,", "unpack(fmt, file_handle.read(n_points)) y = unpack(fmt, file_handle.read(n_points)) image.append((x, y)) return { 'key_id': key_id, 'country_code':", "import struct from struct import unpack def unpack_drawing(file_handle): key_id, = unpack('Q', file_handle.read(8)) country_code,", "} def 
unpack_drawings(filename): with open(filename, 'rb') as f: while True: try: yield unpack_drawing(f)", "unpack_drawing(f) except struct.error: break for drawing in unpack_drawings('test_image2.jpeg'): # do something with the", "True: try: yield unpack_drawing(f) except struct.error: break for drawing in unpack_drawings('test_image2.jpeg'): # do", "= unpack('Q', file_handle.read(8)) country_code, = unpack('2s', file_handle.read(2)) recognized, = unpack('b', file_handle.read(1)) timestamp, =", "unpack(fmt, file_handle.read(n_points)) image.append((x, y)) return { 'key_id': key_id, 'country_code': country_code, 'recognized': recognized, 'timestamp':", "n_strokes, = unpack('H', file_handle.read(2)) image = [] for i in range(n_strokes): n_points, =", "unpack def unpack_drawing(file_handle): key_id, = unpack('Q', file_handle.read(8)) country_code, = unpack('2s', file_handle.read(2)) recognized, =", "in range(n_strokes): n_points, = unpack('H', file_handle.read(2)) fmt = str(n_points) + 'B' x =", "open(filename, 'rb') as f: while True: try: yield unpack_drawing(f) except struct.error: break for", "file_handle.read(n_points)) image.append((x, y)) return { 'key_id': key_id, 'country_code': country_code, 'recognized': recognized, 'timestamp': timestamp,", "image = [] for i in range(n_strokes): n_points, = unpack('H', file_handle.read(2)) fmt =", "key_id, = unpack('Q', file_handle.read(8)) country_code, = unpack('2s', file_handle.read(2)) recognized, = unpack('b', file_handle.read(1)) timestamp,", "unpack('H', file_handle.read(2)) image = [] for i in range(n_strokes): n_points, = unpack('H', file_handle.read(2))", "recognized, = unpack('b', file_handle.read(1)) timestamp, = unpack('I', file_handle.read(4)) n_strokes, = unpack('H', file_handle.read(2)) image", "unpack('H', file_handle.read(2)) fmt = str(n_points) + 'B' x = unpack(fmt, file_handle.read(n_points)) y =", "= unpack('b', file_handle.read(1)) timestamp, = unpack('I', file_handle.read(4)) n_strokes, = unpack('H', 
file_handle.read(2)) image =", "'image': image } def unpack_drawings(filename): with open(filename, 'rb') as f: while True: try:", "unpack('2s', file_handle.read(2)) recognized, = unpack('b', file_handle.read(1)) timestamp, = unpack('I', file_handle.read(4)) n_strokes, = unpack('H',", "y)) return { 'key_id': key_id, 'country_code': country_code, 'recognized': recognized, 'timestamp': timestamp, 'image': image", "file_handle.read(2)) image = [] for i in range(n_strokes): n_points, = unpack('H', file_handle.read(2)) fmt", "file_handle.read(8)) country_code, = unpack('2s', file_handle.read(2)) recognized, = unpack('b', file_handle.read(1)) timestamp, = unpack('I', file_handle.read(4))", "timestamp, = unpack('I', file_handle.read(4)) n_strokes, = unpack('H', file_handle.read(2)) image = [] for i", "struct import unpack def unpack_drawing(file_handle): key_id, = unpack('Q', file_handle.read(8)) country_code, = unpack('2s', file_handle.read(2))", "unpack_drawing(file_handle): key_id, = unpack('Q', file_handle.read(8)) country_code, = unpack('2s', file_handle.read(2)) recognized, = unpack('b', file_handle.read(1))", "file_handle.read(1)) timestamp, = unpack('I', file_handle.read(4)) n_strokes, = unpack('H', file_handle.read(2)) image = [] for", "file_handle.read(2)) fmt = str(n_points) + 'B' x = unpack(fmt, file_handle.read(n_points)) y = unpack(fmt,", "country_code, 'recognized': recognized, 'timestamp': timestamp, 'image': image } def unpack_drawings(filename): with open(filename, 'rb')", "import unpack def unpack_drawing(file_handle): key_id, = unpack('Q', file_handle.read(8)) country_code, = unpack('2s', file_handle.read(2)) recognized,", "for i in range(n_strokes): n_points, = unpack('H', file_handle.read(2)) fmt = str(n_points) + 'B'", "n_points, = unpack('H', file_handle.read(2)) fmt = str(n_points) + 'B' x = unpack(fmt, file_handle.read(n_points))", "def unpack_drawing(file_handle): key_id, = unpack('Q', file_handle.read(8)) country_code, = unpack('2s', 
file_handle.read(2)) recognized, = unpack('b',", "timestamp, 'image': image } def unpack_drawings(filename): with open(filename, 'rb') as f: while True:", "= unpack(fmt, file_handle.read(n_points)) image.append((x, y)) return { 'key_id': key_id, 'country_code': country_code, 'recognized': recognized,", "f: while True: try: yield unpack_drawing(f) except struct.error: break for drawing in unpack_drawings('test_image2.jpeg'):", "= str(n_points) + 'B' x = unpack(fmt, file_handle.read(n_points)) y = unpack(fmt, file_handle.read(n_points)) image.append((x,", "while True: try: yield unpack_drawing(f) except struct.error: break for drawing in unpack_drawings('test_image2.jpeg'): #", "struct from struct import unpack def unpack_drawing(file_handle): key_id, = unpack('Q', file_handle.read(8)) country_code, =", "= unpack('H', file_handle.read(2)) image = [] for i in range(n_strokes): n_points, = unpack('H',", "= unpack('I', file_handle.read(4)) n_strokes, = unpack('H', file_handle.read(2)) image = [] for i in", "'key_id': key_id, 'country_code': country_code, 'recognized': recognized, 'timestamp': timestamp, 'image': image } def unpack_drawings(filename):", "def unpack_drawings(filename): with open(filename, 'rb') as f: while True: try: yield unpack_drawing(f) except", "+ 'B' x = unpack(fmt, file_handle.read(n_points)) y = unpack(fmt, file_handle.read(n_points)) image.append((x, y)) return", "'B' x = unpack(fmt, file_handle.read(n_points)) y = unpack(fmt, file_handle.read(n_points)) image.append((x, y)) return {", "{ 'key_id': key_id, 'country_code': country_code, 'recognized': recognized, 'timestamp': timestamp, 'image': image } def", "range(n_strokes): n_points, = unpack('H', file_handle.read(2)) fmt = str(n_points) + 'B' x = unpack(fmt,", "image } def unpack_drawings(filename): with open(filename, 'rb') as f: while True: try: yield", "with open(filename, 'rb') as f: while True: try: yield unpack_drawing(f) except struct.error: break", "= [] for i in range(n_strokes): 
n_points, = unpack('H', file_handle.read(2)) fmt = str(n_points)", "unpack('Q', file_handle.read(8)) country_code, = unpack('2s', file_handle.read(2)) recognized, = unpack('b', file_handle.read(1)) timestamp, = unpack('I',", "key_id, 'country_code': country_code, 'recognized': recognized, 'timestamp': timestamp, 'image': image } def unpack_drawings(filename): with", "<reponame>aadiljamal/project import struct from struct import unpack def unpack_drawing(file_handle): key_id, = unpack('Q', file_handle.read(8))" ]
[ "again. This method is thread safe. For algorithm used see: https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm This is", "For algorithm used see: https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm This is a token bucket algorithm without queue.", "= self.rate self.last_check = self.now() \"\"\" Set up the rate limiter with the", "given number of actions in the given interval in seconds. You need to", "A rate limiter to ensure that only a given number of call are", "= self.now() if self.rate < 0: print(\"set up RateLimiter: disabled (no rate limiting)\")", "= self.now() \"\"\" Set up the rate limiter with the given number of", "the given number of actions in the given interval in seconds. You need", "acquire(self): # return immediately if rate limit is disabled if self.rate<0: return #", "your action that should respect the rate limit. In case the rate limit", "rate limiter with the given number of actions in the given interval in", "first to configure the RateLimiter \"\"\" def setup(self, number_actions, interval): if number_actions >", "print(\"set up RateLimiter: disabled (no rate limiting)\") else: print(\"set up RateLimiter: \",self.rate,\" actions", "equal 1.0\") self.rate=float(number_actions) self.per=float(interval) self.allowance = self.rate self.last_check = self.now() if self.rate <", "rate limit. In case the rate limit is exceeded this method blocks until", "limiter to ensure that only a given number of call are made per", "this method blocks until the given number of actions per interval is fulfiled", "time interval \"\"\" class RateLimiter(SingletonMixin): \"\"\" Constructor with 50 actions per second as", "def now(self): return time.time() \"\"\" Call this method before you call your action", "time.time() \"\"\" Call this method before you call your action that should respect", "fulfiled again. This method is thread safe. 
For algorithm used see: https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm This", "with the given number of actions in the given interval in seconds. You", "seconds. You need to call this method first to configure the RateLimiter \"\"\"", "this method before you call your action that should respect the rate limit.", "import time from downloader.ThreadSafe import SingletonMixin from downloader.ThreadSafe import synchronized \"\"\" A rate", "is fulfiled again. This method is thread safe. For algorithm used see: https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm", "need to call this method first to configure the RateLimiter \"\"\" def setup(self,", "self.last_check = current self.allowance += time_passed * (self.rate / self.per) if self.allowance >", "disabled (no rate limiting)\") else: print(\"set up RateLimiter: \",self.rate,\" actions per \",self.per,\" seconds\")", "print(\"set up RateLimiter: \",self.rate,\" actions per \",self.per,\" seconds\") def now(self): return time.time() \"\"\"", "actions per \",self.per,\" seconds\") def now(self): return time.time() \"\"\" Call this method before", "used see: https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm This is a token bucket algorithm without queue. The bucket", "seconds. \"\"\" @synchronized def acquire(self): # return immediately if rate limit is disabled", "> self.rate: self.allowance = self.rate self.allowance -= 1.0 if self.allowance < 1.0: #", "if self.rate < 0: print(\"set up RateLimiter: disabled (no rate limiting)\") else: print(\"set", "every rate per seconds. 
\"\"\" @synchronized def acquire(self): # return immediately if rate", "rate limiter to ensure that only a given number of call are made", "\"\"\" Set up the rate limiter with the given number of actions in", "from downloader.ThreadSafe import SingletonMixin from downloader.ThreadSafe import synchronized \"\"\" A rate limiter to", "self.now() if self.rate < 0: print(\"set up RateLimiter: disabled (no rate limiting)\") else:", "limit is exceeded this method blocks until the given number of actions per", "are made per given time interval \"\"\" class RateLimiter(SingletonMixin): \"\"\" Constructor with 50", "= current - self.last_check self.last_check = current self.allowance += time_passed * (self.rate /", "to ensure that only a given number of call are made per given", "method before you call your action that should respect the rate limit. In", "+= time_passed * (self.rate / self.per) if self.allowance > self.rate: self.allowance = self.rate", "actions in the given interval in seconds. You need to call this method", "coding: utf-8 -*-1 import threading import functools import time from downloader.ThreadSafe import SingletonMixin", "respect the rate limit. In case the rate limit is exceeded this method", "functools import time from downloader.ThreadSafe import SingletonMixin from downloader.ThreadSafe import synchronized \"\"\" A", "@synchronized def acquire(self): # return immediately if rate limit is disabled if self.rate<0:", "given number of call are made per given time interval \"\"\" class RateLimiter(SingletonMixin):", "actions per interval is fulfiled again. This method is thread safe. For algorithm", "\",self.per,\" seconds\") def now(self): return time.time() \"\"\" Call this method before you call", "\"\"\" class RateLimiter(SingletonMixin): \"\"\" Constructor with 50 actions per second as default. 
Call", "or equal 1.0\") self.rate=float(number_actions) self.per=float(interval) self.allowance = self.rate self.last_check = self.now() if self.rate", "if self.rate<0: return # else process the acquire request, and block until token", "import SingletonMixin from downloader.ThreadSafe import synchronized \"\"\" A rate limiter to ensure that", "exceeded this method blocks until the given number of actions per interval is", "only a given number of call are made per given time interval \"\"\"", "RateLimiter: disabled (no rate limiting)\") else: print(\"set up RateLimiter: \",self.rate,\" actions per \",self.per,\"", "is rate. The allowance += … line is an optimization of adding a", "without queue. The bucket is allowance. The bucket size is rate. The allowance", "setup to reconfigure \"\"\" def __init__(self): self.rate=50.0 self.per=1.0 self.allowance = self.rate self.last_check =", "self.per=1.0 self.allowance = self.rate self.last_check = self.now() \"\"\" Set up the rate limiter", "#!bin/python3 # -*- coding: utf-8 -*-1 import threading import functools import time from", "see: https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm This is a token bucket algorithm without queue. The bucket is", "RateLimiter: \",self.rate,\" actions per \",self.per,\" seconds\") def now(self): return time.time() \"\"\" Call this", "this method first to configure the RateLimiter \"\"\" def setup(self, number_actions, interval): if", "to configure the RateLimiter \"\"\" def setup(self, number_actions, interval): if number_actions > 0.0", "Set up the rate limiter with the given number of actions in the", "as default. 
Call setup to reconfigure \"\"\" def __init__(self): self.rate=50.0 self.per=1.0 self.allowance =", "Call setup to reconfigure \"\"\" def __init__(self): self.rate=50.0 self.per=1.0 self.allowance = self.rate self.last_check", "> 0.0 and number_actions < 1.0: raise Exception(\"number_actions needs to be greater or", "made per given time interval \"\"\" class RateLimiter(SingletonMixin): \"\"\" Constructor with 50 actions", "action that should respect the rate limit. In case the rate limit is", "0: print(\"set up RateLimiter: disabled (no rate limiting)\") else: print(\"set up RateLimiter: \",self.rate,\"", "if number_actions > 0.0 and number_actions < 1.0: raise Exception(\"number_actions needs to be", "the rate limiter with the given number of actions in the given interval", "the rate limit. In case the rate limit is exceeded this method blocks", "be greater or equal 1.0\") self.rate=float(number_actions) self.per=float(interval) self.allowance = self.rate self.last_check = self.now()", "bucket algorithm without queue. The bucket is allowance. The bucket size is rate.", "is thread safe. 
For algorithm used see: https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm This is a token bucket", "else process the acquire request, and block until token is available current =", "acquire request, and block until token is available current = self.now() time_passed =", "a given number of call are made per given time interval \"\"\" class", "limiting)\") else: print(\"set up RateLimiter: \",self.rate,\" actions per \",self.per,\" seconds\") def now(self): return", "interval \"\"\" class RateLimiter(SingletonMixin): \"\"\" Constructor with 50 actions per second as default.", "import functools import time from downloader.ThreadSafe import SingletonMixin from downloader.ThreadSafe import synchronized \"\"\"", "threading import functools import time from downloader.ThreadSafe import SingletonMixin from downloader.ThreadSafe import synchronized", "interval in seconds. You need to call this method first to configure the", "1.0\") self.rate=float(number_actions) self.per=float(interval) self.allowance = self.rate self.last_check = self.now() if self.rate < 0:", "class RateLimiter(SingletonMixin): \"\"\" Constructor with 50 actions per second as default. Call setup", "self.rate=float(number_actions) self.per=float(interval) self.allowance = self.rate self.last_check = self.now() if self.rate < 0: print(\"set", "is available current = self.now() time_passed = current - self.last_check self.last_check = current", "self.rate: self.allowance = self.rate self.allowance -= 1.0 if self.allowance < 1.0: # wait", "\"\"\" Constructor with 50 actions per second as default. Call setup to reconfigure", "given interval in seconds. You need to call this method first to configure", "blocks until the given number of actions per interval is fulfiled again. This", "This is a token bucket algorithm without queue. The bucket is allowance. 
The", "-*- coding: utf-8 -*-1 import threading import functools import time from downloader.ThreadSafe import", "call your action that should respect the rate limit. In case the rate", "current = self.now() time_passed = current - self.last_check self.last_check = current self.allowance +=", "number of call are made per given time interval \"\"\" class RateLimiter(SingletonMixin): \"\"\"", "In case the rate limit is exceeded this method blocks until the given", "interval is fulfiled again. This method is thread safe. For algorithm used see:", "algorithm used see: https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm This is a token bucket algorithm without queue. The", "< 1.0: raise Exception(\"number_actions needs to be greater or equal 1.0\") self.rate=float(number_actions) self.per=float(interval)", "to reconfigure \"\"\" def __init__(self): self.rate=50.0 self.per=1.0 self.allowance = self.rate self.last_check = self.now()", "of adding a token every rate per seconds. \"\"\" @synchronized def acquire(self): #", "= self.now() time_passed = current - self.last_check self.last_check = current self.allowance += time_passed", "+= … line is an optimization of adding a token every rate per", "bucket size is rate. The allowance += … line is an optimization of", "The bucket size is rate. 
The allowance += … line is an optimization", "else: print(\"set up RateLimiter: \",self.rate,\" actions per \",self.per,\" seconds\") def now(self): return time.time()", "is disabled if self.rate<0: return # else process the acquire request, and block", "import threading import functools import time from downloader.ThreadSafe import SingletonMixin from downloader.ThreadSafe import", "from downloader.ThreadSafe import synchronized \"\"\" A rate limiter to ensure that only a", "and number_actions < 1.0: raise Exception(\"number_actions needs to be greater or equal 1.0\")", "self.last_check = self.now() if self.rate < 0: print(\"set up RateLimiter: disabled (no rate", "an optimization of adding a token every rate per seconds. \"\"\" @synchronized def", "downloader.ThreadSafe import SingletonMixin from downloader.ThreadSafe import synchronized \"\"\" A rate limiter to ensure", "the acquire request, and block until token is available current = self.now() time_passed", "up the rate limiter with the given number of actions in the given", "return # else process the acquire request, and block until token is available", "allowance. The bucket size is rate. The allowance += … line is an", "queue. The bucket is allowance. The bucket size is rate. The allowance +=", "return time.time() \"\"\" Call this method before you call your action that should", "case the rate limit is exceeded this method blocks until the given number", "until token is available current = self.now() time_passed = current - self.last_check self.last_check", "\",self.rate,\" actions per \",self.per,\" seconds\") def now(self): return time.time() \"\"\" Call this method", "default. 
Call setup to reconfigure \"\"\" def __init__(self): self.rate=50.0 self.per=1.0 self.allowance = self.rate", "< 0: print(\"set up RateLimiter: disabled (no rate limiting)\") else: print(\"set up RateLimiter:", "self.rate self.last_check = self.now() if self.rate < 0: print(\"set up RateLimiter: disabled (no", "self.allowance = self.rate self.last_check = self.now() \"\"\" Set up the rate limiter with", "(no rate limiting)\") else: print(\"set up RateLimiter: \",self.rate,\" actions per \",self.per,\" seconds\") def", "greater or equal 1.0\") self.rate=float(number_actions) self.per=float(interval) self.allowance = self.rate self.last_check = self.now() if", "ensure that only a given number of call are made per given time", "is a token bucket algorithm without queue. The bucket is allowance. The bucket", "self.last_check = self.now() \"\"\" Set up the rate limiter with the given number", "bucket is allowance. The bucket size is rate. The allowance += … line", "current - self.last_check self.last_check = current self.allowance += time_passed * (self.rate / self.per)", "self.allowance > self.rate: self.allowance = self.rate self.allowance -= 1.0 if self.allowance < 1.0:", "downloader.ThreadSafe import synchronized \"\"\" A rate limiter to ensure that only a given", "per seconds. \"\"\" @synchronized def acquire(self): # return immediately if rate limit is", "to be greater or equal 1.0\") self.rate=float(number_actions) self.per=float(interval) self.allowance = self.rate self.last_check =", "of actions per interval is fulfiled again. This method is thread safe. For", "- self.last_check self.last_check = current self.allowance += time_passed * (self.rate / self.per) if", "0.0 and number_actions < 1.0: raise Exception(\"number_actions needs to be greater or equal", "in the given interval in seconds. 
You need to call this method first", "RateLimiter \"\"\" def setup(self, number_actions, interval): if number_actions > 0.0 and number_actions <", "import synchronized \"\"\" A rate limiter to ensure that only a given number", "self.rate < 0: print(\"set up RateLimiter: disabled (no rate limiting)\") else: print(\"set up", "# -*- coding: utf-8 -*-1 import threading import functools import time from downloader.ThreadSafe", "number of actions per interval is fulfiled again. This method is thread safe.", "per given time interval \"\"\" class RateLimiter(SingletonMixin): \"\"\" Constructor with 50 actions per", "\"\"\" def setup(self, number_actions, interval): if number_actions > 0.0 and number_actions < 1.0:", "= self.rate self.allowance -= 1.0 if self.allowance < 1.0: # wait until next", "if self.allowance < 1.0: # wait until next bucket is available time.sleep( (1-self.allowance)", "with 50 actions per second as default. Call setup to reconfigure \"\"\" def", "if rate limit is disabled if self.rate<0: return # else process the acquire", "the rate limit is exceeded this method blocks until the given number of", "adding a token every rate per seconds. \"\"\" @synchronized def acquire(self): # return", "\"\"\" Call this method before you call your action that should respect the", "\"\"\" @synchronized def acquire(self): # return immediately if rate limit is disabled if", "number of actions in the given interval in seconds. You need to call", "rate per seconds. \"\"\" @synchronized def acquire(self): # return immediately if rate limit", "in seconds. You need to call this method first to configure the RateLimiter", "allowance += … line is an optimization of adding a token every rate", "is an optimization of adding a token every rate per seconds. \"\"\" @synchronized", "utf-8 -*-1 import threading import functools import time from downloader.ThreadSafe import SingletonMixin from", "second as default. 
Call setup to reconfigure \"\"\" def __init__(self): self.rate=50.0 self.per=1.0 self.allowance", "return immediately if rate limit is disabled if self.rate<0: return # else process", "rate limit is disabled if self.rate<0: return # else process the acquire request,", "self.allowance -= 1.0 if self.allowance < 1.0: # wait until next bucket is", "-= 1.0 if self.allowance < 1.0: # wait until next bucket is available", "You need to call this method first to configure the RateLimiter \"\"\" def", "1.0: raise Exception(\"number_actions needs to be greater or equal 1.0\") self.rate=float(number_actions) self.per=float(interval) self.allowance", "rate limit is exceeded this method blocks until the given number of actions", "now(self): return time.time() \"\"\" Call this method before you call your action that", "1.0 if self.allowance < 1.0: # wait until next bucket is available time.sleep(", "/ self.per) if self.allowance > self.rate: self.allowance = self.rate self.allowance -= 1.0 if", "given time interval \"\"\" class RateLimiter(SingletonMixin): \"\"\" Constructor with 50 actions per second", "needs to be greater or equal 1.0\") self.rate=float(number_actions) self.per=float(interval) self.allowance = self.rate self.last_check", "until the given number of actions per interval is fulfiled again. 
This method", "reconfigure \"\"\" def __init__(self): self.rate=50.0 self.per=1.0 self.allowance = self.rate self.last_check = self.now() \"\"\"", "and block until token is available current = self.now() time_passed = current -", "that only a given number of call are made per given time interval", "* (self.rate / self.per) if self.allowance > self.rate: self.allowance = self.rate self.allowance -=", "self.rate=50.0 self.per=1.0 self.allowance = self.rate self.last_check = self.now() \"\"\" Set up the rate", "immediately if rate limit is disabled if self.rate<0: return # else process the", "method blocks until the given number of actions per interval is fulfiled again.", "self.now() time_passed = current - self.last_check self.last_check = current self.allowance += time_passed *", "thread safe. For algorithm used see: https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm This is a token bucket algorithm", "RateLimiter(SingletonMixin): \"\"\" Constructor with 50 actions per second as default. Call setup to", "\"\"\" def __init__(self): self.rate=50.0 self.per=1.0 self.allowance = self.rate self.last_check = self.now() \"\"\" Set", "current self.allowance += time_passed * (self.rate / self.per) if self.allowance > self.rate: self.allowance", "number_actions > 0.0 and number_actions < 1.0: raise Exception(\"number_actions needs to be greater", "process the acquire request, and block until token is available current = self.now()", "per \",self.per,\" seconds\") def now(self): return time.time() \"\"\" Call this method before you", "size is rate. The allowance += … line is an optimization of adding", "a token bucket algorithm without queue. The bucket is allowance. The bucket size", "a token every rate per seconds. 
\"\"\" @synchronized def acquire(self): # return immediately", "# else process the acquire request, and block until token is available current", "call this method first to configure the RateLimiter \"\"\" def setup(self, number_actions, interval):", "self.rate<0: return # else process the acquire request, and block until token is", "time_passed = current - self.last_check self.last_check = current self.allowance += time_passed * (self.rate", "should respect the rate limit. In case the rate limit is exceeded this", "self.allowance = self.rate self.allowance -= 1.0 if self.allowance < 1.0: # wait until", "token is available current = self.now() time_passed = current - self.last_check self.last_check =", "time from downloader.ThreadSafe import SingletonMixin from downloader.ThreadSafe import synchronized \"\"\" A rate limiter", "def setup(self, number_actions, interval): if number_actions > 0.0 and number_actions < 1.0: raise", "up RateLimiter: \",self.rate,\" actions per \",self.per,\" seconds\") def now(self): return time.time() \"\"\" Call", "self.allowance < 1.0: # wait until next bucket is available time.sleep( (1-self.allowance) *", "number_actions < 1.0: raise Exception(\"number_actions needs to be greater or equal 1.0\") self.rate=float(number_actions)", "< 1.0: # wait until next bucket is available time.sleep( (1-self.allowance) * (self.per/self.rate))", "given number of actions per interval is fulfiled again. This method is thread", "Call this method before you call your action that should respect the rate", "optimization of adding a token every rate per seconds. \"\"\" @synchronized def acquire(self):", "\"\"\" A rate limiter to ensure that only a given number of call", "the given number of actions per interval is fulfiled again. This method is", "per second as default. 
Call setup to reconfigure \"\"\" def __init__(self): self.rate=50.0 self.per=1.0", "the RateLimiter \"\"\" def setup(self, number_actions, interval): if number_actions > 0.0 and number_actions", "method is thread safe. For algorithm used see: https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm This is a token", "token bucket algorithm without queue. The bucket is allowance. The bucket size is", "SingletonMixin from downloader.ThreadSafe import synchronized \"\"\" A rate limiter to ensure that only", "to call this method first to configure the RateLimiter \"\"\" def setup(self, number_actions,", "Constructor with 50 actions per second as default. Call setup to reconfigure \"\"\"", "self.per=float(interval) self.allowance = self.rate self.last_check = self.now() if self.rate < 0: print(\"set up", "limit. In case the rate limit is exceeded this method blocks until the", "-*-1 import threading import functools import time from downloader.ThreadSafe import SingletonMixin from downloader.ThreadSafe", "self.now() \"\"\" Set up the rate limiter with the given number of actions", "seconds\") def now(self): return time.time() \"\"\" Call this method before you call your", "This method is thread safe. For algorithm used see: https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm This is a", "self.allowance += time_passed * (self.rate / self.per) if self.allowance > self.rate: self.allowance =", "if self.allowance > self.rate: self.allowance = self.rate self.allowance -= 1.0 if self.allowance <", "The allowance += … line is an optimization of adding a token every", "rate limiting)\") else: print(\"set up RateLimiter: \",self.rate,\" actions per \",self.per,\" seconds\") def now(self):", "per interval is fulfiled again. This method is thread safe. For algorithm used", "50 actions per second as default. 
Call setup to reconfigure \"\"\" def __init__(self):", "interval): if number_actions > 0.0 and number_actions < 1.0: raise Exception(\"number_actions needs to", "= self.rate self.last_check = self.now() if self.rate < 0: print(\"set up RateLimiter: disabled", "(self.rate / self.per) if self.allowance > self.rate: self.allowance = self.rate self.allowance -= 1.0", "The bucket is allowance. The bucket size is rate. The allowance += …", "that should respect the rate limit. In case the rate limit is exceeded", "request, and block until token is available current = self.now() time_passed = current", "rate. The allowance += … line is an optimization of adding a token", "safe. For algorithm used see: https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm This is a token bucket algorithm without", "Exception(\"number_actions needs to be greater or equal 1.0\") self.rate=float(number_actions) self.per=float(interval) self.allowance = self.rate", "line is an optimization of adding a token every rate per seconds. \"\"\"", "time_passed * (self.rate / self.per) if self.allowance > self.rate: self.allowance = self.rate self.allowance", "self.rate self.last_check = self.now() \"\"\" Set up the rate limiter with the given", "before you call your action that should respect the rate limit. In case", "token every rate per seconds. 
\"\"\" @synchronized def acquire(self): # return immediately if", "raise Exception(\"number_actions needs to be greater or equal 1.0\") self.rate=float(number_actions) self.per=float(interval) self.allowance =", "setup(self, number_actions, interval): if number_actions > 0.0 and number_actions < 1.0: raise Exception(\"number_actions", "available current = self.now() time_passed = current - self.last_check self.last_check = current self.allowance", "synchronized \"\"\" A rate limiter to ensure that only a given number of", "https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm This is a token bucket algorithm without queue. The bucket is allowance.", "def acquire(self): # return immediately if rate limit is disabled if self.rate<0: return", "self.rate self.allowance -= 1.0 if self.allowance < 1.0: # wait until next bucket", "block until token is available current = self.now() time_passed = current - self.last_check", "def __init__(self): self.rate=50.0 self.per=1.0 self.allowance = self.rate self.last_check = self.now() \"\"\" Set up", "the given interval in seconds. You need to call this method first to", "actions per second as default. Call setup to reconfigure \"\"\" def __init__(self): self.rate=50.0", "of actions in the given interval in seconds. You need to call this", "__init__(self): self.rate=50.0 self.per=1.0 self.allowance = self.rate self.last_check = self.now() \"\"\" Set up the", "= current self.allowance += time_passed * (self.rate / self.per) if self.allowance > self.rate:", "call are made per given time interval \"\"\" class RateLimiter(SingletonMixin): \"\"\" Constructor with", "limiter with the given number of actions in the given interval in seconds.", "up RateLimiter: disabled (no rate limiting)\") else: print(\"set up RateLimiter: \",self.rate,\" actions per", "algorithm without queue. The bucket is allowance. The bucket size is rate. 
The", "self.allowance = self.rate self.last_check = self.now() if self.rate < 0: print(\"set up RateLimiter:", "# return immediately if rate limit is disabled if self.rate<0: return # else", "self.last_check self.last_check = current self.allowance += time_passed * (self.rate / self.per) if self.allowance", "self.per) if self.allowance > self.rate: self.allowance = self.rate self.allowance -= 1.0 if self.allowance", "of call are made per given time interval \"\"\" class RateLimiter(SingletonMixin): \"\"\" Constructor", "number_actions, interval): if number_actions > 0.0 and number_actions < 1.0: raise Exception(\"number_actions needs", "limit is disabled if self.rate<0: return # else process the acquire request, and", "method first to configure the RateLimiter \"\"\" def setup(self, number_actions, interval): if number_actions", "… line is an optimization of adding a token every rate per seconds.", "configure the RateLimiter \"\"\" def setup(self, number_actions, interval): if number_actions > 0.0 and", "disabled if self.rate<0: return # else process the acquire request, and block until", "you call your action that should respect the rate limit. In case the", "is exceeded this method blocks until the given number of actions per interval", "is allowance. The bucket size is rate. The allowance += … line is", "<reponame>pixolution/PixolutionImageDownloader<gh_stars>1-10 #!bin/python3 # -*- coding: utf-8 -*-1 import threading import functools import time" ]
[ "noinspection PyUnresolvedReferences from lebanese_channels.services import * CHANNEL_LIST = [] for cls in Channel.__subclasses__():", "import Channel # noinspection PyUnresolvedReferences from lebanese_channels.services import * CHANNEL_LIST = [] for", "lebanese_channels.channel import Channel # noinspection PyUnresolvedReferences from lebanese_channels.services import * CHANNEL_LIST = []", "from lebanese_channels.channel import Channel # noinspection PyUnresolvedReferences from lebanese_channels.services import * CHANNEL_LIST =", "* CHANNEL_LIST = [] for cls in Channel.__subclasses__(): CHANNEL_LIST.append(cls()) CHANNEL_LIST = sorted(CHANNEL_LIST, key=lambda", "Channel # noinspection PyUnresolvedReferences from lebanese_channels.services import * CHANNEL_LIST = [] for cls", "from lebanese_channels.services import * CHANNEL_LIST = [] for cls in Channel.__subclasses__(): CHANNEL_LIST.append(cls()) CHANNEL_LIST", "import * CHANNEL_LIST = [] for cls in Channel.__subclasses__(): CHANNEL_LIST.append(cls()) CHANNEL_LIST = sorted(CHANNEL_LIST,", "CHANNEL_LIST = [] for cls in Channel.__subclasses__(): CHANNEL_LIST.append(cls()) CHANNEL_LIST = sorted(CHANNEL_LIST, key=lambda x:", "lebanese_channels.services import * CHANNEL_LIST = [] for cls in Channel.__subclasses__(): CHANNEL_LIST.append(cls()) CHANNEL_LIST =", "PyUnresolvedReferences from lebanese_channels.services import * CHANNEL_LIST = [] for cls in Channel.__subclasses__(): CHANNEL_LIST.append(cls())", "# noinspection PyUnresolvedReferences from lebanese_channels.services import * CHANNEL_LIST = [] for cls in", "= [] for cls in Channel.__subclasses__(): CHANNEL_LIST.append(cls()) CHANNEL_LIST = sorted(CHANNEL_LIST, key=lambda x: x.get_name())" ]
[]
[ "unittest.mock import patch from log_utils.get_logger import ( LogFilter, Formatter, get_logger, init_logging, set_log_attrs, DEFAULT_LOG_LEVEL)", "Formatter(keys) record = logging.makeLogRecord({'foobar': 'Hello!'}) formatter.format(record) assert(record.foobar == 'Hello!') mock_format.assert_called_once_with(record) class TestGetLogger(): def", "and became out of date. This was not a ' 'problem with your", "'problem with your site build, but if you restart ' 'the failed build", "DBHandler class TestLogFilter(): def test_it_filters_message_with_default_mask(self): priv_values = ['foobar'] msg = 'hellofoobar' filter =", "assert(adapter.extra == attrs) @patch('psycopg2.connect') @patch('logging.basicConfig') class TestInitLogging(): def test_it_adds_a_stream_and_db_handlers(self, mock_basic_config, _): init_logging([], {'buildid':", "priv_values = [] msg = '' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg})", "test_it_filters_message_with_default_mask(self): priv_values = ['foobar'] msg = 'hellofoobar' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg':", "= {'foo': 'bar'} set_log_attrs(attrs) adapter = get_logger(name) assert(type(adapter) == logging.LoggerAdapter) assert(adapter.logger.name == name)", "not a ' 'problem with your site build, but if you restart '", "priv_values = ['foobar'] mask = 'TheNumber42' msg = 'hellofoobar' filter = LogFilter(priv_values, mask)", "= LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage()", "== 'Hello!') mock_format.assert_called_once_with(record) class TestGetLogger(): def test_it_returns_a_logger_with_an_adapter_with_extras(self): name = 'foobar' attrs = {'foo':", "'Sorry for the inconvenience!' 
)) class TestFormatter(): @patch('logging.Formatter.format') def test_it_populates_empty_strings_if_key_is_missing(self, mock_format): keys =", "= LogFilter(priv_values, mask) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True)", "' 'the failed build it should work on the next try. ' 'Sorry", "import patch from log_utils.get_logger import ( LogFilter, Formatter, get_logger, init_logging, set_log_attrs, DEFAULT_LOG_LEVEL) from", "out of date. This was not a ' 'problem with your site build,", "mock_format): keys = ['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({}) formatter.format(record) assert(record.foobar ==", "False) def test_it_replaces_message_invalid_access_key(self): priv_values = [] msg = f'hello{LogFilter.INVALID_ACCESS_KEY}' filter = LogFilter(priv_values) record", "logging.makeLogRecord({}) formatter.format(record) assert(record.foobar == '') mock_format.assert_called_once_with(record) @patch('logging.Formatter.format') def test_it_ignores_key_if_present(self, mock_format): keys = ['foobar']", "= 'hellofoobar' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result", "assert(result is False) def test_it_replaces_message_invalid_access_key(self): priv_values = [] msg = f'hello{LogFilter.INVALID_ACCESS_KEY}' filter =", "TestLogFilter(): def test_it_filters_message_with_default_mask(self): priv_values = ['foobar'] msg = 'hellofoobar' filter = LogFilter(priv_values) record", "TestInitLogging(): def test_it_adds_a_stream_and_db_handlers(self, mock_basic_config, _): init_logging([], {'buildid': 1234}, 'foo') _, kwargs = mock_basic_config.call_args", "= filter.filter(record) assert(result is False) def test_it_replaces_message_invalid_access_key(self): priv_values = [] msg = f'hello{LogFilter.INVALID_ACCESS_KEY}'", "def test_it_filters_message_with_custom_mask(self): priv_values = ['foobar'] mask = 'TheNumber42' msg = 
'hellofoobar' filter =", "record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{mask}')", "def test_it_replaces_message_invalid_access_key(self): priv_values = [] msg = f'hello{LogFilter.INVALID_ACCESS_KEY}' filter = LogFilter(priv_values) record =", "logging.makeLogRecord({'foobar': 'Hello!'}) formatter.format(record) assert(record.foobar == 'Hello!') mock_format.assert_called_once_with(record) class TestGetLogger(): def test_it_returns_a_logger_with_an_adapter_with_extras(self): name =", "'hellofoobar' filter = LogFilter(priv_values, mask) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result", "next try. ' 'Sorry for the inconvenience!' )) class TestFormatter(): @patch('logging.Formatter.format') def test_it_populates_empty_strings_if_key_is_missing(self,", "mock_format.assert_called_once_with(record) @patch('logging.Formatter.format') def test_it_ignores_key_if_present(self, mock_format): keys = ['foobar'] formatter = Formatter(keys) record =", "['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({'foobar': 'Hello!'}) formatter.format(record) assert(record.foobar == 'Hello!') mock_format.assert_called_once_with(record)", "name) assert(adapter.extra == attrs) @patch('psycopg2.connect') @patch('logging.basicConfig') class TestInitLogging(): def test_it_adds_a_stream_and_db_handlers(self, mock_basic_config, _): init_logging([],", "'') mock_format.assert_called_once_with(record) @patch('logging.Formatter.format') def test_it_ignores_key_if_present(self, mock_format): keys = ['foobar'] formatter = Formatter(keys) record", "keys = ['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({}) formatter.format(record) assert(record.foobar == '')", "== '') mock_format.assert_called_once_with(record) @patch('logging.Formatter.format') def test_it_ignores_key_if_present(self, mock_format): keys = ['foobar'] formatter = 
Formatter(keys)", "class TestLogFilter(): def test_it_filters_message_with_default_mask(self): priv_values = ['foobar'] msg = 'hellofoobar' filter = LogFilter(priv_values)", "f'hello{LogFilter.INVALID_ACCESS_KEY}' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is", "= logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() == ( 'Whoops,", "= 'foobar' attrs = {'foo': 'bar'} set_log_attrs(attrs) adapter = get_logger(name) assert(type(adapter) == logging.LoggerAdapter)", "def test_it_adds_a_stream_and_db_handlers(self, mock_basic_config, _): init_logging([], {'buildid': 1234}, 'foo') _, kwargs = mock_basic_config.call_args assert(kwargs['level']", "def test_it_does_not_log_empty_messages(self): priv_values = [] msg = '' filter = LogFilter(priv_values) record =", "test_it_does_not_log_empty_messages(self): priv_values = [] msg = '' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg':", "for the inconvenience!' )) class TestFormatter(): @patch('logging.Formatter.format') def test_it_populates_empty_strings_if_key_is_missing(self, mock_format): keys = ['foobar']", "msg = 'hellofoobar' filter = LogFilter(priv_values, mask) record = logging.makeLogRecord({'msg': msg}) result =", "filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{LogFilter.DEFAULT_MASK}') def test_it_filters_message_with_custom_mask(self): priv_values = ['foobar'] mask", "msg}) result = filter.filter(record) assert(result is False) def test_it_replaces_message_invalid_access_key(self): priv_values = [] msg", "This was not a ' 'problem with your site build, but if you", "priv_values = [] msg = f'hello{LogFilter.INVALID_ACCESS_KEY}' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg})", "it should work on the next try. ' 'Sorry for the inconvenience!' 
))", "test_it_filters_message_with_custom_mask(self): priv_values = ['foobar'] mask = 'TheNumber42' msg = 'hellofoobar' filter = LogFilter(priv_values,", "log_utils.get_logger import ( LogFilter, Formatter, get_logger, init_logging, set_log_attrs, DEFAULT_LOG_LEVEL) from log_utils.db_handler import DBHandler", "result = filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{mask}') def test_it_does_not_log_empty_messages(self): priv_values =", "= 'hellofoobar' filter = LogFilter(priv_values, mask) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record)", "' 'problem with your site build, but if you restart ' 'the failed", "== f'hello{mask}') def test_it_does_not_log_empty_messages(self): priv_values = [] msg = '' filter = LogFilter(priv_values)", "site build, but if you restart ' 'the failed build it should work", "record = logging.makeLogRecord({'foobar': 'Hello!'}) formatter.format(record) assert(record.foobar == 'Hello!') mock_format.assert_called_once_with(record) class TestGetLogger(): def test_it_returns_a_logger_with_an_adapter_with_extras(self):", "get_logger, init_logging, set_log_attrs, DEFAULT_LOG_LEVEL) from log_utils.db_handler import DBHandler class TestLogFilter(): def test_it_filters_message_with_default_mask(self): priv_values", "filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is False)", "logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{mask}') def test_it_does_not_log_empty_messages(self):", "= Formatter(keys) record = logging.makeLogRecord({}) formatter.format(record) assert(record.foobar == '') mock_format.assert_called_once_with(record) @patch('logging.Formatter.format') def test_it_ignores_key_if_present(self,", "== name) assert(adapter.extra == attrs) @patch('psycopg2.connect') @patch('logging.basicConfig') class TestInitLogging(): def 
test_it_adds_a_stream_and_db_handlers(self, mock_basic_config, _):", "DEFAULT_LOG_LEVEL) from log_utils.db_handler import DBHandler class TestLogFilter(): def test_it_filters_message_with_default_mask(self): priv_values = ['foobar'] msg", "assert(result is True) assert(record.getMessage() == ( 'Whoops, our S3 keys were rotated during", "patch from log_utils.get_logger import ( LogFilter, Formatter, get_logger, init_logging, set_log_attrs, DEFAULT_LOG_LEVEL) from log_utils.db_handler", "filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{mask}') def test_it_does_not_log_empty_messages(self): priv_values = [] msg", "['foobar'] msg = 'hellofoobar' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result =", "= logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{mask}') def", "'Whoops, our S3 keys were rotated during your ' 'build and became out", "mask = 'TheNumber42' msg = 'hellofoobar' filter = LogFilter(priv_values, mask) record = logging.makeLogRecord({'msg':", "your site build, but if you restart ' 'the failed build it should", "msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{LogFilter.DEFAULT_MASK}') def test_it_filters_message_with_custom_mask(self): priv_values", "def test_it_returns_a_logger_with_an_adapter_with_extras(self): name = 'foobar' attrs = {'foo': 'bar'} set_log_attrs(attrs) adapter = get_logger(name)", "'TheNumber42' msg = 'hellofoobar' filter = LogFilter(priv_values, mask) record = logging.makeLogRecord({'msg': msg}) result", "'' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is", "= filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{mask}') def test_it_does_not_log_empty_messages(self): priv_values = []", "def test_it_filters_message_with_default_mask(self): 
priv_values = ['foobar'] msg = 'hellofoobar' filter = LogFilter(priv_values) record =", "the inconvenience!' )) class TestFormatter(): @patch('logging.Formatter.format') def test_it_populates_empty_strings_if_key_is_missing(self, mock_format): keys = ['foobar'] formatter", ")) class TestFormatter(): @patch('logging.Formatter.format') def test_it_populates_empty_strings_if_key_is_missing(self, mock_format): keys = ['foobar'] formatter = Formatter(keys)", "f'hello{LogFilter.DEFAULT_MASK}') def test_it_filters_message_with_custom_mask(self): priv_values = ['foobar'] mask = 'TheNumber42' msg = 'hellofoobar' filter", "filter.filter(record) assert(result is False) def test_it_replaces_message_invalid_access_key(self): priv_values = [] msg = f'hello{LogFilter.INVALID_ACCESS_KEY}' filter", "mock_basic_config.call_args assert(kwargs['level'] == DEFAULT_LOG_LEVEL) assert(len(kwargs['handlers']) == 2) assert(type(kwargs['handlers'][0]) == logging.StreamHandler) assert(type(kwargs['handlers'][1]) == DBHandler)", "= f'hello{LogFilter.INVALID_ACCESS_KEY}' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result", "msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() == ( 'Whoops, our S3", "import logging from unittest.mock import patch from log_utils.get_logger import ( LogFilter, Formatter, get_logger,", "logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{LogFilter.DEFAULT_MASK}') def test_it_filters_message_with_custom_mask(self):", "became out of date. 
This was not a ' 'problem with your site", "get_logger(name) assert(type(adapter) == logging.LoggerAdapter) assert(adapter.logger.name == name) assert(adapter.extra == attrs) @patch('psycopg2.connect') @patch('logging.basicConfig') class", "= [] msg = '' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result", "msg = f'hello{LogFilter.INVALID_ACCESS_KEY}' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record)", "= ['foobar'] mask = 'TheNumber42' msg = 'hellofoobar' filter = LogFilter(priv_values, mask) record", "'build and became out of date. This was not a ' 'problem with", "keys = ['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({'foobar': 'Hello!'}) formatter.format(record) assert(record.foobar ==", "= get_logger(name) assert(type(adapter) == logging.LoggerAdapter) assert(adapter.logger.name == name) assert(adapter.extra == attrs) @patch('psycopg2.connect') @patch('logging.basicConfig')", "record = logging.makeLogRecord({}) formatter.format(record) assert(record.foobar == '') mock_format.assert_called_once_with(record) @patch('logging.Formatter.format') def test_it_ignores_key_if_present(self, mock_format): keys", "assert(record.foobar == '') mock_format.assert_called_once_with(record) @patch('logging.Formatter.format') def test_it_ignores_key_if_present(self, mock_format): keys = ['foobar'] formatter =", "from log_utils.get_logger import ( LogFilter, Formatter, get_logger, init_logging, set_log_attrs, DEFAULT_LOG_LEVEL) from log_utils.db_handler import", "result = filter.filter(record) assert(result is False) def test_it_replaces_message_invalid_access_key(self): priv_values = [] msg =", "= logging.makeLogRecord({}) formatter.format(record) assert(record.foobar == '') mock_format.assert_called_once_with(record) @patch('logging.Formatter.format') def test_it_ignores_key_if_present(self, mock_format): keys =", "logging.makeLogRecord({'msg': msg}) result = 
filter.filter(record) assert(result is False) def test_it_replaces_message_invalid_access_key(self): priv_values = []", "' 'Sorry for the inconvenience!' )) class TestFormatter(): @patch('logging.Formatter.format') def test_it_populates_empty_strings_if_key_is_missing(self, mock_format): keys", "kwargs = mock_basic_config.call_args assert(kwargs['level'] == DEFAULT_LOG_LEVEL) assert(len(kwargs['handlers']) == 2) assert(type(kwargs['handlers'][0]) == logging.StreamHandler) assert(type(kwargs['handlers'][1])", "build, but if you restart ' 'the failed build it should work on", "= logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is False) def test_it_replaces_message_invalid_access_key(self): priv_values =", "mock_format.assert_called_once_with(record) class TestGetLogger(): def test_it_returns_a_logger_with_an_adapter_with_extras(self): name = 'foobar' attrs = {'foo': 'bar'} set_log_attrs(attrs)", "assert(record.foobar == 'Hello!') mock_format.assert_called_once_with(record) class TestGetLogger(): def test_it_returns_a_logger_with_an_adapter_with_extras(self): name = 'foobar' attrs =", "from unittest.mock import patch from log_utils.get_logger import ( LogFilter, Formatter, get_logger, init_logging, set_log_attrs,", "result = filter.filter(record) assert(result is True) assert(record.getMessage() == ( 'Whoops, our S3 keys", "@patch('logging.Formatter.format') def test_it_ignores_key_if_present(self, mock_format): keys = ['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({'foobar':", "is True) assert(record.getMessage() == f'hello{mask}') def test_it_does_not_log_empty_messages(self): priv_values = [] msg = ''", "init_logging, set_log_attrs, DEFAULT_LOG_LEVEL) from log_utils.db_handler import DBHandler class TestLogFilter(): def test_it_filters_message_with_default_mask(self): priv_values =", "S3 keys were rotated during your ' 'build and became out of date.", "logging.LoggerAdapter) assert(adapter.logger.name == name) 
assert(adapter.extra == attrs) @patch('psycopg2.connect') @patch('logging.basicConfig') class TestInitLogging(): def test_it_adds_a_stream_and_db_handlers(self,", "log_utils.db_handler import DBHandler class TestLogFilter(): def test_it_filters_message_with_default_mask(self): priv_values = ['foobar'] msg = 'hellofoobar'", "filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True)", "= ['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({'foobar': 'Hello!'}) formatter.format(record) assert(record.foobar == 'Hello!')", "'foo') _, kwargs = mock_basic_config.call_args assert(kwargs['level'] == DEFAULT_LOG_LEVEL) assert(len(kwargs['handlers']) == 2) assert(type(kwargs['handlers'][0]) ==", "LogFilter(priv_values, mask) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage()", "( 'Whoops, our S3 keys were rotated during your ' 'build and became", "if you restart ' 'the failed build it should work on the next", "record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{LogFilter.DEFAULT_MASK}')", "= ['foobar'] msg = 'hellofoobar' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result", "formatter = Formatter(keys) record = logging.makeLogRecord({}) formatter.format(record) assert(record.foobar == '') mock_format.assert_called_once_with(record) @patch('logging.Formatter.format') def", "'Hello!'}) formatter.format(record) assert(record.foobar == 'Hello!') mock_format.assert_called_once_with(record) class TestGetLogger(): def test_it_returns_a_logger_with_an_adapter_with_extras(self): name = 'foobar'", "mask) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() ==", "'bar'} set_log_attrs(attrs) adapter = get_logger(name) 
assert(type(adapter) == logging.LoggerAdapter) assert(adapter.logger.name == name) assert(adapter.extra ==", "= ['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({}) formatter.format(record) assert(record.foobar == '') mock_format.assert_called_once_with(record)", "['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({}) formatter.format(record) assert(record.foobar == '') mock_format.assert_called_once_with(record) @patch('logging.Formatter.format')", "import ( LogFilter, Formatter, get_logger, init_logging, set_log_attrs, DEFAULT_LOG_LEVEL) from log_utils.db_handler import DBHandler class", "= 'TheNumber42' msg = 'hellofoobar' filter = LogFilter(priv_values, mask) record = logging.makeLogRecord({'msg': msg})", "mock_basic_config, _): init_logging([], {'buildid': 1234}, 'foo') _, kwargs = mock_basic_config.call_args assert(kwargs['level'] == DEFAULT_LOG_LEVEL)", "@patch('logging.basicConfig') class TestInitLogging(): def test_it_adds_a_stream_and_db_handlers(self, mock_basic_config, _): init_logging([], {'buildid': 1234}, 'foo') _, kwargs", "== f'hello{LogFilter.DEFAULT_MASK}') def test_it_filters_message_with_custom_mask(self): priv_values = ['foobar'] mask = 'TheNumber42' msg = 'hellofoobar'", "class TestFormatter(): @patch('logging.Formatter.format') def test_it_populates_empty_strings_if_key_is_missing(self, mock_format): keys = ['foobar'] formatter = Formatter(keys) record", "Formatter(keys) record = logging.makeLogRecord({}) formatter.format(record) assert(record.foobar == '') mock_format.assert_called_once_with(record) @patch('logging.Formatter.format') def test_it_ignores_key_if_present(self, mock_format):", "[] msg = f'hello{LogFilter.INVALID_ACCESS_KEY}' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result =", "TestFormatter(): @patch('logging.Formatter.format') def test_it_populates_empty_strings_if_key_is_missing(self, mock_format): keys = ['foobar'] formatter = Formatter(keys) record 
=", "assert(adapter.logger.name == name) assert(adapter.extra == attrs) @patch('psycopg2.connect') @patch('logging.basicConfig') class TestInitLogging(): def test_it_adds_a_stream_and_db_handlers(self, mock_basic_config,", "' 'build and became out of date. This was not a ' 'problem", "the next try. ' 'Sorry for the inconvenience!' )) class TestFormatter(): @patch('logging.Formatter.format') def", "work on the next try. ' 'Sorry for the inconvenience!' )) class TestFormatter():", "formatter.format(record) assert(record.foobar == '') mock_format.assert_called_once_with(record) @patch('logging.Formatter.format') def test_it_ignores_key_if_present(self, mock_format): keys = ['foobar'] formatter", "name = 'foobar' attrs = {'foo': 'bar'} set_log_attrs(attrs) adapter = get_logger(name) assert(type(adapter) ==", "True) assert(record.getMessage() == ( 'Whoops, our S3 keys were rotated during your '", "attrs = {'foo': 'bar'} set_log_attrs(attrs) adapter = get_logger(name) assert(type(adapter) == logging.LoggerAdapter) assert(adapter.logger.name ==", "@patch('psycopg2.connect') @patch('logging.basicConfig') class TestInitLogging(): def test_it_adds_a_stream_and_db_handlers(self, mock_basic_config, _): init_logging([], {'buildid': 1234}, 'foo') _,", "['foobar'] mask = 'TheNumber42' msg = 'hellofoobar' filter = LogFilter(priv_values, mask) record =", "assert(result is True) assert(record.getMessage() == f'hello{mask}') def test_it_does_not_log_empty_messages(self): priv_values = [] msg =", "init_logging([], {'buildid': 1234}, 'foo') _, kwargs = mock_basic_config.call_args assert(kwargs['level'] == DEFAULT_LOG_LEVEL) assert(len(kwargs['handlers']) ==", "assert(record.getMessage() == f'hello{LogFilter.DEFAULT_MASK}') def test_it_filters_message_with_custom_mask(self): priv_values = ['foobar'] mask = 'TheNumber42' msg =", "priv_values = ['foobar'] msg = 'hellofoobar' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg})", "were rotated during your ' 'build 
and became out of date. This was", "a ' 'problem with your site build, but if you restart ' 'the", "'Hello!') mock_format.assert_called_once_with(record) class TestGetLogger(): def test_it_returns_a_logger_with_an_adapter_with_extras(self): name = 'foobar' attrs = {'foo': 'bar'}", "'foobar' attrs = {'foo': 'bar'} set_log_attrs(attrs) adapter = get_logger(name) assert(type(adapter) == logging.LoggerAdapter) assert(adapter.logger.name", "you restart ' 'the failed build it should work on the next try.", "formatter = Formatter(keys) record = logging.makeLogRecord({'foobar': 'Hello!'}) formatter.format(record) assert(record.foobar == 'Hello!') mock_format.assert_called_once_with(record) class", "was not a ' 'problem with your site build, but if you restart", "<gh_stars>1-10 import logging from unittest.mock import patch from log_utils.get_logger import ( LogFilter, Formatter,", "your ' 'build and became out of date. This was not a '", "def test_it_ignores_key_if_present(self, mock_format): keys = ['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({'foobar': 'Hello!'})", "failed build it should work on the next try. 
' 'Sorry for the", "@patch('logging.Formatter.format') def test_it_populates_empty_strings_if_key_is_missing(self, mock_format): keys = ['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({})", "is False) def test_it_replaces_message_invalid_access_key(self): priv_values = [] msg = f'hello{LogFilter.INVALID_ACCESS_KEY}' filter = LogFilter(priv_values)", "filter.filter(record) assert(result is True) assert(record.getMessage() == ( 'Whoops, our S3 keys were rotated", "( LogFilter, Formatter, get_logger, init_logging, set_log_attrs, DEFAULT_LOG_LEVEL) from log_utils.db_handler import DBHandler class TestLogFilter():", "logging from unittest.mock import patch from log_utils.get_logger import ( LogFilter, Formatter, get_logger, init_logging,", "'hellofoobar' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is", "is True) assert(record.getMessage() == f'hello{LogFilter.DEFAULT_MASK}') def test_it_filters_message_with_custom_mask(self): priv_values = ['foobar'] mask = 'TheNumber42'", "_, kwargs = mock_basic_config.call_args assert(kwargs['level'] == DEFAULT_LOG_LEVEL) assert(len(kwargs['handlers']) == 2) assert(type(kwargs['handlers'][0]) == logging.StreamHandler)", "== attrs) @patch('psycopg2.connect') @patch('logging.basicConfig') class TestInitLogging(): def test_it_adds_a_stream_and_db_handlers(self, mock_basic_config, _): init_logging([], {'buildid': 1234},", "True) assert(record.getMessage() == f'hello{LogFilter.DEFAULT_MASK}') def test_it_filters_message_with_custom_mask(self): priv_values = ['foobar'] mask = 'TheNumber42' msg", "= filter.filter(record) assert(result is True) assert(record.getMessage() == ( 'Whoops, our S3 keys were", "of date. 
This was not a ' 'problem with your site build, but", "logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() == ( 'Whoops, our", "= LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is False) def", "class TestGetLogger(): def test_it_returns_a_logger_with_an_adapter_with_extras(self): name = 'foobar' attrs = {'foo': 'bar'} set_log_attrs(attrs) adapter", "import DBHandler class TestLogFilter(): def test_it_filters_message_with_default_mask(self): priv_values = ['foobar'] msg = 'hellofoobar' filter", "attrs) @patch('psycopg2.connect') @patch('logging.basicConfig') class TestInitLogging(): def test_it_adds_a_stream_and_db_handlers(self, mock_basic_config, _): init_logging([], {'buildid': 1234}, 'foo')", "date. This was not a ' 'problem with your site build, but if", "1234}, 'foo') _, kwargs = mock_basic_config.call_args assert(kwargs['level'] == DEFAULT_LOG_LEVEL) assert(len(kwargs['handlers']) == 2) assert(type(kwargs['handlers'][0])", "[] msg = '' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result =", "result = filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{LogFilter.DEFAULT_MASK}') def test_it_filters_message_with_custom_mask(self): priv_values =", "assert(result is True) assert(record.getMessage() == f'hello{LogFilter.DEFAULT_MASK}') def test_it_filters_message_with_custom_mask(self): priv_values = ['foobar'] mask =", "assert(record.getMessage() == ( 'Whoops, our S3 keys were rotated during your ' 'build", "== ( 'Whoops, our S3 keys were rotated during your ' 'build and", "= '' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result", "test_it_replaces_message_invalid_access_key(self): priv_values = [] msg = f'hello{LogFilter.INVALID_ACCESS_KEY}' filter = LogFilter(priv_values) record = 
logging.makeLogRecord({'msg':", "test_it_returns_a_logger_with_an_adapter_with_extras(self): name = 'foobar' attrs = {'foo': 'bar'} set_log_attrs(attrs) adapter = get_logger(name) assert(type(adapter)", "TestGetLogger(): def test_it_returns_a_logger_with_an_adapter_with_extras(self): name = 'foobar' attrs = {'foo': 'bar'} set_log_attrs(attrs) adapter =", "with your site build, but if you restart ' 'the failed build it", "{'buildid': 1234}, 'foo') _, kwargs = mock_basic_config.call_args assert(kwargs['level'] == DEFAULT_LOG_LEVEL) assert(len(kwargs['handlers']) == 2)", "set_log_attrs, DEFAULT_LOG_LEVEL) from log_utils.db_handler import DBHandler class TestLogFilter(): def test_it_filters_message_with_default_mask(self): priv_values = ['foobar']", "LogFilter, Formatter, get_logger, init_logging, set_log_attrs, DEFAULT_LOG_LEVEL) from log_utils.db_handler import DBHandler class TestLogFilter(): def", "== logging.LoggerAdapter) assert(adapter.logger.name == name) assert(adapter.extra == attrs) @patch('psycopg2.connect') @patch('logging.basicConfig') class TestInitLogging(): def", "adapter = get_logger(name) assert(type(adapter) == logging.LoggerAdapter) assert(adapter.logger.name == name) assert(adapter.extra == attrs) @patch('psycopg2.connect')", "def test_it_populates_empty_strings_if_key_is_missing(self, mock_format): keys = ['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({}) formatter.format(record)", "msg = '' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record)", "assert(record.getMessage() == f'hello{mask}') def test_it_does_not_log_empty_messages(self): priv_values = [] msg = '' filter =", "msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{mask}') def test_it_does_not_log_empty_messages(self): priv_values", "try. ' 'Sorry for the inconvenience!' 
)) class TestFormatter(): @patch('logging.Formatter.format') def test_it_populates_empty_strings_if_key_is_missing(self, mock_format):", "rotated during your ' 'build and became out of date. This was not", "set_log_attrs(attrs) adapter = get_logger(name) assert(type(adapter) == logging.LoggerAdapter) assert(adapter.logger.name == name) assert(adapter.extra == attrs)", "= logging.makeLogRecord({'foobar': 'Hello!'}) formatter.format(record) assert(record.foobar == 'Hello!') mock_format.assert_called_once_with(record) class TestGetLogger(): def test_it_returns_a_logger_with_an_adapter_with_extras(self): name", "_): init_logging([], {'buildid': 1234}, 'foo') _, kwargs = mock_basic_config.call_args assert(kwargs['level'] == DEFAULT_LOG_LEVEL) assert(len(kwargs['handlers'])", "= [] msg = f'hello{LogFilter.INVALID_ACCESS_KEY}' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result", "our S3 keys were rotated during your ' 'build and became out of", "restart ' 'the failed build it should work on the next try. '", "should work on the next try. ' 'Sorry for the inconvenience!' )) class", "mock_format): keys = ['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({'foobar': 'Hello!'}) formatter.format(record) assert(record.foobar", "msg = 'hellofoobar' filter = LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record)", "f'hello{mask}') def test_it_does_not_log_empty_messages(self): priv_values = [] msg = '' filter = LogFilter(priv_values) record", "{'foo': 'bar'} set_log_attrs(attrs) adapter = get_logger(name) assert(type(adapter) == logging.LoggerAdapter) assert(adapter.logger.name == name) assert(adapter.extra", "assert(type(adapter) == logging.LoggerAdapter) assert(adapter.logger.name == name) assert(adapter.extra == attrs) @patch('psycopg2.connect') @patch('logging.basicConfig') class TestInitLogging():", "build it should work on the next try. 
' 'Sorry for the inconvenience!'", "True) assert(record.getMessage() == f'hello{mask}') def test_it_does_not_log_empty_messages(self): priv_values = [] msg = '' filter", "from log_utils.db_handler import DBHandler class TestLogFilter(): def test_it_filters_message_with_default_mask(self): priv_values = ['foobar'] msg =", "but if you restart ' 'the failed build it should work on the", "test_it_adds_a_stream_and_db_handlers(self, mock_basic_config, _): init_logging([], {'buildid': 1234}, 'foo') _, kwargs = mock_basic_config.call_args assert(kwargs['level'] ==", "filter = LogFilter(priv_values, mask) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is", "= filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{LogFilter.DEFAULT_MASK}') def test_it_filters_message_with_custom_mask(self): priv_values = ['foobar']", "inconvenience!' )) class TestFormatter(): @patch('logging.Formatter.format') def test_it_populates_empty_strings_if_key_is_missing(self, mock_format): keys = ['foobar'] formatter =", "formatter.format(record) assert(record.foobar == 'Hello!') mock_format.assert_called_once_with(record) class TestGetLogger(): def test_it_returns_a_logger_with_an_adapter_with_extras(self): name = 'foobar' attrs", "LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() ==", "class TestInitLogging(): def test_it_adds_a_stream_and_db_handlers(self, mock_basic_config, _): init_logging([], {'buildid': 1234}, 'foo') _, kwargs =", "= mock_basic_config.call_args assert(kwargs['level'] == DEFAULT_LOG_LEVEL) assert(len(kwargs['handlers']) == 2) assert(type(kwargs['handlers'][0]) == logging.StreamHandler) assert(type(kwargs['handlers'][1]) ==", "LogFilter(priv_values) record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is False) def test_it_replaces_message_invalid_access_key(self):", 
"test_it_populates_empty_strings_if_key_is_missing(self, mock_format): keys = ['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({}) formatter.format(record) assert(record.foobar", "Formatter, get_logger, init_logging, set_log_attrs, DEFAULT_LOG_LEVEL) from log_utils.db_handler import DBHandler class TestLogFilter(): def test_it_filters_message_with_default_mask(self):", "= logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() == f'hello{LogFilter.DEFAULT_MASK}') def", "on the next try. ' 'Sorry for the inconvenience!' )) class TestFormatter(): @patch('logging.Formatter.format')", "'the failed build it should work on the next try. ' 'Sorry for", "is True) assert(record.getMessage() == ( 'Whoops, our S3 keys were rotated during your", "during your ' 'build and became out of date. This was not a", "= Formatter(keys) record = logging.makeLogRecord({'foobar': 'Hello!'}) formatter.format(record) assert(record.foobar == 'Hello!') mock_format.assert_called_once_with(record) class TestGetLogger():", "record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is False) def test_it_replaces_message_invalid_access_key(self): priv_values", "keys were rotated during your ' 'build and became out of date. This", "test_it_ignores_key_if_present(self, mock_format): keys = ['foobar'] formatter = Formatter(keys) record = logging.makeLogRecord({'foobar': 'Hello!'}) formatter.format(record)", "record = logging.makeLogRecord({'msg': msg}) result = filter.filter(record) assert(result is True) assert(record.getMessage() == (" ]
[ "from ursina.prefabs.first_person_controller import FirstPersonController class Player(FirstPersonController): def __init__(self, position, init_health): super(Player, self).__init__() self.position", "__init__(self, position, init_health): super(Player, self).__init__() self.position = position self.health = init_health self.last_max_jump_pos =", "def __init__(self, position, init_health): super(Player, self).__init__() self.position = position self.health = init_health self.last_max_jump_pos", "position, init_health): super(Player, self).__init__() self.position = position self.health = init_health self.last_max_jump_pos = 0", "ursina.prefabs.first_person_controller import FirstPersonController class Player(FirstPersonController): def __init__(self, position, init_health): super(Player, self).__init__() self.position =", "import FirstPersonController class Player(FirstPersonController): def __init__(self, position, init_health): super(Player, self).__init__() self.position = position", "Player(FirstPersonController): def __init__(self, position, init_health): super(Player, self).__init__() self.position = position self.health = init_health", "FirstPersonController class Player(FirstPersonController): def __init__(self, position, init_health): super(Player, self).__init__() self.position = position self.health", "class Player(FirstPersonController): def __init__(self, position, init_health): super(Player, self).__init__() self.position = position self.health =" ]
[ "= torch.randn((num_points, hidden_dims),dtype=torch.float).to(device), requires_grad=True) def forward(self, x, adj, M): z = self.w1(x) +", "self.w1 = nn.Linear(num_features, hidden_size) self.iden = nn.Parameter(data = torch.randn((num_points, hidden_dims),dtype=torch.float).to(device), requires_grad=True) def forward(self,", "self).__init__() self.hidden_size = hidden_size self.embedding_size = embedding_size self.alpha = alpha self.conv1 = GATLayer(num_features,", "A_pred class pseudo_gat(nn.Module): def __init__(self, num_features, hidden_size): super(pseudo_gat, self).__init__() self.w1 = nn.Linear(num_features, hidden_size)", "hidden_size self.embedding_size = embedding_size self.alpha = alpha self.conv1 = GATLayer(num_features, hidden_size, alpha) self.conv2", "self.conv2(h, adj, M) z = F.normalize(h, p=2, dim=1) A_pred = self.dot_product_decode(z) return A_pred,", "torch.sigmoid(torch.matmul(Z, Z.t())) return A_pred class pseudo_gat(nn.Module): def __init__(self, num_features, hidden_size): super(pseudo_gat, self).__init__() self.w1", "if torch.cuda.is_available() else 'cpu') class GAT(nn.Module): def __init__(self, num_features, hidden_size, embedding_size, alpha): super(GAT,", "M) h = self.conv2(h, adj, M) z = F.normalize(h, p=2, dim=1) A_pred =", "z def dot_product_decode(self, Z): A_pred = torch.sigmoid(torch.matmul(Z, Z.t())) return A_pred class pseudo_gat(nn.Module): def", "= self.conv2(h, adj, M) z = F.normalize(h, p=2, dim=1) A_pred = self.dot_product_decode(z) return", "super(GAT, self).__init__() self.hidden_size = hidden_size self.embedding_size = embedding_size self.alpha = alpha self.conv1 =", "GATLayer(hidden_size, embedding_size, alpha) def forward(self, x, adj, M): h = self.conv1(x, adj, M)", "import torch.nn as nn import torch.nn.functional as F from layer import GATLayer device", "x, adj, M): z = self.w1(x) + iden A_pred = torch.sigmoid(torch.matmul(z, z.t())) return", "self.conv1(x, adj, M) h = self.conv2(h, adj, M) z = F.normalize(h, 
p=2, dim=1)", "Z.t())) return A_pred class pseudo_gat(nn.Module): def __init__(self, num_features, hidden_size): super(pseudo_gat, self).__init__() self.w1 =", "M) z = F.normalize(h, p=2, dim=1) A_pred = self.dot_product_decode(z) return A_pred, z def", "pseudo_gat(nn.Module): def __init__(self, num_features, hidden_size): super(pseudo_gat, self).__init__() self.w1 = nn.Linear(num_features, hidden_size) self.iden =", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class GAT(nn.Module): def __init__(self, num_features, hidden_size,", "GATLayer device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class GAT(nn.Module): def __init__(self, num_features,", "num_features, hidden_size): super(pseudo_gat, self).__init__() self.w1 = nn.Linear(num_features, hidden_size) self.iden = nn.Parameter(data = torch.randn((num_points,", "GATLayer(num_features, hidden_size, alpha) self.conv2 = GATLayer(hidden_size, embedding_size, alpha) def forward(self, x, adj, M):", "torch.nn as nn import torch.nn.functional as F from layer import GATLayer device =", "else 'cpu') class GAT(nn.Module): def __init__(self, num_features, hidden_size, embedding_size, alpha): super(GAT, self).__init__() self.hidden_size", "adj, M) h = self.conv2(h, adj, M) z = F.normalize(h, p=2, dim=1) A_pred", "embedding_size self.alpha = alpha self.conv1 = GATLayer(num_features, hidden_size, alpha) self.conv2 = GATLayer(hidden_size, embedding_size,", "self.conv1 = GATLayer(num_features, hidden_size, alpha) self.conv2 = GATLayer(hidden_size, embedding_size, alpha) def forward(self, x,", "A_pred, z def dot_product_decode(self, Z): A_pred = torch.sigmoid(torch.matmul(Z, Z.t())) return A_pred class pseudo_gat(nn.Module):", "from layer import GATLayer device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class GAT(nn.Module):", "= nn.Linear(num_features, hidden_size) self.iden = nn.Parameter(data = torch.randn((num_points, hidden_dims),dtype=torch.float).to(device), 
requires_grad=True) def forward(self, x,", "as F from layer import GATLayer device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "alpha self.conv1 = GATLayer(num_features, hidden_size, alpha) self.conv2 = GATLayer(hidden_size, embedding_size, alpha) def forward(self,", "torch.cuda.is_available() else 'cpu') class GAT(nn.Module): def __init__(self, num_features, hidden_size, embedding_size, alpha): super(GAT, self).__init__()", "nn.Linear(num_features, hidden_size) self.iden = nn.Parameter(data = torch.randn((num_points, hidden_dims),dtype=torch.float).to(device), requires_grad=True) def forward(self, x, adj,", "F from layer import GATLayer device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class", "torch.randn((num_points, hidden_dims),dtype=torch.float).to(device), requires_grad=True) def forward(self, x, adj, M): z = self.w1(x) + iden", "embedding_size, alpha) def forward(self, x, adj, M): h = self.conv1(x, adj, M) h", "M): z = self.w1(x) + iden A_pred = torch.sigmoid(torch.matmul(z, z.t())) return A_pred, z", "adj, M): h = self.conv1(x, adj, M) h = self.conv2(h, adj, M) z", "def __init__(self, num_features, hidden_size): super(pseudo_gat, self).__init__() self.w1 = nn.Linear(num_features, hidden_size) self.iden = nn.Parameter(data", "class GAT(nn.Module): def __init__(self, num_features, hidden_size, embedding_size, alpha): super(GAT, self).__init__() self.hidden_size = hidden_size", "GAT(nn.Module): def __init__(self, num_features, hidden_size, embedding_size, alpha): super(GAT, self).__init__() self.hidden_size = hidden_size self.embedding_size", "= F.normalize(h, p=2, dim=1) A_pred = self.dot_product_decode(z) return A_pred, z def dot_product_decode(self, Z):", "A_pred = self.dot_product_decode(z) return A_pred, z def dot_product_decode(self, Z): A_pred = torch.sigmoid(torch.matmul(Z, Z.t()))", "def forward(self, x, adj, M): z = self.w1(x) + iden A_pred = torch.sigmoid(torch.matmul(z,", "forward(self, x, adj, M): z = 
self.w1(x) + iden A_pred = torch.sigmoid(torch.matmul(z, z.t()))", "nn import torch.nn.functional as F from layer import GATLayer device = torch.device('cuda' if", "as nn import torch.nn.functional as F from layer import GATLayer device = torch.device('cuda'", "F.normalize(h, p=2, dim=1) A_pred = self.dot_product_decode(z) return A_pred, z def dot_product_decode(self, Z): A_pred", "import torch.nn.functional as F from layer import GATLayer device = torch.device('cuda' if torch.cuda.is_available()", "num_features, hidden_size, embedding_size, alpha): super(GAT, self).__init__() self.hidden_size = hidden_size self.embedding_size = embedding_size self.alpha", "alpha) def forward(self, x, adj, M): h = self.conv1(x, adj, M) h =", "super(pseudo_gat, self).__init__() self.w1 = nn.Linear(num_features, hidden_size) self.iden = nn.Parameter(data = torch.randn((num_points, hidden_dims),dtype=torch.float).to(device), requires_grad=True)", "adj, M) z = F.normalize(h, p=2, dim=1) A_pred = self.dot_product_decode(z) return A_pred, z", "hidden_dims),dtype=torch.float).to(device), requires_grad=True) def forward(self, x, adj, M): z = self.w1(x) + iden A_pred", "nn.Parameter(data = torch.randn((num_points, hidden_dims),dtype=torch.float).to(device), requires_grad=True) def forward(self, x, adj, M): z = self.w1(x)", "import torch import torch.nn as nn import torch.nn.functional as F from layer import", "dot_product_decode(self, Z): A_pred = torch.sigmoid(torch.matmul(Z, Z.t())) return A_pred class pseudo_gat(nn.Module): def __init__(self, num_features,", "= self.conv1(x, adj, M) h = self.conv2(h, adj, M) z = F.normalize(h, p=2,", "torch.device('cuda' if torch.cuda.is_available() else 'cpu') class GAT(nn.Module): def __init__(self, num_features, hidden_size, embedding_size, alpha):", "forward(self, x, adj, M): h = self.conv1(x, adj, M) h = self.conv2(h, adj,", "alpha): super(GAT, self).__init__() self.hidden_size = hidden_size self.embedding_size = embedding_size self.alpha = alpha 
self.conv1", "return A_pred, z def dot_product_decode(self, Z): A_pred = torch.sigmoid(torch.matmul(Z, Z.t())) return A_pred class", "self.dot_product_decode(z) return A_pred, z def dot_product_decode(self, Z): A_pred = torch.sigmoid(torch.matmul(Z, Z.t())) return A_pred", "__init__(self, num_features, hidden_size, embedding_size, alpha): super(GAT, self).__init__() self.hidden_size = hidden_size self.embedding_size = embedding_size", "= GATLayer(hidden_size, embedding_size, alpha) def forward(self, x, adj, M): h = self.conv1(x, adj,", "def __init__(self, num_features, hidden_size, embedding_size, alpha): super(GAT, self).__init__() self.hidden_size = hidden_size self.embedding_size =", "def forward(self, x, adj, M): h = self.conv1(x, adj, M) h = self.conv2(h,", "hidden_size, alpha) self.conv2 = GATLayer(hidden_size, embedding_size, alpha) def forward(self, x, adj, M): h", "torch import torch.nn as nn import torch.nn.functional as F from layer import GATLayer", "self.iden = nn.Parameter(data = torch.randn((num_points, hidden_dims),dtype=torch.float).to(device), requires_grad=True) def forward(self, x, adj, M): z", "= embedding_size self.alpha = alpha self.conv1 = GATLayer(num_features, hidden_size, alpha) self.conv2 = GATLayer(hidden_size,", "torch.nn.functional as F from layer import GATLayer device = torch.device('cuda' if torch.cuda.is_available() else", "A_pred = torch.sigmoid(torch.matmul(Z, Z.t())) return A_pred class pseudo_gat(nn.Module): def __init__(self, num_features, hidden_size): super(pseudo_gat,", "self).__init__() self.w1 = nn.Linear(num_features, hidden_size) self.iden = nn.Parameter(data = torch.randn((num_points, hidden_dims),dtype=torch.float).to(device), requires_grad=True) def", "hidden_size) self.iden = nn.Parameter(data = torch.randn((num_points, hidden_dims),dtype=torch.float).to(device), requires_grad=True) def forward(self, x, adj, M):", "= self.dot_product_decode(z) return A_pred, z def dot_product_decode(self, Z): A_pred = 
torch.sigmoid(torch.matmul(Z, Z.t())) return", "return A_pred class pseudo_gat(nn.Module): def __init__(self, num_features, hidden_size): super(pseudo_gat, self).__init__() self.w1 = nn.Linear(num_features,", "__init__(self, num_features, hidden_size): super(pseudo_gat, self).__init__() self.w1 = nn.Linear(num_features, hidden_size) self.iden = nn.Parameter(data =", "p=2, dim=1) A_pred = self.dot_product_decode(z) return A_pred, z def dot_product_decode(self, Z): A_pred =", "M): h = self.conv1(x, adj, M) h = self.conv2(h, adj, M) z =", "= nn.Parameter(data = torch.randn((num_points, hidden_dims),dtype=torch.float).to(device), requires_grad=True) def forward(self, x, adj, M): z =", "hidden_size, embedding_size, alpha): super(GAT, self).__init__() self.hidden_size = hidden_size self.embedding_size = embedding_size self.alpha =", "= GATLayer(num_features, hidden_size, alpha) self.conv2 = GATLayer(hidden_size, embedding_size, alpha) def forward(self, x, adj,", "dim=1) A_pred = self.dot_product_decode(z) return A_pred, z def dot_product_decode(self, Z): A_pred = torch.sigmoid(torch.matmul(Z,", "self.embedding_size = embedding_size self.alpha = alpha self.conv1 = GATLayer(num_features, hidden_size, alpha) self.conv2 =", "= torch.device('cuda' if torch.cuda.is_available() else 'cpu') class GAT(nn.Module): def __init__(self, num_features, hidden_size, embedding_size,", "self.alpha = alpha self.conv1 = GATLayer(num_features, hidden_size, alpha) self.conv2 = GATLayer(hidden_size, embedding_size, alpha)", "class pseudo_gat(nn.Module): def __init__(self, num_features, hidden_size): super(pseudo_gat, self).__init__() self.w1 = nn.Linear(num_features, hidden_size) self.iden", "z = F.normalize(h, p=2, dim=1) A_pred = self.dot_product_decode(z) return A_pred, z def dot_product_decode(self,", "hidden_size): super(pseudo_gat, self).__init__() self.w1 = nn.Linear(num_features, hidden_size) self.iden = nn.Parameter(data = torch.randn((num_points, 
hidden_dims),dtype=torch.float).to(device),", "Z): A_pred = torch.sigmoid(torch.matmul(Z, Z.t())) return A_pred class pseudo_gat(nn.Module): def __init__(self, num_features, hidden_size):", "= hidden_size self.embedding_size = embedding_size self.alpha = alpha self.conv1 = GATLayer(num_features, hidden_size, alpha)", "h = self.conv2(h, adj, M) z = F.normalize(h, p=2, dim=1) A_pred = self.dot_product_decode(z)", "import GATLayer device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class GAT(nn.Module): def __init__(self,", "alpha) self.conv2 = GATLayer(hidden_size, embedding_size, alpha) def forward(self, x, adj, M): h =", "= torch.sigmoid(torch.matmul(Z, Z.t())) return A_pred class pseudo_gat(nn.Module): def __init__(self, num_features, hidden_size): super(pseudo_gat, self).__init__()", "self.hidden_size = hidden_size self.embedding_size = embedding_size self.alpha = alpha self.conv1 = GATLayer(num_features, hidden_size,", "'cpu') class GAT(nn.Module): def __init__(self, num_features, hidden_size, embedding_size, alpha): super(GAT, self).__init__() self.hidden_size =", "requires_grad=True) def forward(self, x, adj, M): z = self.w1(x) + iden A_pred =", "x, adj, M): h = self.conv1(x, adj, M) h = self.conv2(h, adj, M)", "adj, M): z = self.w1(x) + iden A_pred = torch.sigmoid(torch.matmul(z, z.t())) return A_pred,", "= alpha self.conv1 = GATLayer(num_features, hidden_size, alpha) self.conv2 = GATLayer(hidden_size, embedding_size, alpha) def", "self.conv2 = GATLayer(hidden_size, embedding_size, alpha) def forward(self, x, adj, M): h = self.conv1(x,", "h = self.conv1(x, adj, M) h = self.conv2(h, adj, M) z = F.normalize(h,", "def dot_product_decode(self, Z): A_pred = torch.sigmoid(torch.matmul(Z, Z.t())) return A_pred class pseudo_gat(nn.Module): def __init__(self,", "layer import GATLayer device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class GAT(nn.Module): def", "embedding_size, alpha): super(GAT, self).__init__() self.hidden_size = 
hidden_size self.embedding_size = embedding_size self.alpha = alpha" ]
[ "('frontend', '0002_auto_20191010_0025'), ] operations = [ migrations.AddField( model_name='page', name='js', field=models.TextField(blank=True, help_text='会被放入 <code>script</code> 的", "# Generated by Django 2.1.12 on 2019-10-14 14:21 from django.db import migrations, models", "on 2019-10-14 14:21 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "operations = [ migrations.AddField( model_name='page', name='js', field=models.TextField(blank=True, help_text='会被放入 <code>script</code> 的 JS'), ), ]", "Migration(migrations.Migration): dependencies = [ ('frontend', '0002_auto_20191010_0025'), ] operations = [ migrations.AddField( model_name='page', name='js',", "2019-10-14 14:21 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('frontend',", "14:21 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('frontend', '0002_auto_20191010_0025'),", "dependencies = [ ('frontend', '0002_auto_20191010_0025'), ] operations = [ migrations.AddField( model_name='page', name='js', field=models.TextField(blank=True,", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('frontend', '0002_auto_20191010_0025'), ]", "Django 2.1.12 on 2019-10-14 14:21 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('frontend', '0002_auto_20191010_0025'), ] operations =", "[ ('frontend', '0002_auto_20191010_0025'), ] operations = [ migrations.AddField( model_name='page', name='js', field=models.TextField(blank=True, help_text='会被放入 <code>script</code>", "'0002_auto_20191010_0025'), ] operations = [ migrations.AddField( model_name='page', name='js', field=models.TextField(blank=True, help_text='会被放入 <code>script</code> 的 JS'),", "by Django 2.1.12 on 2019-10-14 14:21 from django.db import 
migrations, models class Migration(migrations.Migration):", "migrations, models class Migration(migrations.Migration): dependencies = [ ('frontend', '0002_auto_20191010_0025'), ] operations = [", "class Migration(migrations.Migration): dependencies = [ ('frontend', '0002_auto_20191010_0025'), ] operations = [ migrations.AddField( model_name='page',", "= [ ('frontend', '0002_auto_20191010_0025'), ] operations = [ migrations.AddField( model_name='page', name='js', field=models.TextField(blank=True, help_text='会被放入", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('frontend', '0002_auto_20191010_0025'), ] operations", "models class Migration(migrations.Migration): dependencies = [ ('frontend', '0002_auto_20191010_0025'), ] operations = [ migrations.AddField(", "2.1.12 on 2019-10-14 14:21 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "] operations = [ migrations.AddField( model_name='page', name='js', field=models.TextField(blank=True, help_text='会被放入 <code>script</code> 的 JS'), ),", "Generated by Django 2.1.12 on 2019-10-14 14:21 from django.db import migrations, models class" ]
[ "is registered in \"util.metrics\" file. for i in metrics_list: assert i in metrics.REGISTERED_METRICS.keys(),", "print(self.color_tool.cyan(toml.dumps(config)[:-1])) # except \"\\n\" print(self.color_tool.cyan(\"=\" * 40)) with open((self.save_dir / f\"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml\").as_posix(), \"w\")", "scores): eer_t, eer, fpr_1_t, fpr_1_fnr, fnr_1_t, fnr_1_fpr = metrics.compute_thresholds(labels, scores) return eer_t, fpr_1_t,", "Acoustics self.acoustic_config = config[\"acoustic\"] # Supported STFT n_fft = self.acoustic_config[\"n_fft\"] hop_length = self.acoustic_config[\"hop_length\"]", "file \"\"\" model_path = model_path.expanduser().absolute() assert model_path.exists(), f\"The file {model_path.as_posix()} is not exist.", "self.validation_interval >= 1 # Trainer.visualization in config self.visualization_config = config[\"trainer\"][\"visualization\"] # In the", "optimizer, scheduler): self.color_tool = colorful self.color_tool.use_style(\"solarized\") self.model = model self.optimizer = optimizer self.scheduler", "eer_t).int(), labels.reshape(-1)) self.writer.add_scalar(f\"Validation/F1\", f1, epoch) self.writer.add_scalar(f\"Validation/Precision\", precision, epoch) self.writer.add_scalar(f\"Validation/recall\", recall, epoch) self.thresholds =", "15} {epoch} epoch {'=' * 15}\")) print(\"[0 seconds] Begin training...\") timer = ExecutionTime()", "as handle: toml.dump(config, handle) self._print_networks([self.model]) def _preload_model(self, model_path): \"\"\" Preload model parameters (in", "map_location=map_location) self.model.load_state_dict(model_checkpoint[\"model\"], strict=False) if self.rank == 0: print(f\"Model preloaded successfully from {model_path.as_posix()}.\") def", "from sklearn.metrics import DetCurveDisplay import src.util.metrics as metrics from src.util import visualization from", "'fpr_1': 0, 'fnr_1': 0} if resume: self._resume_checkpoint() if config[\"meta\"][\"preloaded_model_path\"]: 
self._preload_model(Path(config[\"preloaded_model_path\"])) if self.rank ==", "== 0: print(f\"[{timer.duration()} seconds] Training has finished, validation is in progress...\") if self.save_checkpoint_interval", "if epoch % self.validation_interval == 0: print(f\"[{timer.duration()} seconds] Training has finished, validation is", "\"latest_model.tar\". However, the newer information will no overwrite the older one. torch.save(state_dict, (self.checkpoints_dir", "def _set_models_to_eval_mode(self): self.model.eval() @staticmethod def get_thresholds(labels, scores): eer_t, eer, fpr_1_t, fpr_1_fnr, fnr_1_t, fnr_1_fpr", "the model checkpoint will be saved as \"best_model.tar.\" # The newer best-scored checkpoint", "validation is in progress...\") if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval ==", "= config[\"trainer\"][\"train\"] self.epochs = self.train_config[\"epochs\"] self.save_checkpoint_interval = self.train_config[\"save_checkpoint_interval\"] self.clip_grad_norm_value = self.train_config[\"clip_grad_norm_value\"] assert self.save_checkpoint_interval", "*.tar file \"\"\" model_path = model_path.expanduser().absolute() assert model_path.exists(), f\"The file {model_path.as_posix()} is not", "= torch.load(model_path.as_posix(), map_location=map_location) self.model.load_state_dict(model_checkpoint[\"model\"], strict=False) if self.rank == 0: print(f\"Model preloaded successfully from", "metric score (is_best_epoch=True), the checkpoint of model will be saved as \"<save_dir>/checkpoints/best_model.tar\". 
\"\"\"", "= {'eer': 0, 'fpr_1': 0, 'fnr_1': 0} if resume: self._resume_checkpoint() if config[\"meta\"][\"preloaded_model_path\"]: self._preload_model(Path(config[\"preloaded_model_path\"]))", "for epoch in range(self.start_epoch, self.epochs + 1): if self.rank == 0: print(self.color_tool.yellow(f\"{'=' *", "% self.validation_interval == 0: print(f\"[{timer.duration()} seconds] Training has finished, validation is in progress...\")", "checkpoint[\"best_score\"] self.optimizer.load_state_dict(checkpoint[\"optimizer\"]) self.scheduler.load_state_dict(checkpoint[\"scheduler\"]) self.scaler.load_state_dict(checkpoint[\"scaler\"]) self.model.load_state_dict(checkpoint[\"model\"]) self.thresholds = checkpoint[\"thresholds\"] if self.rank == 0: print(f\"Model", "= self.acoustic_config[\"win_length\"] center = self.acoustic_config[\"center\"] self.torch_stft = partial(stft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center)", "eer_t, fpr_1_t, fnr_1_t, _, _, _ = self.get_thresholds(labels.reshape(-1), predicted.reshape(-1)) f1, _, _, precision,", "latest_model_path = self.checkpoints_dir.expanduser().absolute() / \"latest_model.tar\" assert latest_model_path.exists(), f\"{latest_model_path} does not exist, can not", "def __init__(self, dist, rank, config, resume: bool, model, loss_function, optimizer, scheduler): self.color_tool =", "win_length = self.acoustic_config[\"win_length\"] center = self.acoustic_config[\"center\"] self.torch_stft = partial(stft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank,", "to \"<save_dir>/<config name>/checkpoints\" directory, which consists of: - the epoch number - the", "handle: toml.dump(config, handle) self._print_networks([self.model]) def _preload_model(self, model_path): \"\"\" Preload model parameters (in \"*.tar\"", "self._save_checkpoint(epoch) if self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score): self._save_checkpoint(epoch, 
is_best_epoch=True) print(f\"[{timer.duration()} seconds] This epoch has finished.\") def", "print(self.color_tool.cyan(\"The configurations are as follows: \")) print(self.color_tool.cyan(\"=\" * 40)) print(self.color_tool.cyan(toml.dumps(config)[:-1])) # except \"\\n\"", "config[\"meta\"][\"use_amp\"] self.scaler = GradScaler(enabled=self.use_amp) # Acoustics self.acoustic_config = config[\"acoustic\"] # Supported STFT n_fft", "the 'train.py' file, if the 'resume' item is True, we will update the", "win_length=win_length, device=self.rank, center=center) self.istft = partial(istft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) # Trainer.train", "\"\"\" Preload model parameters (in \"*.tar\" format) at the start of experiment. Args:", "parameters, etc. # New checkpoint will overwrite the older one. torch.save(state_dict, (self.checkpoints_dir /", "6)) display = DetCurveDisplay(fpr=fpr, fnr=1 - tpr, estimator_name=f'ROC_AUC = {roc_auc_mean}') display.plot(axes) self.writer.add_figure(f\"DetCurve\", fig,", "% self.rank} checkpoint = torch.load(latest_model_path.as_posix(), map_location=map_location) self.start_epoch = checkpoint[\"epoch\"] + 1 self.best_score =", "self.best_score, \"optimizer\": self.optimizer.state_dict(), \"scheduler\": self.scheduler.state_dict(), \"scaler\": self.scaler.state_dict(), \"model\": self.model.state_dict(), \"thresholds\": self.thresholds } #", "optimizer parameters - the model parameters Args: is_best_epoch (bool): In current epoch, if", "config[\"trainer\"][\"visualization\"] # In the 'train.py' file, if the 'resume' item is True, we", "a best metric score (is_best_epoch=True) in the current epoch, # the model checkpoint", "fig, epoch) eer_t, fpr_1_t, fnr_1_t, _, _, _ = self.get_thresholds(labels.reshape(-1), predicted.reshape(-1)) f1, _,", "center=center) # Trainer.train in config self.train_config = config[\"trainer\"][\"train\"] self.epochs = self.train_config[\"epochs\"] 
self.save_checkpoint_interval =", "the latest checkpoint. \"\"\" latest_model_path = self.checkpoints_dir.expanduser().absolute() / \"latest_model.tar\" assert latest_model_path.exists(), f\"{latest_model_path} does", "partial from pathlib import Path import toml import torch import colorful import numpy", "in metrics_list # Check if the metric is registered in \"util.metrics\" file. for", "file.\" fpr, tpr, thresholds = metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1)) roc_auc_mean = 0 for metric_name in", "self.writer = visualization.writer(self.logs_dir.as_posix()) self.writer.add_text( tag=\"Configuration\", text_string=f\"<pre> \\n{toml.dumps(config)} \\n</pre>\", global_step=1 ) print(self.color_tool.cyan(\"The configurations are", "{len(models)} models, the number of the parameters is: \") params_of_all_networks = 0 for", "resume: self._resume_checkpoint() if config[\"meta\"][\"preloaded_model_path\"]: self._preload_model(Path(config[\"preloaded_model_path\"])) if self.rank == 0: prepare_empty_dir([self.checkpoints_dir, self.logs_dir], resume=resume) self.writer", "% 0: 'cuda:%d' % self.rank} checkpoint = torch.load(latest_model_path.as_posix(), map_location=map_location) self.start_epoch = checkpoint[\"epoch\"] +", "seconds] Training has finished, validation is in progress...\") if self.save_checkpoint_interval != 0 and", "optimizer self.scheduler = scheduler self.loss_function = loss_function # DistributedDataParallel (DDP) self.rank = rank", "self.best_score = -np.inf if self.save_max_metric_score else np.inf self.save_dir = Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute() / config[\"meta\"][\"experiment_name\"] self.checkpoints_dir", "self.istft = partial(istft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) # Trainer.train in config self.train_config", "Saving {epoch} epoch model checkpoint...\") state_dict = { \"epoch\": epoch, \"best_score\": self.best_score, \"optimizer\":", "in the 
current epoch, # the model checkpoint will be saved as \"best_model.tar.\"", "fnr=1 - tpr, estimator_name=f'ROC_AUC = {roc_auc_mean}') display.plot(axes) self.writer.add_figure(f\"DetCurve\", fig, epoch) eer_t, fpr_1_t, fnr_1_t,", "self.acoustic_config[\"center\"] self.torch_stft = partial(stft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) self.istft = partial(istft, n_fft=n_fft,", "metrics.compute_thresholds(labels, scores) return eer_t, fpr_1_t, fnr_1_t, eer, fpr_1_fnr, fnr_1_fpr def metrics_visualization(self, labels, predicted,", "all checkpoint information, like \"latest_model.tar\". However, the newer information will no overwrite the", "config[\"trainer\"][\"train\"] self.epochs = self.train_config[\"epochs\"] self.save_checkpoint_interval = self.train_config[\"save_checkpoint_interval\"] self.clip_grad_norm_value = self.train_config[\"clip_grad_norm_value\"] assert self.save_checkpoint_interval >=", "(epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) self._set_models_to_eval_mode() metric_score = self._validation_epoch(epoch) if self.save_checkpoint_interval !=", "# If the model get a best metric score (is_best_epoch=True) in the current", "for metric_name in metrics_list: mean_score = metrics.REGISTERED_METRICS[metric_name](fpr, tpr) # Add the mean value", "best metric score (is_best_epoch=True), the checkpoint of model will be saved as \"<save_dir>/checkpoints/best_model.tar\".", "million.\") params_of_all_networks += params_of_network print(f\"The amount of parameters in the project is {params_of_all_networks", "i in metrics_list: assert i in metrics.REGISTERED_METRICS.keys(), f\"{i} is not registered, please check", "print(f\"\\tNetwork {idx}: {params_of_network / 1e6} million.\") params_of_all_networks += params_of_network print(f\"The amount of parameters", "\"logs\" self.thresholds = {'eer': 0, 'fpr_1': 0, 'fnr_1': 0} if resume: self._resume_checkpoint() if", "be saved as 
\"best_model.tar.\" # The newer best-scored checkpoint will overwrite the older", "precision, epoch) self.writer.add_scalar(f\"Validation/recall\", recall, epoch) self.thresholds = {'eer': eer_t, 'fpr_1': fpr_1_t, 'fnr_1': fnr_1_t}", "{roc_auc_mean}') display.plot(axes) self.writer.add_figure(f\"DetCurve\", fig, epoch) eer_t, fpr_1_t, fnr_1_t, _, _, _ = self.get_thresholds(labels.reshape(-1),", "= score return True else: return False @staticmethod def _print_networks(models: list): print(f\"This project", "overwrite the older one. torch.save(state_dict, (self.checkpoints_dir / f\"model_{str(epoch).zfill(4)}.tar\").as_posix()) # If the model get", "self.writer.add_figure(f\"DetCurve\", fig, epoch) eer_t, fpr_1_t, fnr_1_t, _, _, _ = self.get_thresholds(labels.reshape(-1), predicted.reshape(-1)) f1,", "self.start_epoch = checkpoint[\"epoch\"] + 1 self.best_score = checkpoint[\"best_score\"] self.optimizer.load_state_dict(checkpoint[\"optimizer\"]) self.scheduler.load_state_dict(checkpoint[\"scheduler\"]) self.scaler.load_state_dict(checkpoint[\"scaler\"]) self.model.load_state_dict(checkpoint[\"model\"]) self.thresholds", "= plt.subplots(1, 1, figsize=(6, 6)) display = DetCurveDisplay(fpr=fpr, fnr=1 - tpr, estimator_name=f'ROC_AUC =", "If the model get a best metric score (is_best_epoch=True) in the current epoch,", "fig, axes = plt.subplots(1, 1, figsize=(6, 6)) display = DetCurveDisplay(fpr=fpr, fnr=1 - tpr,", "= optimizer self.scheduler = scheduler self.loss_function = loss_function # DistributedDataParallel (DDP) self.rank =", "predicted.reshape(-1)) roc_auc_mean = 0 for metric_name in metrics_list: mean_score = metrics.REGISTERED_METRICS[metric_name](fpr, tpr) #", "if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) self._set_models_to_eval_mode() metric_score", "Args: is_best_epoch (bool): In current epoch, if the model get a best metric", "self.rank = rank self.dist = dist # Automatic mixed 
precision (AMP) self.use_amp =", "= model self.optimizer = optimizer self.scheduler = scheduler self.loss_function = loss_function # DistributedDataParallel", "% self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) if self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score): self._save_checkpoint(epoch, is_best_epoch=True) print(f\"[{timer.duration()} seconds] This", "self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score): self._save_checkpoint(epoch, is_best_epoch=True) print(f\"[{timer.duration()} seconds] This epoch has finished.\") def _train_epoch(self, epoch):", "and score >= self.best_score: self.best_score = score return True elif not save_max_metric_score and", "\\n</pre>\", global_step=1 ) print(self.color_tool.cyan(\"The configurations are as follows: \")) print(self.color_tool.cyan(\"=\" * 40)) print(self.color_tool.cyan(toml.dumps(config)[:-1]))", "self.validation_config[\"save_max_metric_score\"] assert self.validation_interval >= 1 # Trainer.visualization in config self.visualization_config = config[\"trainer\"][\"visualization\"] #", "plt.subplots(1, 1, figsize=(6, 6)) display = DetCurveDisplay(fpr=fpr, fnr=1 - tpr, estimator_name=f'ROC_AUC = {roc_auc_mean}')", "not registered, please check 'util.metrics' file.\" fpr, tpr, thresholds = metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1)) roc_auc_mean", "# Trainer.visualization in config self.visualization_config = config[\"trainer\"][\"visualization\"] # In the 'train.py' file, if", "assert \"ROC_AUC\" in metrics_list # Check if the metric is registered in \"util.metrics\"", "self.color_tool.use_style(\"solarized\") self.model = model self.optimizer = optimizer self.scheduler = scheduler self.loss_function = loss_function", "model self.optimizer = optimizer self.scheduler = scheduler self.loss_function = loss_function # DistributedDataParallel (DDP)", "# DistributedDataParallel (DDP) self.rank = rank self.dist = dist # 
Automatic mixed precision", "fnr_1_fpr def metrics_visualization(self, labels, predicted, metrics_list, epoch): \"\"\" Get metrics on validation dataset", "numpy as np import matplotlib.pyplot as plt from joblib import Parallel, delayed from", "in metrics_list: assert i in metrics.REGISTERED_METRICS.keys(), f\"{i} is not registered, please check 'util.metrics'", "{epoch} epoch {'=' * 15}\")) print(\"[0 seconds] Begin training...\") timer = ExecutionTime() self._set_models_to_train_mode()", "\"best_model.tar.\" # The newer best-scored checkpoint will overwrite the older one. if is_best_epoch:", "= DetCurveDisplay(fpr=fpr, fnr=1 - tpr, estimator_name=f'ROC_AUC = {roc_auc_mean}') display.plot(axes) self.writer.add_figure(f\"DetCurve\", fig, epoch) eer_t,", "= -np.inf if self.save_max_metric_score else np.inf self.save_dir = Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute() / config[\"meta\"][\"experiment_name\"] self.checkpoints_dir =", "of model will be saved as \"<save_dir>/checkpoints/best_model.tar\". \"\"\" print(f\"\\t Saving {epoch} epoch model", "in history - the optimizer parameters - the model parameters Args: is_best_epoch (bool):", "the older one. torch.save(state_dict, (self.checkpoints_dir / \"latest_model.tar\").as_posix()) # \"model_{epoch_number}.tar\" # Contains all checkpoint", "consists of: - the epoch number - the best metric score in history", "best metric score (is_best_epoch=True) in the current epoch, # the model checkpoint will", ">= self.best_score: self.best_score = score return True elif not save_max_metric_score and score <=", "like \"latest_model.tar\". However, the newer information will no overwrite the older one. 
torch.save(state_dict,", "@staticmethod def _print_networks(models: list): print(f\"This project contains {len(models)} models, the number of the", "self.thresholds = {'eer': eer_t, 'fpr_1': fpr_1_t, 'fnr_1': fnr_1_t} return roc_auc_mean def train(self): for", "Parallel, delayed from torch.cuda.amp import GradScaler from sklearn.metrics import DetCurveDisplay import src.util.metrics as", "check path.\" map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank} model_checkpoint = torch.load(model_path.as_posix(),", "file. for i in metrics_list: assert i in metrics.REGISTERED_METRICS.keys(), f\"{i} is not registered,", "{'eer': 0, 'fpr_1': 0, 'fnr_1': 0} if resume: self._resume_checkpoint() if config[\"meta\"][\"preloaded_model_path\"]: self._preload_model(Path(config[\"preloaded_model_path\"])) if", "import prepare_empty_dir, ExecutionTime plt.switch_backend('agg') class BaseTrainer: def __init__(self, dist, rank, config, resume: bool,", "GradScaler from sklearn.metrics import DetCurveDisplay import src.util.metrics as metrics from src.util import visualization", "training...\") timer = ExecutionTime() self._set_models_to_train_mode() self._train_epoch(epoch) # Only use the first GPU (process)", "plt from joblib import Parallel, delayed from torch.cuda.amp import GradScaler from sklearn.metrics import", "self.rank} model_checkpoint = torch.load(model_path.as_posix(), map_location=map_location) self.model.load_state_dict(model_checkpoint[\"model\"], strict=False) if self.rank == 0: print(f\"Model preloaded", "= {'eer': eer_t, 'fpr_1': fpr_1_t, 'fnr_1': fnr_1_t} return roc_auc_mean def train(self): for epoch", "import toml import torch import colorful import numpy as np import matplotlib.pyplot as", "labels.reshape(-1)) self.writer.add_scalar(f\"Validation/F1\", f1, epoch) self.writer.add_scalar(f\"Validation/Precision\", precision, epoch) self.writer.add_scalar(f\"Validation/recall\", recall, epoch) self.thresholds = {'eer':", ">= 1 # Trainer.validation in config 
self.validation_config = config[\"trainer\"][\"validation\"] self.validation_interval = self.validation_config[\"validation_interval\"] self.save_max_metric_score", "\"*.tar\" format) at the start of experiment. Args: model_path (Path): The file path", "self.save_dir / \"logs\" self.thresholds = {'eer': 0, 'fpr_1': 0, 'fnr_1': 0} if resume:", "global_step=1 ) print(self.color_tool.cyan(\"The configurations are as follows: \")) print(self.color_tool.cyan(\"=\" * 40)) print(self.color_tool.cyan(toml.dumps(config)[:-1])) #", "self.train_config[\"epochs\"] self.save_checkpoint_interval = self.train_config[\"save_checkpoint_interval\"] self.clip_grad_norm_value = self.train_config[\"clip_grad_norm_value\"] assert self.save_checkpoint_interval >= 1 # Trainer.validation", "as np import matplotlib.pyplot as plt from joblib import Parallel, delayed from torch.cuda.amp", "model get a best metric score (is_best_epoch=True) in the current epoch, # the", "value of the metric to tensorboard self.writer.add_scalar(f\"Validation/{metric_name}\", mean_score, epoch) if metric_name == \"ROC_AUC\":", "self.scheduler.load_state_dict(checkpoint[\"scheduler\"]) self.scaler.load_state_dict(checkpoint[\"scaler\"]) self.model.load_state_dict(checkpoint[\"model\"]) self.thresholds = checkpoint[\"thresholds\"] if self.rank == 0: print(f\"Model checkpoint loaded.", "checkpoint information, like \"latest_model.tar\". 
However, the newer information will no overwrite the older", "True elif not save_max_metric_score and score <= self.best_score: self.best_score = score return True", "in enumerate(models, start=1): params_of_network = 0 for param in model.parameters(): params_of_network += param.numel()", "is {params_of_all_networks / 1e6} million.\") def _set_models_to_train_mode(self): self.model.train() def _set_models_to_eval_mode(self): self.model.eval() @staticmethod def", "is True, we will update the following args: self.start_epoch = 1 self.best_score =", "config[\"acoustic\"] # Supported STFT n_fft = self.acoustic_config[\"n_fft\"] hop_length = self.acoustic_config[\"hop_length\"] win_length = self.acoustic_config[\"win_length\"]", "display.plot(axes) self.writer.add_figure(f\"DetCurve\", fig, epoch) eer_t, fpr_1_t, fnr_1_t, _, _, _ = self.get_thresholds(labels.reshape(-1), predicted.reshape(-1))", "self.save_checkpoint_interval >= 1 # Trainer.validation in config self.validation_config = config[\"trainer\"][\"validation\"] self.validation_interval = self.validation_config[\"validation_interval\"]", "the parameters is: \") params_of_all_networks = 0 for idx, model in enumerate(models, start=1):", "from joblib import Parallel, delayed from torch.cuda.amp import GradScaler from sklearn.metrics import DetCurveDisplay", "The file path of the *.tar file \"\"\" model_path = model_path.expanduser().absolute() assert model_path.exists(),", "Resume experiment from the latest checkpoint. \"\"\" latest_model_path = self.checkpoints_dir.expanduser().absolute() / \"latest_model.tar\" assert", "recall, epoch) self.thresholds = {'eer': eer_t, 'fpr_1': fpr_1_t, 'fnr_1': fnr_1_t} return roc_auc_mean def", "# Contains all checkpoint information, including the optimizer parameters, the model parameters, etc.", "model will be saved as \"<save_dir>/checkpoints/best_model.tar\". 
\"\"\" print(f\"\\t Saving {epoch} epoch model checkpoint...\")", "epoch, # the model checkpoint will be saved as \"best_model.tar.\" # The newer", "self.scheduler = scheduler self.loss_function = loss_function # DistributedDataParallel (DDP) self.rank = rank self.dist", "# Contains all checkpoint information, like \"latest_model.tar\". However, the newer information will no", "'fpr_1': fpr_1_t, 'fnr_1': fnr_1_t} return roc_auc_mean def train(self): for epoch in range(self.start_epoch, self.epochs", "assert self.validation_interval >= 1 # Trainer.visualization in config self.visualization_config = config[\"trainer\"][\"visualization\"] # In", "_ = self.get_thresholds(labels.reshape(-1), predicted.reshape(-1)) f1, _, _, precision, recall = metrics.get_f1((predicted.reshape(-1) > eer_t).int(),", "self.acoustic_config = config[\"acoustic\"] # Supported STFT n_fft = self.acoustic_config[\"n_fft\"] hop_length = self.acoustic_config[\"hop_length\"] win_length", "= checkpoint[\"thresholds\"] if self.rank == 0: print(f\"Model checkpoint loaded. Training will begin at", "\"\"\" Save checkpoint to \"<save_dir>/<config name>/checkpoints\" directory, which consists of: - the epoch", "save_max_metric_score and score <= self.best_score: self.best_score = score return True else: return False", "the best metric score in history - the optimizer parameters - the model", "best score in the {epoch} epoch, saving...\")) torch.save(state_dict, (self.checkpoints_dir / \"best_model.tar\").as_posix()) def _is_best_epoch(self,", "self.writer.add_scalar(f\"Validation/recall\", recall, epoch) self.thresholds = {'eer': eer_t, 'fpr_1': fpr_1_t, 'fnr_1': fnr_1_t} return roc_auc_mean", "dist # Automatic mixed precision (AMP) self.use_amp = config[\"meta\"][\"use_amp\"] self.scaler = GradScaler(enabled=self.use_amp) #", "newer best-scored checkpoint will overwrite the older one. 
if is_best_epoch: print(self.color_tool.red(f\"\\t Found a", "epoch, \"best_score\": self.best_score, \"optimizer\": self.optimizer.state_dict(), \"scheduler\": self.scheduler.state_dict(), \"scaler\": self.scaler.state_dict(), \"model\": self.model.state_dict(), \"thresholds\": self.thresholds", "config[\"meta\"][\"preloaded_model_path\"]: self._preload_model(Path(config[\"preloaded_model_path\"])) if self.rank == 0: prepare_empty_dir([self.checkpoints_dir, self.logs_dir], resume=resume) self.writer = visualization.writer(self.logs_dir.as_posix()) self.writer.add_text(", "https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank} checkpoint = torch.load(latest_model_path.as_posix(), map_location=map_location)", "import GradScaler from sklearn.metrics import DetCurveDisplay import src.util.metrics as metrics from src.util import", "score return True elif not save_max_metric_score and score <= self.best_score: self.best_score = score", "one. torch.save(state_dict, (self.checkpoints_dir / f\"model_{str(epoch).zfill(4)}.tar\").as_posix()) # If the model get a best metric", "self.checkpoints_dir = self.save_dir / \"checkpoints\" self.logs_dir = self.save_dir / \"logs\" self.thresholds = {'eer':", "Save checkpoint to \"<save_dir>/<config name>/checkpoints\" directory, which consists of: - the epoch number", "src.util.acoustic_utils import stft, istft from src.util.utils import prepare_empty_dir, ExecutionTime plt.switch_backend('agg') class BaseTrainer: def", "best-scored checkpoint will overwrite the older one. 
if is_best_epoch: print(self.color_tool.red(f\"\\t Found a best", "self._save_checkpoint(epoch, is_best_epoch=True) print(f\"[{timer.duration()} seconds] This epoch has finished.\") def _train_epoch(self, epoch): raise NotImplementedError", "resume=resume) self.writer = visualization.writer(self.logs_dir.as_posix()) self.writer.add_text( tag=\"Configuration\", text_string=f\"<pre> \\n{toml.dumps(config)} \\n</pre>\", global_step=1 ) print(self.color_tool.cyan(\"The configurations", "# Automatic mixed precision (AMP) self.use_amp = config[\"meta\"][\"use_amp\"] self.scaler = GradScaler(enabled=self.use_amp) # Acoustics", "are as follows: \")) print(self.color_tool.cyan(\"=\" * 40)) print(self.color_tool.cyan(toml.dumps(config)[:-1])) # except \"\\n\" print(self.color_tool.cyan(\"=\" *", "/ \"logs\" self.thresholds = {'eer': 0, 'fpr_1': 0, 'fnr_1': 0} if resume: self._resume_checkpoint()", "torch.save(state_dict, (self.checkpoints_dir / f\"model_{str(epoch).zfill(4)}.tar\").as_posix()) # If the model get a best metric score", "score return True else: return False @staticmethod def _print_networks(models: list): print(f\"This project contains", "(self.checkpoints_dir / \"best_model.tar\").as_posix()) def _is_best_epoch(self, score, save_max_metric_score=True): \"\"\" Check if the current model", "including the optimizer parameters, the model parameters, etc. 
# New checkpoint will overwrite", "torch.save(state_dict, (self.checkpoints_dir / \"best_model.tar\").as_posix()) def _is_best_epoch(self, score, save_max_metric_score=True): \"\"\" Check if the current", "STFT n_fft = self.acoustic_config[\"n_fft\"] hop_length = self.acoustic_config[\"hop_length\"] win_length = self.acoustic_config[\"win_length\"] center = self.acoustic_config[\"center\"]", "models, the number of the parameters is: \") params_of_all_networks = 0 for idx,", "colorful import numpy as np import matplotlib.pyplot as plt from joblib import Parallel,", "score >= self.best_score: self.best_score = score return True elif not save_max_metric_score and score", "map_location=map_location) self.start_epoch = checkpoint[\"epoch\"] + 1 self.best_score = checkpoint[\"best_score\"] self.optimizer.load_state_dict(checkpoint[\"optimizer\"]) self.scheduler.load_state_dict(checkpoint[\"scheduler\"]) self.scaler.load_state_dict(checkpoint[\"scaler\"]) self.model.load_state_dict(checkpoint[\"model\"])", "self.rank == 0: print(self.color_tool.yellow(f\"{'=' * 15} {epoch} epoch {'=' * 15}\")) print(\"[0 seconds]", "'fnr_1': 0} if resume: self._resume_checkpoint() if config[\"meta\"][\"preloaded_model_path\"]: self._preload_model(Path(config[\"preloaded_model_path\"])) if self.rank == 0: prepare_empty_dir([self.checkpoints_dir,", "# see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank} checkpoint =", "elif not save_max_metric_score and score <= self.best_score: self.best_score = score return True else:", "= self.validation_config[\"save_max_metric_score\"] assert self.validation_interval >= 1 # Trainer.visualization in config self.visualization_config = config[\"trainer\"][\"visualization\"]", "epoch % self.validation_interval == 0: print(f\"[{timer.duration()} seconds] Training has finished, validation is in", "= 0 for param in model.parameters(): params_of_network += param.numel() 
print(f\"\\tNetwork {idx}: {params_of_network /", "# Supported STFT n_fft = self.acoustic_config[\"n_fft\"] hop_length = self.acoustic_config[\"hop_length\"] win_length = self.acoustic_config[\"win_length\"] center", "save_max_metric_score=self.save_max_metric_score): self._save_checkpoint(epoch, is_best_epoch=True) print(f\"[{timer.duration()} seconds] This epoch has finished.\") def _train_epoch(self, epoch): raise", "in \"util.metrics\" file. for i in metrics_list: assert i in metrics.REGISTERED_METRICS.keys(), f\"{i} is", "device=self.rank, center=center) self.istft = partial(istft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) # Trainer.train in", "= loss_function # DistributedDataParallel (DDP) self.rank = rank self.dist = dist # Automatic", "= scheduler self.loss_function = loss_function # DistributedDataParallel (DDP) self.rank = rank self.dist =", "+ 1 self.best_score = checkpoint[\"best_score\"] self.optimizer.load_state_dict(checkpoint[\"optimizer\"]) self.scheduler.load_state_dict(checkpoint[\"scheduler\"]) self.scaler.load_state_dict(checkpoint[\"scaler\"]) self.model.load_state_dict(checkpoint[\"model\"]) self.thresholds = checkpoint[\"thresholds\"] if", "epoch) if metric_name == \"ROC_AUC\": roc_auc_mean = mean_score fig, axes = plt.subplots(1, 1,", "is_best_epoch (bool): In current epoch, if the model get a best metric score", "parameters in the project is {params_of_all_networks / 1e6} million.\") def _set_models_to_train_mode(self): self.model.train() def", "(self.checkpoints_dir / f\"model_{str(epoch).zfill(4)}.tar\").as_posix()) # If the model get a best metric score (is_best_epoch=True)", "from functools import partial from pathlib import Path import toml import torch import", "file {model_path.as_posix()} is not exist. 
please check path.\" map_location = {'cuda:%d' % 0:", "self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) self._set_models_to_eval_mode() metric_score =", "start of experiment. Args: model_path (Path): The file path of the *.tar file", "{model_path.as_posix()}.\") def _resume_checkpoint(self): \"\"\" Resume experiment from the latest checkpoint. \"\"\" latest_model_path =", "will be saved as \"<save_dir>/checkpoints/best_model.tar\". \"\"\" print(f\"\\t Saving {epoch} epoch model checkpoint...\") state_dict", "fpr_1_t, fpr_1_fnr, fnr_1_t, fnr_1_fpr = metrics.compute_thresholds(labels, scores) return eer_t, fpr_1_t, fnr_1_t, eer, fpr_1_fnr,", "f\"{i} is not registered, please check 'util.metrics' file.\" fpr, tpr, thresholds = metrics.roc_curve(labels.reshape(-1),", "display = DetCurveDisplay(fpr=fpr, fnr=1 - tpr, estimator_name=f'ROC_AUC = {roc_auc_mean}') display.plot(axes) self.writer.add_figure(f\"DetCurve\", fig, epoch)", "self.scaler.state_dict(), \"model\": self.model.state_dict(), \"thresholds\": self.thresholds } # \"latest_model.tar\" # Contains all checkpoint information,", "Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute() / config[\"meta\"][\"experiment_name\"] self.checkpoints_dir = self.save_dir / \"checkpoints\" self.logs_dir = self.save_dir / \"logs\"", "Trainer.train in config self.train_config = config[\"trainer\"][\"train\"] self.epochs = self.train_config[\"epochs\"] self.save_checkpoint_interval = self.train_config[\"save_checkpoint_interval\"] self.clip_grad_norm_value", "of experiment. 
Args: model_path (Path): The file path of the *.tar file \"\"\"", "f\"{latest_model_path} does not exist, can not load latest checkpoint.\" self.dist.barrier() # see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work", "- the model parameters Args: is_best_epoch (bool): In current epoch, if the model", "%H:%M:%S')}.toml\").as_posix(), \"w\") as handle: toml.dump(config, handle) self._print_networks([self.model]) def _preload_model(self, model_path): \"\"\" Preload model", "{idx}: {params_of_network / 1e6} million.\") params_of_all_networks += params_of_network print(f\"The amount of parameters in", "if self.rank == 0: print(f\"Model preloaded successfully from {model_path.as_posix()}.\") def _resume_checkpoint(self): \"\"\" Resume", "self.best_score: self.best_score = score return True elif not save_max_metric_score and score <= self.best_score:", "metrics.REGISTERED_METRICS.keys(), f\"{i} is not registered, please check 'util.metrics' file.\" fpr, tpr, thresholds =", "self.model.load_state_dict(model_checkpoint[\"model\"], strict=False) if self.rank == 0: print(f\"Model preloaded successfully from {model_path.as_posix()}.\") def _resume_checkpoint(self):", "= config[\"meta\"][\"use_amp\"] self.scaler = GradScaler(enabled=self.use_amp) # Acoustics self.acoustic_config = config[\"acoustic\"] # Supported STFT", "{'cuda:%d' % 0: 'cuda:%d' % self.rank} checkpoint = torch.load(latest_model_path.as_posix(), map_location=map_location) self.start_epoch = checkpoint[\"epoch\"]", "best metric score \"\"\" if save_max_metric_score and score >= self.best_score: self.best_score = score", "import numpy as np import matplotlib.pyplot as plt from joblib import Parallel, delayed", "Args: model_path (Path): The file path of the *.tar file \"\"\" model_path =", "model.parameters(): params_of_network += param.numel() print(f\"\\tNetwork {idx}: {params_of_network / 1e6} million.\") params_of_all_networks += params_of_network", "score 
(is_best_epoch=True), the checkpoint of model will be saved as \"<save_dir>/checkpoints/best_model.tar\". \"\"\" print(f\"\\t", "# Trainer.validation in config self.validation_config = config[\"trainer\"][\"validation\"] self.validation_interval = self.validation_config[\"validation_interval\"] self.save_max_metric_score = self.validation_config[\"save_max_metric_score\"]", "0, 'fpr_1': 0, 'fnr_1': 0} if resume: self._resume_checkpoint() if config[\"meta\"][\"preloaded_model_path\"]: self._preload_model(Path(config[\"preloaded_model_path\"])) if self.rank", "list): print(f\"This project contains {len(models)} models, the number of the parameters is: \")", "'train.py' file, if the 'resume' item is True, we will update the following", "checkpoint[\"epoch\"] + 1 self.best_score = checkpoint[\"best_score\"] self.optimizer.load_state_dict(checkpoint[\"optimizer\"]) self.scheduler.load_state_dict(checkpoint[\"scheduler\"]) self.scaler.load_state_dict(checkpoint[\"scaler\"]) self.model.load_state_dict(checkpoint[\"model\"]) self.thresholds = checkpoint[\"thresholds\"]", "metric score in history - the optimizer parameters - the model parameters Args:", "0: if epoch % self.validation_interval == 0: print(f\"[{timer.duration()} seconds] Training has finished, validation", "checkpoint. 
\"\"\" latest_model_path = self.checkpoints_dir.expanduser().absolute() / \"latest_model.tar\" assert latest_model_path.exists(), f\"{latest_model_path} does not exist,", "scores) return eer_t, fpr_1_t, fnr_1_t, eer, fpr_1_fnr, fnr_1_fpr def metrics_visualization(self, labels, predicted, metrics_list,", "params_of_all_networks = 0 for idx, model in enumerate(models, start=1): params_of_network = 0 for", "# In the 'train.py' file, if the 'resume' item is True, we will", "\"\\n\" print(self.color_tool.cyan(\"=\" * 40)) with open((self.save_dir / f\"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml\").as_posix(), \"w\") as handle: toml.dump(config,", "= {roc_auc_mean}') display.plot(axes) self.writer.add_figure(f\"DetCurve\", fig, epoch) eer_t, fpr_1_t, fnr_1_t, _, _, _ =", "resume: bool, model, loss_function, optimizer, scheduler): self.color_tool = colorful self.color_tool.use_style(\"solarized\") self.model = model", "self.best_score: self.best_score = score return True else: return False @staticmethod def _print_networks(models: list):", "will begin at {self.start_epoch} epoch.\") def _save_checkpoint(self, epoch, is_best_epoch=False): \"\"\" Save checkpoint to", "This epoch has finished.\") def _train_epoch(self, epoch): raise NotImplementedError def _validation_epoch(self, epoch): raise", "metrics_visualization(self, labels, predicted, metrics_list, epoch): \"\"\" Get metrics on validation dataset by paralleling.", "Check if the current model got the best metric score \"\"\" if save_max_metric_score", "self.best_score = checkpoint[\"best_score\"] self.optimizer.load_state_dict(checkpoint[\"optimizer\"]) self.scheduler.load_state_dict(checkpoint[\"scheduler\"]) self.scaler.load_state_dict(checkpoint[\"scaler\"]) self.model.load_state_dict(checkpoint[\"model\"]) self.thresholds = checkpoint[\"thresholds\"] if self.rank ==", "/ 1e6} million.\") def _set_models_to_train_mode(self): self.model.train() def _set_models_to_eval_mode(self): self.model.eval() @staticmethod def 
get_thresholds(labels, scores):", "+ 1): if self.rank == 0: print(self.color_tool.yellow(f\"{'=' * 15} {epoch} epoch {'=' *", "from pathlib import Path import toml import torch import colorful import numpy as", "mean_score = metrics.REGISTERED_METRICS[metric_name](fpr, tpr) # Add the mean value of the metric to", "assert model_path.exists(), f\"The file {model_path.as_posix()} is not exist. please check path.\" map_location =", "the best metric score \"\"\" if save_max_metric_score and score >= self.best_score: self.best_score =", "model parameters Args: is_best_epoch (bool): In current epoch, if the model get a", "1): if self.rank == 0: print(self.color_tool.yellow(f\"{'=' * 15} {epoch} epoch {'=' * 15}\"))", "params_of_network = 0 for param in model.parameters(): params_of_network += param.numel() print(f\"\\tNetwork {idx}: {params_of_network", "1 self.best_score = checkpoint[\"best_score\"] self.optimizer.load_state_dict(checkpoint[\"optimizer\"]) self.scheduler.load_state_dict(checkpoint[\"scheduler\"]) self.scaler.load_state_dict(checkpoint[\"scaler\"]) self.model.load_state_dict(checkpoint[\"model\"]) self.thresholds = checkpoint[\"thresholds\"] if self.rank", "from src.util.acoustic_utils import stft, istft from src.util.utils import prepare_empty_dir, ExecutionTime plt.switch_backend('agg') class BaseTrainer:", "bool, model, loss_function, optimizer, scheduler): self.color_tool = colorful self.color_tool.use_style(\"solarized\") self.model = model self.optimizer", "the metric to tensorboard self.writer.add_scalar(f\"Validation/{metric_name}\", mean_score, epoch) if metric_name == \"ROC_AUC\": roc_auc_mean =", "self.dist.barrier() # see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank} checkpoint", "fpr_1_t, fnr_1_t, eer, fpr_1_fnr, fnr_1_fpr def metrics_visualization(self, labels, predicted, metrics_list, epoch): \"\"\" Get", "matplotlib.pyplot as plt from joblib import 
Parallel, delayed from torch.cuda.amp import GradScaler from", "= self.train_config[\"clip_grad_norm_value\"] assert self.save_checkpoint_interval >= 1 # Trainer.validation in config self.validation_config = config[\"trainer\"][\"validation\"]", "follows: \")) print(self.color_tool.cyan(\"=\" * 40)) print(self.color_tool.cyan(toml.dumps(config)[:-1])) # except \"\\n\" print(self.color_tool.cyan(\"=\" * 40)) with", "all checkpoint information, including the optimizer parameters, the model parameters, etc. # New", "the model parameters, etc. # New checkpoint will overwrite the older one. torch.save(state_dict,", "ExecutionTime plt.switch_backend('agg') class BaseTrainer: def __init__(self, dist, rank, config, resume: bool, model, loss_function,", "metric_score = self._validation_epoch(epoch) if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval == 0):", "information, like \"latest_model.tar\". However, the newer information will no overwrite the older one.", "model checkpoint...\") state_dict = { \"epoch\": epoch, \"best_score\": self.best_score, \"optimizer\": self.optimizer.state_dict(), \"scheduler\": self.scheduler.state_dict(),", "{'eer': eer_t, 'fpr_1': fpr_1_t, 'fnr_1': fnr_1_t} return roc_auc_mean def train(self): for epoch in", "be saved as \"<save_dir>/checkpoints/best_model.tar\". 
\"\"\" print(f\"\\t Saving {epoch} epoch model checkpoint...\") state_dict =", "= 1 self.best_score = -np.inf if self.save_max_metric_score else np.inf self.save_dir = Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute() /", "# except \"\\n\" print(self.color_tool.cyan(\"=\" * 40)) with open((self.save_dir / f\"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml\").as_posix(), \"w\") as", "self.rank == 0: print(f\"Model preloaded successfully from {model_path.as_posix()}.\") def _resume_checkpoint(self): \"\"\" Resume experiment", "- the best metric score in history - the optimizer parameters - the", "if metric_name == \"ROC_AUC\": roc_auc_mean = mean_score fig, axes = plt.subplots(1, 1, figsize=(6,", "1, figsize=(6, 6)) display = DetCurveDisplay(fpr=fpr, fnr=1 - tpr, estimator_name=f'ROC_AUC = {roc_auc_mean}') display.plot(axes)", "n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) self.istft = partial(istft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center)", "project contains {len(models)} models, the number of the parameters is: \") params_of_all_networks =", "= 0 for metric_name in metrics_list: mean_score = metrics.REGISTERED_METRICS[metric_name](fpr, tpr) # Add the", "rank, config, resume: bool, model, loss_function, optimizer, scheduler): self.color_tool = colorful self.color_tool.use_style(\"solarized\") self.model", "registered, please check 'util.metrics' file.\" fpr, tpr, thresholds = metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1)) roc_auc_mean =", "self.scaler = GradScaler(enabled=self.use_amp) # Acoustics self.acoustic_config = config[\"acoustic\"] # Supported STFT n_fft =", "joblib import Parallel, delayed from torch.cuda.amp import GradScaler from sklearn.metrics import DetCurveDisplay import", "in config self.validation_config = config[\"trainer\"][\"validation\"] self.validation_interval = self.validation_config[\"validation_interval\"] 
self.save_max_metric_score = self.validation_config[\"save_max_metric_score\"] assert self.validation_interval", "print(f\"[{timer.duration()} seconds] This epoch has finished.\") def _train_epoch(self, epoch): raise NotImplementedError def _validation_epoch(self,", "one. if is_best_epoch: print(self.color_tool.red(f\"\\t Found a best score in the {epoch} epoch, saving...\"))", "config[\"trainer\"][\"validation\"] self.validation_interval = self.validation_config[\"validation_interval\"] self.save_max_metric_score = self.validation_config[\"save_max_metric_score\"] assert self.validation_interval >= 1 # Trainer.visualization", "checkpoint loaded. Training will begin at {self.start_epoch} epoch.\") def _save_checkpoint(self, epoch, is_best_epoch=False): \"\"\"", "i in metrics.REGISTERED_METRICS.keys(), f\"{i} is not registered, please check 'util.metrics' file.\" fpr, tpr,", "_, precision, recall = metrics.get_f1((predicted.reshape(-1) > eer_t).int(), labels.reshape(-1)) self.writer.add_scalar(f\"Validation/F1\", f1, epoch) self.writer.add_scalar(f\"Validation/Precision\", precision,", "loss_function # DistributedDataParallel (DDP) self.rank = rank self.dist = dist # Automatic mixed", "= self.save_dir / \"checkpoints\" self.logs_dir = self.save_dir / \"logs\" self.thresholds = {'eer': 0,", "as follows: \")) print(self.color_tool.cyan(\"=\" * 40)) print(self.color_tool.cyan(toml.dumps(config)[:-1])) # except \"\\n\" print(self.color_tool.cyan(\"=\" * 40))", "contains {len(models)} models, the number of the parameters is: \") params_of_all_networks = 0", "\"\"\" assert \"ROC_AUC\" in metrics_list # Check if the metric is registered in", "directory, which consists of: - the epoch number - the best metric score", "self._train_epoch(epoch) # Only use the first GPU (process) to the validation. 
if self.rank", "self.start_epoch = 1 self.best_score = -np.inf if self.save_max_metric_score else np.inf self.save_dir = Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute()", "self.validation_config[\"validation_interval\"] self.save_max_metric_score = self.validation_config[\"save_max_metric_score\"] assert self.validation_interval >= 1 # Trainer.visualization in config self.visualization_config", "the number of the parameters is: \") params_of_all_networks = 0 for idx, model", "\\n{toml.dumps(config)} \\n</pre>\", global_step=1 ) print(self.color_tool.cyan(\"The configurations are as follows: \")) print(self.color_tool.cyan(\"=\" * 40))", "latest checkpoint.\" self.dist.barrier() # see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work map_location = {'cuda:%d' % 0: 'cuda:%d' %", "40)) with open((self.save_dir / f\"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml\").as_posix(), \"w\") as handle: toml.dump(config, handle) self._print_networks([self.model]) def", "return eer_t, fpr_1_t, fnr_1_t, eer, fpr_1_fnr, fnr_1_fpr def metrics_visualization(self, labels, predicted, metrics_list, epoch):", "update the following args: self.start_epoch = 1 self.best_score = -np.inf if self.save_max_metric_score else", "self.thresholds } # \"latest_model.tar\" # Contains all checkpoint information, including the optimizer parameters,", "config self.validation_config = config[\"trainer\"][\"validation\"] self.validation_interval = self.validation_config[\"validation_interval\"] self.save_max_metric_score = self.validation_config[\"save_max_metric_score\"] assert self.validation_interval >=", "(is_best_epoch=True) in the current epoch, # the model checkpoint will be saved as", "check 'util.metrics' file.\" fpr, tpr, thresholds = metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1)) roc_auc_mean = 0 for", "= self.save_dir / \"logs\" self.thresholds = {'eer': 0, 'fpr_1': 0, 'fnr_1': 0} if", "0: print(self.color_tool.yellow(f\"{'=' * 15} 
{epoch} epoch {'=' * 15}\")) print(\"[0 seconds] Begin training...\")", "experiment from the latest checkpoint. \"\"\" latest_model_path = self.checkpoints_dir.expanduser().absolute() / \"latest_model.tar\" assert latest_model_path.exists(),", "latest checkpoint. \"\"\" latest_model_path = self.checkpoints_dir.expanduser().absolute() / \"latest_model.tar\" assert latest_model_path.exists(), f\"{latest_model_path} does not", "print(self.color_tool.cyan(\"=\" * 40)) with open((self.save_dir / f\"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml\").as_posix(), \"w\") as handle: toml.dump(config, handle)", "(in \"*.tar\" format) at the start of experiment. Args: model_path (Path): The file", "/ \"latest_model.tar\" assert latest_model_path.exists(), f\"{latest_model_path} does not exist, can not load latest checkpoint.\"", "newer information will no overwrite the older one. torch.save(state_dict, (self.checkpoints_dir / f\"model_{str(epoch).zfill(4)}.tar\").as_posix()) #", "/ \"latest_model.tar\").as_posix()) # \"model_{epoch_number}.tar\" # Contains all checkpoint information, like \"latest_model.tar\". However, the", "* 15} {epoch} epoch {'=' * 15}\")) print(\"[0 seconds] Begin training...\") timer =", "self.save_dir = Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute() / config[\"meta\"][\"experiment_name\"] self.checkpoints_dir = self.save_dir / \"checkpoints\" self.logs_dir = self.save_dir", "checkpoint will overwrite the older one. if is_best_epoch: print(self.color_tool.red(f\"\\t Found a best score", ">= 1 # Trainer.visualization in config self.visualization_config = config[\"trainer\"][\"visualization\"] # In the 'train.py'", "if self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score): self._save_checkpoint(epoch, is_best_epoch=True) print(f\"[{timer.duration()} seconds] This epoch has finished.\") def _train_epoch(self,", "validation. 
if self.rank == 0: if epoch % self.validation_interval == 0: print(f\"[{timer.duration()} seconds]", "BaseTrainer: def __init__(self, dist, rank, config, resume: bool, model, loss_function, optimizer, scheduler): self.color_tool", "(DDP) self.rank = rank self.dist = dist # Automatic mixed precision (AMP) self.use_amp", "self.scheduler.state_dict(), \"scaler\": self.scaler.state_dict(), \"model\": self.model.state_dict(), \"thresholds\": self.thresholds } # \"latest_model.tar\" # Contains all", "self.best_score = score return True else: return False @staticmethod def _print_networks(models: list): print(f\"This", "self.loss_function = loss_function # DistributedDataParallel (DDP) self.rank = rank self.dist = dist #", "no overwrite the older one. torch.save(state_dict, (self.checkpoints_dir / f\"model_{str(epoch).zfill(4)}.tar\").as_posix()) # If the model", "plt.switch_backend('agg') class BaseTrainer: def __init__(self, dist, rank, config, resume: bool, model, loss_function, optimizer,", "in model.parameters(): params_of_network += param.numel() print(f\"\\tNetwork {idx}: {params_of_network / 1e6} million.\") params_of_all_networks +=", ") print(self.color_tool.cyan(\"The configurations are as follows: \")) print(self.color_tool.cyan(\"=\" * 40)) print(self.color_tool.cyan(toml.dumps(config)[:-1])) # except", "device=self.rank, center=center) # Trainer.train in config self.train_config = config[\"trainer\"][\"train\"] self.epochs = self.train_config[\"epochs\"] self.save_checkpoint_interval", "\"best_model.tar\").as_posix()) def _is_best_epoch(self, score, save_max_metric_score=True): \"\"\" Check if the current model got the", "handle) self._print_networks([self.model]) def _preload_model(self, model_path): \"\"\" Preload model parameters (in \"*.tar\" format) at", "paralleling. \"\"\" assert \"ROC_AUC\" in metrics_list # Check if the metric is registered", "older one. 
if is_best_epoch: print(self.color_tool.red(f\"\\t Found a best score in the {epoch} epoch,", "\"util.metrics\" file. for i in metrics_list: assert i in metrics.REGISTERED_METRICS.keys(), f\"{i} is not", "save_max_metric_score=True): \"\"\" Check if the current model got the best metric score \"\"\"", "= mean_score fig, axes = plt.subplots(1, 1, figsize=(6, 6)) display = DetCurveDisplay(fpr=fpr, fnr=1", "if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) if self._is_best_epoch(metric_score,", "{params_of_all_networks / 1e6} million.\") def _set_models_to_train_mode(self): self.model.train() def _set_models_to_eval_mode(self): self.model.eval() @staticmethod def get_thresholds(labels,", "Contains all checkpoint information, like \"latest_model.tar\". However, the newer information will no overwrite", "in metrics_list: mean_score = metrics.REGISTERED_METRICS[metric_name](fpr, tpr) # Add the mean value of the", "start=1): params_of_network = 0 for param in model.parameters(): params_of_network += param.numel() print(f\"\\tNetwork {idx}:", "metrics on validation dataset by paralleling. 
\"\"\" assert \"ROC_AUC\" in metrics_list # Check", "\"ROC_AUC\" in metrics_list # Check if the metric is registered in \"util.metrics\" file.", "f1, epoch) self.writer.add_scalar(f\"Validation/Precision\", precision, epoch) self.writer.add_scalar(f\"Validation/recall\", recall, epoch) self.thresholds = {'eer': eer_t, 'fpr_1':", "win_length=win_length, device=self.rank, center=center) # Trainer.train in config self.train_config = config[\"trainer\"][\"train\"] self.epochs = self.train_config[\"epochs\"]", "and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) self._set_models_to_eval_mode() metric_score = self._validation_epoch(epoch) if self.save_checkpoint_interval", "config self.train_config = config[\"trainer\"][\"train\"] self.epochs = self.train_config[\"epochs\"] self.save_checkpoint_interval = self.train_config[\"save_checkpoint_interval\"] self.clip_grad_norm_value = self.train_config[\"clip_grad_norm_value\"]", "history - the optimizer parameters - the model parameters Args: is_best_epoch (bool): In", "model, loss_function, optimizer, scheduler): self.color_tool = colorful self.color_tool.use_style(\"solarized\") self.model = model self.optimizer =", "self.save_max_metric_score else np.inf self.save_dir = Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute() / config[\"meta\"][\"experiment_name\"] self.checkpoints_dir = self.save_dir / \"checkpoints\"", "torch.load(latest_model_path.as_posix(), map_location=map_location) self.start_epoch = checkpoint[\"epoch\"] + 1 self.best_score = checkpoint[\"best_score\"] self.optimizer.load_state_dict(checkpoint[\"optimizer\"]) self.scheduler.load_state_dict(checkpoint[\"scheduler\"]) self.scaler.load_state_dict(checkpoint[\"scaler\"])", "at the start of experiment. Args: model_path (Path): The file path of the", "use the first GPU (process) to the validation. 
if self.rank == 0: if", "= self.acoustic_config[\"hop_length\"] win_length = self.acoustic_config[\"win_length\"] center = self.acoustic_config[\"center\"] self.torch_stft = partial(stft, n_fft=n_fft, hop_length=hop_length,", "\"\"\" Resume experiment from the latest checkpoint. \"\"\" latest_model_path = self.checkpoints_dir.expanduser().absolute() / \"latest_model.tar\"", "== 0: print(self.color_tool.yellow(f\"{'=' * 15} {epoch} epoch {'=' * 15}\")) print(\"[0 seconds] Begin", "= self.checkpoints_dir.expanduser().absolute() / \"latest_model.tar\" assert latest_model_path.exists(), f\"{latest_model_path} does not exist, can not load", "not load latest checkpoint.\" self.dist.barrier() # see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work map_location = {'cuda:%d' % 0:", "in config self.train_config = config[\"trainer\"][\"train\"] self.epochs = self.train_config[\"epochs\"] self.save_checkpoint_interval = self.train_config[\"save_checkpoint_interval\"] self.clip_grad_norm_value =", "eer_t, fpr_1_t, fnr_1_t, eer, fpr_1_fnr, fnr_1_fpr def metrics_visualization(self, labels, predicted, metrics_list, epoch): \"\"\"", "import DetCurveDisplay import src.util.metrics as metrics from src.util import visualization from src.util.acoustic_utils import", "self.torch_stft = partial(stft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) self.istft = partial(istft, n_fft=n_fft, hop_length=hop_length,", "number - the best metric score in history - the optimizer parameters -", "import colorful import numpy as np import matplotlib.pyplot as plt from joblib import", "1 # Trainer.visualization in config self.visualization_config = config[\"trainer\"][\"visualization\"] # In the 'train.py' file,", "config[\"meta\"][\"experiment_name\"] self.checkpoints_dir = self.save_dir / \"checkpoints\" self.logs_dir = self.save_dir / \"logs\" self.thresholds =", "overwrite the older one. 
torch.save(state_dict, (self.checkpoints_dir / \"latest_model.tar\").as_posix()) # \"model_{epoch_number}.tar\" # Contains all", "= model_path.expanduser().absolute() assert model_path.exists(), f\"The file {model_path.as_posix()} is not exist. please check path.\"", "= visualization.writer(self.logs_dir.as_posix()) self.writer.add_text( tag=\"Configuration\", text_string=f\"<pre> \\n{toml.dumps(config)} \\n</pre>\", global_step=1 ) print(self.color_tool.cyan(\"The configurations are as", "self.optimizer = optimizer self.scheduler = scheduler self.loss_function = loss_function # DistributedDataParallel (DDP) self.rank", "src.util import visualization from src.util.acoustic_utils import stft, istft from src.util.utils import prepare_empty_dir, ExecutionTime", "Only use the first GPU (process) to the validation. if self.rank == 0:", "n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) # Trainer.train in config self.train_config = config[\"trainer\"][\"train\"] self.epochs", "partial(stft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) self.istft = partial(istft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank,", "(is_best_epoch=True), the checkpoint of model will be saved as \"<save_dir>/checkpoints/best_model.tar\". \"\"\" print(f\"\\t Saving", "is_best_epoch: print(self.color_tool.red(f\"\\t Found a best score in the {epoch} epoch, saving...\")) torch.save(state_dict, (self.checkpoints_dir", "self._set_models_to_eval_mode() metric_score = self._validation_epoch(epoch) if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval ==", "does not exist, can not load latest checkpoint.\" self.dist.barrier() # see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work map_location", "parameters (in \"*.tar\" format) at the start of experiment. 
Args: model_path (Path): The", "which consists of: - the epoch number - the best metric score in", "\"scaler\": self.scaler.state_dict(), \"model\": self.model.state_dict(), \"thresholds\": self.thresholds } # \"latest_model.tar\" # Contains all checkpoint", "if the 'resume' item is True, we will update the following args: self.start_epoch", "epoch model checkpoint...\") state_dict = { \"epoch\": epoch, \"best_score\": self.best_score, \"optimizer\": self.optimizer.state_dict(), \"scheduler\":", "= metrics.REGISTERED_METRICS[metric_name](fpr, tpr) # Add the mean value of the metric to tensorboard", "= self.validation_config[\"validation_interval\"] self.save_max_metric_score = self.validation_config[\"save_max_metric_score\"] assert self.validation_interval >= 1 # Trainer.visualization in config", "_, _, precision, recall = metrics.get_f1((predicted.reshape(-1) > eer_t).int(), labels.reshape(-1)) self.writer.add_scalar(f\"Validation/F1\", f1, epoch) self.writer.add_scalar(f\"Validation/Precision\",", "self.writer.add_scalar(f\"Validation/Precision\", precision, epoch) self.writer.add_scalar(f\"Validation/recall\", recall, epoch) self.thresholds = {'eer': eer_t, 'fpr_1': fpr_1_t, 'fnr_1':", "etc. # New checkpoint will overwrite the older one. torch.save(state_dict, (self.checkpoints_dir / \"latest_model.tar\").as_posix())", "= self._validation_epoch(epoch) if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch)", "self.save_checkpoint_interval = self.train_config[\"save_checkpoint_interval\"] self.clip_grad_norm_value = self.train_config[\"clip_grad_norm_value\"] assert self.save_checkpoint_interval >= 1 # Trainer.validation in", "the optimizer parameters, the model parameters, etc. 
# New checkpoint will overwrite the", "tensorboard self.writer.add_scalar(f\"Validation/{metric_name}\", mean_score, epoch) if metric_name == \"ROC_AUC\": roc_auc_mean = mean_score fig, axes", "the model get a best metric score (is_best_epoch=True), the checkpoint of model will", "0): self._save_checkpoint(epoch) if self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score): self._save_checkpoint(epoch, is_best_epoch=True) print(f\"[{timer.duration()} seconds] This epoch has finished.\")", "model parameters (in \"*.tar\" format) at the start of experiment. Args: model_path (Path):", "successfully from {model_path.as_posix()}.\") def _resume_checkpoint(self): \"\"\" Resume experiment from the latest checkpoint. \"\"\"", "(self.checkpoints_dir / \"latest_model.tar\").as_posix()) # \"model_{epoch_number}.tar\" # Contains all checkpoint information, like \"latest_model.tar\". However,", "_save_checkpoint(self, epoch, is_best_epoch=False): \"\"\" Save checkpoint to \"<save_dir>/<config name>/checkpoints\" directory, which consists of:", "_resume_checkpoint(self): \"\"\" Resume experiment from the latest checkpoint. \"\"\" latest_model_path = self.checkpoints_dir.expanduser().absolute() /", "self.acoustic_config[\"hop_length\"] win_length = self.acoustic_config[\"win_length\"] center = self.acoustic_config[\"center\"] self.torch_stft = partial(stft, n_fft=n_fft, hop_length=hop_length, win_length=win_length,", "torch.save(state_dict, (self.checkpoints_dir / \"latest_model.tar\").as_posix()) # \"model_{epoch_number}.tar\" # Contains all checkpoint information, like \"latest_model.tar\".", "as \"<save_dir>/checkpoints/best_model.tar\". 
\"\"\" print(f\"\\t Saving {epoch} epoch model checkpoint...\") state_dict = { \"epoch\":", "\"\"\" print(f\"\\t Saving {epoch} epoch model checkpoint...\") state_dict = { \"epoch\": epoch, \"best_score\":", "Add the mean value of the metric to tensorboard self.writer.add_scalar(f\"Validation/{metric_name}\", mean_score, epoch) if", "self.model.load_state_dict(checkpoint[\"model\"]) self.thresholds = checkpoint[\"thresholds\"] if self.rank == 0: print(f\"Model checkpoint loaded. Training will", "metrics_list: assert i in metrics.REGISTERED_METRICS.keys(), f\"{i} is not registered, please check 'util.metrics' file.\"", "from src.util import visualization from src.util.acoustic_utils import stft, istft from src.util.utils import prepare_empty_dir,", "DetCurveDisplay(fpr=fpr, fnr=1 - tpr, estimator_name=f'ROC_AUC = {roc_auc_mean}') display.plot(axes) self.writer.add_figure(f\"DetCurve\", fig, epoch) eer_t, fpr_1_t,", "checkpoint = torch.load(latest_model_path.as_posix(), map_location=map_location) self.start_epoch = checkpoint[\"epoch\"] + 1 self.best_score = checkpoint[\"best_score\"] self.optimizer.load_state_dict(checkpoint[\"optimizer\"])", "self.get_thresholds(labels.reshape(-1), predicted.reshape(-1)) f1, _, _, precision, recall = metrics.get_f1((predicted.reshape(-1) > eer_t).int(), labels.reshape(-1)) self.writer.add_scalar(f\"Validation/F1\",", "1e6} million.\") params_of_all_networks += params_of_network print(f\"The amount of parameters in the project is", "= colorful self.color_tool.use_style(\"solarized\") self.model = model self.optimizer = optimizer self.scheduler = scheduler self.loss_function", "0 for idx, model in enumerate(models, start=1): params_of_network = 0 for param in", "= dist # Automatic mixed precision (AMP) self.use_amp = config[\"meta\"][\"use_amp\"] self.scaler = GradScaler(enabled=self.use_amp)", "{'cuda:%d' % 0: 'cuda:%d' % self.rank} model_checkpoint = torch.load(model_path.as_posix(), map_location=map_location) 
self.model.load_state_dict(model_checkpoint[\"model\"], strict=False) if", "model_path (Path): The file path of the *.tar file \"\"\" model_path = model_path.expanduser().absolute()", "def _preload_model(self, model_path): \"\"\" Preload model parameters (in \"*.tar\" format) at the start", "import time from functools import partial from pathlib import Path import toml import", "is in progress...\") if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval == 0):", "0: 'cuda:%d' % self.rank} model_checkpoint = torch.load(model_path.as_posix(), map_location=map_location) self.model.load_state_dict(model_checkpoint[\"model\"], strict=False) if self.rank ==", "\"\"\" if save_max_metric_score and score >= self.best_score: self.best_score = score return True elif", "\"best_score\": self.best_score, \"optimizer\": self.optimizer.state_dict(), \"scheduler\": self.scheduler.state_dict(), \"scaler\": self.scaler.state_dict(), \"model\": self.model.state_dict(), \"thresholds\": self.thresholds }", "1e6} million.\") def _set_models_to_train_mode(self): self.model.train() def _set_models_to_eval_mode(self): self.model.eval() @staticmethod def get_thresholds(labels, scores): eer_t,", "recall = metrics.get_f1((predicted.reshape(-1) > eer_t).int(), labels.reshape(-1)) self.writer.add_scalar(f\"Validation/F1\", f1, epoch) self.writer.add_scalar(f\"Validation/Precision\", precision, epoch) self.writer.add_scalar(f\"Validation/recall\",", "toml import torch import colorful import numpy as np import matplotlib.pyplot as plt", "a best metric score (is_best_epoch=True), the checkpoint of model will be saved as", "checkpoint...\") state_dict = { \"epoch\": epoch, \"best_score\": self.best_score, \"optimizer\": self.optimizer.state_dict(), \"scheduler\": self.scheduler.state_dict(), \"scaler\":", "print(self.color_tool.red(f\"\\t Found a best score in the {epoch} epoch, saving...\")) torch.save(state_dict, (self.checkpoints_dir /", "} # \"latest_model.tar\" # 
Contains all checkpoint information, including the optimizer parameters, the", "# Only use the first GPU (process) to the validation. if self.rank ==", "the checkpoint of model will be saved as \"<save_dir>/checkpoints/best_model.tar\". \"\"\" print(f\"\\t Saving {epoch}", "= self.acoustic_config[\"n_fft\"] hop_length = self.acoustic_config[\"hop_length\"] win_length = self.acoustic_config[\"win_length\"] center = self.acoustic_config[\"center\"] self.torch_stft =", "params_of_all_networks += params_of_network print(f\"The amount of parameters in the project is {params_of_all_networks /", "= partial(istft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) # Trainer.train in config self.train_config =", "Trainer.visualization in config self.visualization_config = config[\"trainer\"][\"visualization\"] # In the 'train.py' file, if the", "self.logs_dir], resume=resume) self.writer = visualization.writer(self.logs_dir.as_posix()) self.writer.add_text( tag=\"Configuration\", text_string=f\"<pre> \\n{toml.dumps(config)} \\n</pre>\", global_step=1 ) print(self.color_tool.cyan(\"The", "saved as \"<save_dir>/checkpoints/best_model.tar\". 
\"\"\" print(f\"\\t Saving {epoch} epoch model checkpoint...\") state_dict = {", "epoch has finished.\") def _train_epoch(self, epoch): raise NotImplementedError def _validation_epoch(self, epoch): raise NotImplementedError", "model_path): \"\"\" Preload model parameters (in \"*.tar\" format) at the start of experiment.", "tag=\"Configuration\", text_string=f\"<pre> \\n{toml.dumps(config)} \\n</pre>\", global_step=1 ) print(self.color_tool.cyan(\"The configurations are as follows: \")) print(self.color_tool.cyan(\"=\"", "40)) print(self.color_tool.cyan(toml.dumps(config)[:-1])) # except \"\\n\" print(self.color_tool.cyan(\"=\" * 40)) with open((self.save_dir / f\"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml\").as_posix(),", "in the project is {params_of_all_networks / 1e6} million.\") def _set_models_to_train_mode(self): self.model.train() def _set_models_to_eval_mode(self):", "thresholds = metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1)) roc_auc_mean = 0 for metric_name in metrics_list: mean_score =", "* 40)) with open((self.save_dir / f\"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml\").as_posix(), \"w\") as handle: toml.dump(config, handle) self._print_networks([self.model])", "= self.get_thresholds(labels.reshape(-1), predicted.reshape(-1)) f1, _, _, precision, recall = metrics.get_f1((predicted.reshape(-1) > eer_t).int(), labels.reshape(-1))", "@staticmethod def get_thresholds(labels, scores): eer_t, eer, fpr_1_t, fpr_1_fnr, fnr_1_t, fnr_1_fpr = metrics.compute_thresholds(labels, scores)", "rank self.dist = dist # Automatic mixed precision (AMP) self.use_amp = config[\"meta\"][\"use_amp\"] self.scaler", "torch import colorful import numpy as np import matplotlib.pyplot as plt from joblib", "enumerate(models, start=1): params_of_network = 0 for param in model.parameters(): params_of_network += param.numel() print(f\"\\tNetwork", "def _save_checkpoint(self, epoch, is_best_epoch=False): \"\"\" Save checkpoint to \"<save_dir>/<config name>/checkpoints\" 
directory, which consists", "predicted, metrics_list, epoch): \"\"\" Get metrics on validation dataset by paralleling. \"\"\" assert", "partial(istft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) # Trainer.train in config self.train_config = config[\"trainer\"][\"train\"]", "epoch {'=' * 15}\")) print(\"[0 seconds] Begin training...\") timer = ExecutionTime() self._set_models_to_train_mode() self._train_epoch(epoch)", "{epoch} epoch model checkpoint...\") state_dict = { \"epoch\": epoch, \"best_score\": self.best_score, \"optimizer\": self.optimizer.state_dict(),", "def train(self): for epoch in range(self.start_epoch, self.epochs + 1): if self.rank == 0:", "if self.rank == 0: if epoch % self.validation_interval == 0: print(f\"[{timer.duration()} seconds] Training", "param.numel() print(f\"\\tNetwork {idx}: {params_of_network / 1e6} million.\") params_of_all_networks += params_of_network print(f\"The amount of", "self.epochs = self.train_config[\"epochs\"] self.save_checkpoint_interval = self.train_config[\"save_checkpoint_interval\"] self.clip_grad_norm_value = self.train_config[\"clip_grad_norm_value\"] assert self.save_checkpoint_interval >= 1", "self.save_dir / \"checkpoints\" self.logs_dir = self.save_dir / \"logs\" self.thresholds = {'eer': 0, 'fpr_1':", "== 0): self._save_checkpoint(epoch) if self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score): self._save_checkpoint(epoch, is_best_epoch=True) print(f\"[{timer.duration()} seconds] This epoch has", "torch.cuda.amp import GradScaler from sklearn.metrics import DetCurveDisplay import src.util.metrics as metrics from src.util", "current epoch, # the model checkpoint will be saved as \"best_model.tar.\" # The", "self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) if self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score):", "self.model = model 
self.optimizer = optimizer self.scheduler = scheduler self.loss_function = loss_function #", "loaded. Training will begin at {self.start_epoch} epoch.\") def _save_checkpoint(self, epoch, is_best_epoch=False): \"\"\" Save", "\"latest_model.tar\" # Contains all checkpoint information, including the optimizer parameters, the model parameters,", "open((self.save_dir / f\"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml\").as_posix(), \"w\") as handle: toml.dump(config, handle) self._print_networks([self.model]) def _preload_model(self, model_path):", "\"latest_model.tar\" assert latest_model_path.exists(), f\"{latest_model_path} does not exist, can not load latest checkpoint.\" self.dist.barrier()", "param in model.parameters(): params_of_network += param.numel() print(f\"\\tNetwork {idx}: {params_of_network / 1e6} million.\") params_of_all_networks", "self.train_config[\"save_checkpoint_interval\"] self.clip_grad_norm_value = self.train_config[\"clip_grad_norm_value\"] assert self.save_checkpoint_interval >= 1 # Trainer.validation in config self.validation_config", "In the 'train.py' file, if the 'resume' item is True, we will update", "print(f\"Model checkpoint loaded. 
Training will begin at {self.start_epoch} epoch.\") def _save_checkpoint(self, epoch, is_best_epoch=False):", "Trainer.validation in config self.validation_config = config[\"trainer\"][\"validation\"] self.validation_interval = self.validation_config[\"validation_interval\"] self.save_max_metric_score = self.validation_config[\"save_max_metric_score\"] assert", "current epoch, if the model get a best metric score (is_best_epoch=True), the checkpoint", "/ f\"model_{str(epoch).zfill(4)}.tar\").as_posix()) # If the model get a best metric score (is_best_epoch=True) in", "1 # Trainer.validation in config self.validation_config = config[\"trainer\"][\"validation\"] self.validation_interval = self.validation_config[\"validation_interval\"] self.save_max_metric_score =", "self.acoustic_config[\"win_length\"] center = self.acoustic_config[\"center\"] self.torch_stft = partial(stft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) self.istft", "In current epoch, if the model get a best metric score (is_best_epoch=True), the", "DistributedDataParallel (DDP) self.rank = rank self.dist = dist # Automatic mixed precision (AMP)", "metric_name in metrics_list: mean_score = metrics.REGISTERED_METRICS[metric_name](fpr, tpr) # Add the mean value of", "we will update the following args: self.start_epoch = 1 self.best_score = -np.inf if", "f\"model_{str(epoch).zfill(4)}.tar\").as_posix()) # If the model get a best metric score (is_best_epoch=True) in the", "\")) print(self.color_tool.cyan(\"=\" * 40)) print(self.color_tool.cyan(toml.dumps(config)[:-1])) # except \"\\n\" print(self.color_tool.cyan(\"=\" * 40)) with open((self.save_dir", "<= self.best_score: self.best_score = score return True else: return False @staticmethod def _print_networks(models:", "def _set_models_to_train_mode(self): self.model.train() def _set_models_to_eval_mode(self): self.model.eval() @staticmethod def get_thresholds(labels, scores): eer_t, eer, fpr_1_t,", "dataset 
by paralleling. \"\"\" assert \"ROC_AUC\" in metrics_list # Check if the metric", "seconds] This epoch has finished.\") def _train_epoch(self, epoch): raise NotImplementedError def _validation_epoch(self, epoch):", "However, the newer information will no overwrite the older one. torch.save(state_dict, (self.checkpoints_dir /", "map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank} checkpoint = torch.load(latest_model_path.as_posix(), map_location=map_location) self.start_epoch", "file, if the 'resume' item is True, we will update the following args:", "the model get a best metric score (is_best_epoch=True) in the current epoch, #", "from {model_path.as_posix()}.\") def _resume_checkpoint(self): \"\"\" Resume experiment from the latest checkpoint. \"\"\" latest_model_path", "_, _, _ = self.get_thresholds(labels.reshape(-1), predicted.reshape(-1)) f1, _, _, precision, recall = metrics.get_f1((predicted.reshape(-1)", "and score <= self.best_score: self.best_score = score return True else: return False @staticmethod", "= ExecutionTime() self._set_models_to_train_mode() self._train_epoch(epoch) # Only use the first GPU (process) to the", "is_best_epoch=True) print(f\"[{timer.duration()} seconds] This epoch has finished.\") def _train_epoch(self, epoch): raise NotImplementedError def", "please check path.\" map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank} model_checkpoint =", "delayed from torch.cuda.amp import GradScaler from sklearn.metrics import DetCurveDisplay import src.util.metrics as metrics", "fnr_1_fpr = metrics.compute_thresholds(labels, scores) return eer_t, fpr_1_t, fnr_1_t, eer, fpr_1_fnr, fnr_1_fpr def metrics_visualization(self,", "by paralleling. \"\"\" assert \"ROC_AUC\" in metrics_list # Check if the metric is", "the older one. 
if is_best_epoch: print(self.color_tool.red(f\"\\t Found a best score in the {epoch}", "in range(self.start_epoch, self.epochs + 1): if self.rank == 0: print(self.color_tool.yellow(f\"{'=' * 15} {epoch}", "score in the {epoch} epoch, saving...\")) torch.save(state_dict, (self.checkpoints_dir / \"best_model.tar\").as_posix()) def _is_best_epoch(self, score,", "self.rank} checkpoint = torch.load(latest_model_path.as_posix(), map_location=map_location) self.start_epoch = checkpoint[\"epoch\"] + 1 self.best_score = checkpoint[\"best_score\"]", "epoch in range(self.start_epoch, self.epochs + 1): if self.rank == 0: print(self.color_tool.yellow(f\"{'=' * 15}", "number of the parameters is: \") params_of_all_networks = 0 for idx, model in", "self.writer.add_text( tag=\"Configuration\", text_string=f\"<pre> \\n{toml.dumps(config)} \\n</pre>\", global_step=1 ) print(self.color_tool.cyan(\"The configurations are as follows: \"))", "0} if resume: self._resume_checkpoint() if config[\"meta\"][\"preloaded_model_path\"]: self._preload_model(Path(config[\"preloaded_model_path\"])) if self.rank == 0: prepare_empty_dir([self.checkpoints_dir, self.logs_dir],", "self.rank == 0: print(f\"Model checkpoint loaded. Training will begin at {self.start_epoch} epoch.\") def", "colorful self.color_tool.use_style(\"solarized\") self.model = model self.optimizer = optimizer self.scheduler = scheduler self.loss_function =", "# The newer best-scored checkpoint will overwrite the older one. if is_best_epoch: print(self.color_tool.red(f\"\\t", "amount of parameters in the project is {params_of_all_networks / 1e6} million.\") def _set_models_to_train_mode(self):", "The newer best-scored checkpoint will overwrite the older one. 
if is_best_epoch: print(self.color_tool.red(f\"\\t Found", "saved as \"best_model.tar.\" # The newer best-scored checkpoint will overwrite the older one.", "is not registered, please check 'util.metrics' file.\" fpr, tpr, thresholds = metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1))", "not save_max_metric_score and score <= self.best_score: self.best_score = score return True else: return", "\"optimizer\": self.optimizer.state_dict(), \"scheduler\": self.scheduler.state_dict(), \"scaler\": self.scaler.state_dict(), \"model\": self.model.state_dict(), \"thresholds\": self.thresholds } # \"latest_model.tar\"", "roc_auc_mean = mean_score fig, axes = plt.subplots(1, 1, figsize=(6, 6)) display = DetCurveDisplay(fpr=fpr,", "format) at the start of experiment. Args: model_path (Path): The file path of", "\"epoch\": epoch, \"best_score\": self.best_score, \"optimizer\": self.optimizer.state_dict(), \"scheduler\": self.scheduler.state_dict(), \"scaler\": self.scaler.state_dict(), \"model\": self.model.state_dict(), \"thresholds\":", "metric to tensorboard self.writer.add_scalar(f\"Validation/{metric_name}\", mean_score, epoch) if metric_name == \"ROC_AUC\": roc_auc_mean = mean_score", "model in enumerate(models, start=1): params_of_network = 0 for param in model.parameters(): params_of_network +=", "!= 0 and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) if self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score): self._save_checkpoint(epoch,", "0: print(f\"Model checkpoint loaded. Training will begin at {self.start_epoch} epoch.\") def _save_checkpoint(self, epoch,", "checkpoint of model will be saved as \"<save_dir>/checkpoints/best_model.tar\". 
\"\"\" print(f\"\\t Saving {epoch} epoch", "print(self.color_tool.cyan(\"=\" * 40)) print(self.color_tool.cyan(toml.dumps(config)[:-1])) # except \"\\n\" print(self.color_tool.cyan(\"=\" * 40)) with open((self.save_dir /", "checkpoint information, including the optimizer parameters, the model parameters, etc. # New checkpoint", "checkpoint[\"thresholds\"] if self.rank == 0: print(f\"Model checkpoint loaded. Training will begin at {self.start_epoch}", "_, _ = self.get_thresholds(labels.reshape(-1), predicted.reshape(-1)) f1, _, _, precision, recall = metrics.get_f1((predicted.reshape(-1) >", "{'=' * 15}\")) print(\"[0 seconds] Begin training...\") timer = ExecutionTime() self._set_models_to_train_mode() self._train_epoch(epoch) #", "self.best_score = score return True elif not save_max_metric_score and score <= self.best_score: self.best_score", "seconds] Begin training...\") timer = ExecutionTime() self._set_models_to_train_mode() self._train_epoch(epoch) # Only use the first", "= self.train_config[\"epochs\"] self.save_checkpoint_interval = self.train_config[\"save_checkpoint_interval\"] self.clip_grad_norm_value = self.train_config[\"clip_grad_norm_value\"] assert self.save_checkpoint_interval >= 1 #", "fpr_1_fnr, fnr_1_t, fnr_1_fpr = metrics.compute_thresholds(labels, scores) return eer_t, fpr_1_t, fnr_1_t, eer, fpr_1_fnr, fnr_1_fpr", "at {self.start_epoch} epoch.\") def _save_checkpoint(self, epoch, is_best_epoch=False): \"\"\" Save checkpoint to \"<save_dir>/<config name>/checkpoints\"", "optimizer parameters, the model parameters, etc. 
# New checkpoint will overwrite the older", "True, we will update the following args: self.start_epoch = 1 self.best_score = -np.inf", "get a best metric score (is_best_epoch=True), the checkpoint of model will be saved", "\"scheduler\": self.scheduler.state_dict(), \"scaler\": self.scaler.state_dict(), \"model\": self.model.state_dict(), \"thresholds\": self.thresholds } # \"latest_model.tar\" # Contains", "save_max_metric_score and score >= self.best_score: self.best_score = score return True elif not save_max_metric_score", "following args: self.start_epoch = 1 self.best_score = -np.inf if self.save_max_metric_score else np.inf self.save_dir", "strict=False) if self.rank == 0: print(f\"Model preloaded successfully from {model_path.as_posix()}.\") def _resume_checkpoint(self): \"\"\"", "_set_models_to_train_mode(self): self.model.train() def _set_models_to_eval_mode(self): self.model.eval() @staticmethod def get_thresholds(labels, scores): eer_t, eer, fpr_1_t, fpr_1_fnr,", "scheduler self.loss_function = loss_function # DistributedDataParallel (DDP) self.rank = rank self.dist = dist", "self.writer.add_scalar(f\"Validation/F1\", f1, epoch) self.writer.add_scalar(f\"Validation/Precision\", precision, epoch) self.writer.add_scalar(f\"Validation/recall\", recall, epoch) self.thresholds = {'eer': eer_t,", "parameters - the model parameters Args: is_best_epoch (bool): In current epoch, if the", "if is_best_epoch: print(self.color_tool.red(f\"\\t Found a best score in the {epoch} epoch, saving...\")) torch.save(state_dict,", "of the parameters is: \") params_of_all_networks = 0 for idx, model in enumerate(models,", "to the validation. 
if self.rank == 0: if epoch % self.validation_interval == 0:", "for idx, model in enumerate(models, start=1): params_of_network = 0 for param in model.parameters():", "Found a best score in the {epoch} epoch, saving...\")) torch.save(state_dict, (self.checkpoints_dir / \"best_model.tar\").as_posix())", "assert i in metrics.REGISTERED_METRICS.keys(), f\"{i} is not registered, please check 'util.metrics' file.\" fpr,", "as plt from joblib import Parallel, delayed from torch.cuda.amp import GradScaler from sklearn.metrics", "configurations are as follows: \")) print(self.color_tool.cyan(\"=\" * 40)) print(self.color_tool.cyan(toml.dumps(config)[:-1])) # except \"\\n\" print(self.color_tool.cyan(\"=\"", "the current model got the best metric score \"\"\" if save_max_metric_score and score", "Preload model parameters (in \"*.tar\" format) at the start of experiment. Args: model_path", "if self.save_max_metric_score else np.inf self.save_dir = Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute() / config[\"meta\"][\"experiment_name\"] self.checkpoints_dir = self.save_dir /", "self.logs_dir = self.save_dir / \"logs\" self.thresholds = {'eer': 0, 'fpr_1': 0, 'fnr_1': 0}", "to tensorboard self.writer.add_scalar(f\"Validation/{metric_name}\", mean_score, epoch) if metric_name == \"ROC_AUC\": roc_auc_mean = mean_score fig,", "Supported STFT n_fft = self.acoustic_config[\"n_fft\"] hop_length = self.acoustic_config[\"hop_length\"] win_length = self.acoustic_config[\"win_length\"] center =", "mean_score, epoch) if metric_name == \"ROC_AUC\": roc_auc_mean = mean_score fig, axes = plt.subplots(1,", "= partial(stft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) self.istft = partial(istft, n_fft=n_fft, hop_length=hop_length, win_length=win_length,", "model_path.expanduser().absolute() assert model_path.exists(), f\"The file {model_path.as_posix()} is not exist. 
please check path.\" map_location", "# \"model_{epoch_number}.tar\" # Contains all checkpoint information, like \"latest_model.tar\". However, the newer information", "self.train_config[\"clip_grad_norm_value\"] assert self.save_checkpoint_interval >= 1 # Trainer.validation in config self.validation_config = config[\"trainer\"][\"validation\"] self.validation_interval", "self.model.state_dict(), \"thresholds\": self.thresholds } # \"latest_model.tar\" # Contains all checkpoint information, including the", "0 for metric_name in metrics_list: mean_score = metrics.REGISTERED_METRICS[metric_name](fpr, tpr) # Add the mean", "older one. torch.save(state_dict, (self.checkpoints_dir / f\"model_{str(epoch).zfill(4)}.tar\").as_posix()) # If the model get a best", "registered in \"util.metrics\" file. for i in metrics_list: assert i in metrics.REGISTERED_METRICS.keys(), f\"{i}", "figsize=(6, 6)) display = DetCurveDisplay(fpr=fpr, fnr=1 - tpr, estimator_name=f'ROC_AUC = {roc_auc_mean}') display.plot(axes) self.writer.add_figure(f\"DetCurve\",", "score in history - the optimizer parameters - the model parameters Args: is_best_epoch", "in the {epoch} epoch, saving...\")) torch.save(state_dict, (self.checkpoints_dir / \"best_model.tar\").as_posix()) def _is_best_epoch(self, score, save_max_metric_score=True):", "first GPU (process) to the validation. if self.rank == 0: if epoch %", "/ \"checkpoints\" self.logs_dir = self.save_dir / \"logs\" self.thresholds = {'eer': 0, 'fpr_1': 0,", "# \"latest_model.tar\" # Contains all checkpoint information, including the optimizer parameters, the model", "fnr_1_t, fnr_1_fpr = metrics.compute_thresholds(labels, scores) return eer_t, fpr_1_t, fnr_1_t, eer, fpr_1_fnr, fnr_1_fpr def", "epoch, is_best_epoch=False): \"\"\" Save checkpoint to \"<save_dir>/<config name>/checkpoints\" directory, which consists of: -", "Get metrics on validation dataset by paralleling. \"\"\" assert \"ROC_AUC\" in metrics_list #", "the start of experiment. 
Args: model_path (Path): The file path of the *.tar", "checkpoint to \"<save_dir>/<config name>/checkpoints\" directory, which consists of: - the epoch number -", "0 and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) self._set_models_to_eval_mode() metric_score = self._validation_epoch(epoch) if", "Automatic mixed precision (AMP) self.use_amp = config[\"meta\"][\"use_amp\"] self.scaler = GradScaler(enabled=self.use_amp) # Acoustics self.acoustic_config", "_preload_model(self, model_path): \"\"\" Preload model parameters (in \"*.tar\" format) at the start of", "\"thresholds\": self.thresholds } # \"latest_model.tar\" # Contains all checkpoint information, including the optimizer", "print(f\"The amount of parameters in the project is {params_of_all_networks / 1e6} million.\") def", "print(f\"\\t Saving {epoch} epoch model checkpoint...\") state_dict = { \"epoch\": epoch, \"best_score\": self.best_score,", "self._save_checkpoint(epoch) self._set_models_to_eval_mode() metric_score = self._validation_epoch(epoch) if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval", "__init__(self, dist, rank, config, resume: bool, model, loss_function, optimizer, scheduler): self.color_tool = colorful", "mixed precision (AMP) self.use_amp = config[\"meta\"][\"use_amp\"] self.scaler = GradScaler(enabled=self.use_amp) # Acoustics self.acoustic_config =", "parameters Args: is_best_epoch (bool): In current epoch, if the model get a best", "class BaseTrainer: def __init__(self, dist, rank, config, resume: bool, model, loss_function, optimizer, scheduler):", "else: return False @staticmethod def _print_networks(models: list): print(f\"This project contains {len(models)} models, the", "GPU (process) to the validation. 
if self.rank == 0: if epoch % self.validation_interval", "fpr_1_t, fnr_1_t, _, _, _ = self.get_thresholds(labels.reshape(-1), predicted.reshape(-1)) f1, _, _, precision, recall", "dist, rank, config, resume: bool, model, loss_function, optimizer, scheduler): self.color_tool = colorful self.color_tool.use_style(\"solarized\")", "self.validation_config = config[\"trainer\"][\"validation\"] self.validation_interval = self.validation_config[\"validation_interval\"] self.save_max_metric_score = self.validation_config[\"save_max_metric_score\"] assert self.validation_interval >= 1", "== 0: print(f\"Model preloaded successfully from {model_path.as_posix()}.\") def _resume_checkpoint(self): \"\"\" Resume experiment from", "epoch number - the best metric score in history - the optimizer parameters", "fnr_1_t} return roc_auc_mean def train(self): for epoch in range(self.start_epoch, self.epochs + 1): if", "visualization from src.util.acoustic_utils import stft, istft from src.util.utils import prepare_empty_dir, ExecutionTime plt.switch_backend('agg') class", "will update the following args: self.start_epoch = 1 self.best_score = -np.inf if self.save_max_metric_score", "self.visualization_config = config[\"trainer\"][\"visualization\"] # In the 'train.py' file, if the 'resume' item is", "f\"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml\").as_posix(), \"w\") as handle: toml.dump(config, handle) self._print_networks([self.model]) def _preload_model(self, model_path): \"\"\" Preload", "preloaded successfully from {model_path.as_posix()}.\") def _resume_checkpoint(self): \"\"\" Resume experiment from the latest checkpoint.", "file path of the *.tar file \"\"\" model_path = model_path.expanduser().absolute() assert model_path.exists(), f\"The", "hop_length=hop_length, win_length=win_length, device=self.rank, center=center) # Trainer.train in config self.train_config = config[\"trainer\"][\"train\"] self.epochs =", "= {'cuda:%d' % 0: 'cuda:%d' % self.rank} checkpoint = 
torch.load(latest_model_path.as_posix(), map_location=map_location) self.start_epoch =", "src.util.metrics as metrics from src.util import visualization from src.util.acoustic_utils import stft, istft from", "fpr_1_fnr, fnr_1_fpr def metrics_visualization(self, labels, predicted, metrics_list, epoch): \"\"\" Get metrics on validation", "pathlib import Path import toml import torch import colorful import numpy as np", "0: prepare_empty_dir([self.checkpoints_dir, self.logs_dir], resume=resume) self.writer = visualization.writer(self.logs_dir.as_posix()) self.writer.add_text( tag=\"Configuration\", text_string=f\"<pre> \\n{toml.dumps(config)} \\n</pre>\", global_step=1", "progress...\") if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) self._set_models_to_eval_mode()", "from src.util.utils import prepare_empty_dir, ExecutionTime plt.switch_backend('agg') class BaseTrainer: def __init__(self, dist, rank, config,", "item is True, we will update the following args: self.start_epoch = 1 self.best_score", "self.optimizer.state_dict(), \"scheduler\": self.scheduler.state_dict(), \"scaler\": self.scaler.state_dict(), \"model\": self.model.state_dict(), \"thresholds\": self.thresholds } # \"latest_model.tar\" #", "\"model_{epoch_number}.tar\" # Contains all checkpoint information, like \"latest_model.tar\". 
However, the newer information will", "\"checkpoints\" self.logs_dir = self.save_dir / \"logs\" self.thresholds = {'eer': 0, 'fpr_1': 0, 'fnr_1':", "Path import toml import torch import colorful import numpy as np import matplotlib.pyplot", "= self.acoustic_config[\"center\"] self.torch_stft = partial(stft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) self.istft = partial(istft,", "def metrics_visualization(self, labels, predicted, metrics_list, epoch): \"\"\" Get metrics on validation dataset by", "for param in model.parameters(): params_of_network += param.numel() print(f\"\\tNetwork {idx}: {params_of_network / 1e6} million.\")", "epoch, saving...\")) torch.save(state_dict, (self.checkpoints_dir / \"best_model.tar\").as_posix()) def _is_best_epoch(self, score, save_max_metric_score=True): \"\"\" Check if", "score \"\"\" if save_max_metric_score and score >= self.best_score: self.best_score = score return True", "except \"\\n\" print(self.color_tool.cyan(\"=\" * 40)) with open((self.save_dir / f\"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml\").as_posix(), \"w\") as handle:", "if self.rank == 0: prepare_empty_dir([self.checkpoints_dir, self.logs_dir], resume=resume) self.writer = visualization.writer(self.logs_dir.as_posix()) self.writer.add_text( tag=\"Configuration\", text_string=f\"<pre>", "self.epochs + 1): if self.rank == 0: print(self.color_tool.yellow(f\"{'=' * 15} {epoch} epoch {'='", "{model_path.as_posix()} is not exist. 
please check path.\" map_location = {'cuda:%d' % 0: 'cuda:%d'", "import stft, istft from src.util.utils import prepare_empty_dir, ExecutionTime plt.switch_backend('agg') class BaseTrainer: def __init__(self,", "= score return True elif not save_max_metric_score and score <= self.best_score: self.best_score =", "(bool): In current epoch, if the model get a best metric score (is_best_epoch=True),", "the 'resume' item is True, we will update the following args: self.start_epoch =", "metric is registered in \"util.metrics\" file. for i in metrics_list: assert i in", "print(self.color_tool.yellow(f\"{'=' * 15} {epoch} epoch {'=' * 15}\")) print(\"[0 seconds] Begin training...\") timer", "np import matplotlib.pyplot as plt from joblib import Parallel, delayed from torch.cuda.amp import", "self.train_config = config[\"trainer\"][\"train\"] self.epochs = self.train_config[\"epochs\"] self.save_checkpoint_interval = self.train_config[\"save_checkpoint_interval\"] self.clip_grad_norm_value = self.train_config[\"clip_grad_norm_value\"] assert", "self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) if self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score): self._save_checkpoint(epoch, is_best_epoch=True) print(f\"[{timer.duration()} seconds] This epoch", "latest_model_path.exists(), f\"{latest_model_path} does not exist, can not load latest checkpoint.\" self.dist.barrier() # see", "center = self.acoustic_config[\"center\"] self.torch_stft = partial(stft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) self.istft =", "False @staticmethod def _print_networks(models: list): print(f\"This project contains {len(models)} models, the number of", "'cuda:%d' % self.rank} model_checkpoint = torch.load(model_path.as_posix(), map_location=map_location) self.model.load_state_dict(model_checkpoint[\"model\"], strict=False) if self.rank == 0:", "== 0: if epoch % self.validation_interval == 0: 
print(f\"[{timer.duration()} seconds] Training has finished,", "is not exist. please check path.\" map_location = {'cuda:%d' % 0: 'cuda:%d' %", "metric score (is_best_epoch=True) in the current epoch, # the model checkpoint will be", "model parameters, etc. # New checkpoint will overwrite the older one. torch.save(state_dict, (self.checkpoints_dir", "\"ROC_AUC\": roc_auc_mean = mean_score fig, axes = plt.subplots(1, 1, figsize=(6, 6)) display =", "load latest checkpoint.\" self.dist.barrier() # see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work map_location = {'cuda:%d' % 0: 'cuda:%d'", "self._preload_model(Path(config[\"preloaded_model_path\"])) if self.rank == 0: prepare_empty_dir([self.checkpoints_dir, self.logs_dir], resume=resume) self.writer = visualization.writer(self.logs_dir.as_posix()) self.writer.add_text( tag=\"Configuration\",", "\"\"\" latest_model_path = self.checkpoints_dir.expanduser().absolute() / \"latest_model.tar\" assert latest_model_path.exists(), f\"{latest_model_path} does not exist, can", "center=center) self.istft = partial(istft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) # Trainer.train in config", "return True else: return False @staticmethod def _print_networks(models: list): print(f\"This project contains {len(models)}", "# New checkpoint will overwrite the older one. 
torch.save(state_dict, (self.checkpoints_dir / \"latest_model.tar\").as_posix()) #", "* 15}\")) print(\"[0 seconds] Begin training...\") timer = ExecutionTime() self._set_models_to_train_mode() self._train_epoch(epoch) # Only", "not exist, can not load latest checkpoint.\" self.dist.barrier() # see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work map_location =", "range(self.start_epoch, self.epochs + 1): if self.rank == 0: print(self.color_tool.yellow(f\"{'=' * 15} {epoch} epoch", "current model got the best metric score \"\"\" if save_max_metric_score and score >=", "* 40)) print(self.color_tool.cyan(toml.dumps(config)[:-1])) # except \"\\n\" print(self.color_tool.cyan(\"=\" * 40)) with open((self.save_dir / f\"{time.strftime('%Y-%m-%d", "estimator_name=f'ROC_AUC = {roc_auc_mean}') display.plot(axes) self.writer.add_figure(f\"DetCurve\", fig, epoch) eer_t, fpr_1_t, fnr_1_t, _, _, _", "\"\"\" Check if the current model got the best metric score \"\"\" if", "hop_length = self.acoustic_config[\"hop_length\"] win_length = self.acoustic_config[\"win_length\"] center = self.acoustic_config[\"center\"] self.torch_stft = partial(stft, n_fft=n_fft,", "state_dict = { \"epoch\": epoch, \"best_score\": self.best_score, \"optimizer\": self.optimizer.state_dict(), \"scheduler\": self.scheduler.state_dict(), \"scaler\": self.scaler.state_dict(),", "of parameters in the project is {params_of_all_networks / 1e6} million.\") def _set_models_to_train_mode(self): self.model.train()", "get_thresholds(labels, scores): eer_t, eer, fpr_1_t, fpr_1_fnr, fnr_1_t, fnr_1_fpr = metrics.compute_thresholds(labels, scores) return eer_t,", "(AMP) self.use_amp = config[\"meta\"][\"use_amp\"] self.scaler = GradScaler(enabled=self.use_amp) # Acoustics self.acoustic_config = config[\"acoustic\"] #", "self.validation_interval = self.validation_config[\"validation_interval\"] self.save_max_metric_score = self.validation_config[\"save_max_metric_score\"] assert 
self.validation_interval >= 1 # Trainer.visualization in", "if the metric is registered in \"util.metrics\" file. for i in metrics_list: assert", "print(f\"Model preloaded successfully from {model_path.as_posix()}.\") def _resume_checkpoint(self): \"\"\" Resume experiment from the latest", "the newer information will no overwrite the older one. torch.save(state_dict, (self.checkpoints_dir / f\"model_{str(epoch).zfill(4)}.tar\").as_posix())", "print(\"[0 seconds] Begin training...\") timer = ExecutionTime() self._set_models_to_train_mode() self._train_epoch(epoch) # Only use the", "= torch.load(latest_model_path.as_posix(), map_location=map_location) self.start_epoch = checkpoint[\"epoch\"] + 1 self.best_score = checkpoint[\"best_score\"] self.optimizer.load_state_dict(checkpoint[\"optimizer\"]) self.scheduler.load_state_dict(checkpoint[\"scheduler\"])", "\"w\") as handle: toml.dump(config, handle) self._print_networks([self.model]) def _preload_model(self, model_path): \"\"\" Preload model parameters", "def _is_best_epoch(self, score, save_max_metric_score=True): \"\"\" Check if the current model got the best", "if the model get a best metric score (is_best_epoch=True), the checkpoint of model", "= GradScaler(enabled=self.use_amp) # Acoustics self.acoustic_config = config[\"acoustic\"] # Supported STFT n_fft = self.acoustic_config[\"n_fft\"]", "precision, recall = metrics.get_f1((predicted.reshape(-1) > eer_t).int(), labels.reshape(-1)) self.writer.add_scalar(f\"Validation/F1\", f1, epoch) self.writer.add_scalar(f\"Validation/Precision\", precision, epoch)", "project is {params_of_all_networks / 1e6} million.\") def _set_models_to_train_mode(self): self.model.train() def _set_models_to_eval_mode(self): self.model.eval() @staticmethod", "self.save_max_metric_score = self.validation_config[\"save_max_metric_score\"] assert self.validation_interval >= 1 # Trainer.visualization in config self.visualization_config =", "= 0 for idx, model in enumerate(models, start=1): 
params_of_network = 0 for param", "import torch import colorful import numpy as np import matplotlib.pyplot as plt from", "model_path = model_path.expanduser().absolute() assert model_path.exists(), f\"The file {model_path.as_posix()} is not exist. please check", "as metrics from src.util import visualization from src.util.acoustic_utils import stft, istft from src.util.utils", "model_path.exists(), f\"The file {model_path.as_posix()} is not exist. please check path.\" map_location = {'cuda:%d'", "args: self.start_epoch = 1 self.best_score = -np.inf if self.save_max_metric_score else np.inf self.save_dir =", "_is_best_epoch(self, score, save_max_metric_score=True): \"\"\" Check if the current model got the best metric", "0): self._save_checkpoint(epoch) self._set_models_to_eval_mode() metric_score = self._validation_epoch(epoch) if self.save_checkpoint_interval != 0 and (epoch %", "self.clip_grad_norm_value = self.train_config[\"clip_grad_norm_value\"] assert self.save_checkpoint_interval >= 1 # Trainer.validation in config self.validation_config =", "as \"best_model.tar.\" # The newer best-scored checkpoint will overwrite the older one. if", "\"latest_model.tar\").as_posix()) # \"model_{epoch_number}.tar\" # Contains all checkpoint information, like \"latest_model.tar\". However, the newer", "the optimizer parameters - the model parameters Args: is_best_epoch (bool): In current epoch,", "will overwrite the older one. 
if is_best_epoch: print(self.color_tool.red(f\"\\t Found a best score in", "Training has finished, validation is in progress...\") if self.save_checkpoint_interval != 0 and (epoch", "0 and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) if self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score): self._save_checkpoint(epoch, is_best_epoch=True)", "= {'cuda:%d' % 0: 'cuda:%d' % self.rank} model_checkpoint = torch.load(model_path.as_posix(), map_location=map_location) self.model.load_state_dict(model_checkpoint[\"model\"], strict=False)", "of: - the epoch number - the best metric score in history -", "+= params_of_network print(f\"The amount of parameters in the project is {params_of_all_networks / 1e6}", "f1, _, _, precision, recall = metrics.get_f1((predicted.reshape(-1) > eer_t).int(), labels.reshape(-1)) self.writer.add_scalar(f\"Validation/F1\", f1, epoch)", "a best score in the {epoch} epoch, saving...\")) torch.save(state_dict, (self.checkpoints_dir / \"best_model.tar\").as_posix()) def", "overwrite the older one. 
if is_best_epoch: print(self.color_tool.red(f\"\\t Found a best score in the", "exist, can not load latest checkpoint.\" self.dist.barrier() # see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work map_location = {'cuda:%d'", "# Trainer.train in config self.train_config = config[\"trainer\"][\"train\"] self.epochs = self.train_config[\"epochs\"] self.save_checkpoint_interval = self.train_config[\"save_checkpoint_interval\"]", "{params_of_network / 1e6} million.\") params_of_all_networks += params_of_network print(f\"The amount of parameters in the", "path of the *.tar file \"\"\" model_path = model_path.expanduser().absolute() assert model_path.exists(), f\"The file", "metrics_list: mean_score = metrics.REGISTERED_METRICS[metric_name](fpr, tpr) # Add the mean value of the metric", "1 self.best_score = -np.inf if self.save_max_metric_score else np.inf self.save_dir = Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute() / config[\"meta\"][\"experiment_name\"]", "\"\"\" model_path = model_path.expanduser().absolute() assert model_path.exists(), f\"The file {model_path.as_posix()} is not exist. 
please", "epoch, if the model get a best metric score (is_best_epoch=True), the checkpoint of", "model checkpoint will be saved as \"best_model.tar.\" # The newer best-scored checkpoint will", "0: print(f\"[{timer.duration()} seconds] Training has finished, validation is in progress...\") if self.save_checkpoint_interval !=", "== 0: prepare_empty_dir([self.checkpoints_dir, self.logs_dir], resume=resume) self.writer = visualization.writer(self.logs_dir.as_posix()) self.writer.add_text( tag=\"Configuration\", text_string=f\"<pre> \\n{toml.dumps(config)} \\n</pre>\",", "istft from src.util.utils import prepare_empty_dir, ExecutionTime plt.switch_backend('agg') class BaseTrainer: def __init__(self, dist, rank,", "self.model.train() def _set_models_to_eval_mode(self): self.model.eval() @staticmethod def get_thresholds(labels, scores): eer_t, eer, fpr_1_t, fpr_1_fnr, fnr_1_t,", "metrics from src.util import visualization from src.util.acoustic_utils import stft, istft from src.util.utils import", "self.thresholds = checkpoint[\"thresholds\"] if self.rank == 0: print(f\"Model checkpoint loaded. Training will begin", "self.rank == 0: prepare_empty_dir([self.checkpoints_dir, self.logs_dir], resume=resume) self.writer = visualization.writer(self.logs_dir.as_posix()) self.writer.add_text( tag=\"Configuration\", text_string=f\"<pre> \\n{toml.dumps(config)}", "on validation dataset by paralleling. \"\"\" assert \"ROC_AUC\" in metrics_list # Check if", "= metrics.get_f1((predicted.reshape(-1) > eer_t).int(), labels.reshape(-1)) self.writer.add_scalar(f\"Validation/F1\", f1, epoch) self.writer.add_scalar(f\"Validation/Precision\", precision, epoch) self.writer.add_scalar(f\"Validation/recall\", recall,", "metric score \"\"\" if save_max_metric_score and score >= self.best_score: self.best_score = score return", "checkpoint will overwrite the older one. 
torch.save(state_dict, (self.checkpoints_dir / \"latest_model.tar\").as_posix()) # \"model_{epoch_number}.tar\" #", "(Path): The file path of the *.tar file \"\"\" model_path = model_path.expanduser().absolute() assert", "metrics_list # Check if the metric is registered in \"util.metrics\" file. for i", "config self.visualization_config = config[\"trainer\"][\"visualization\"] # In the 'train.py' file, if the 'resume' item", "== 0: print(f\"Model checkpoint loaded. Training will begin at {self.start_epoch} epoch.\") def _save_checkpoint(self,", "score (is_best_epoch=True) in the current epoch, # the model checkpoint will be saved", "one. torch.save(state_dict, (self.checkpoints_dir / \"latest_model.tar\").as_posix()) # \"model_{epoch_number}.tar\" # Contains all checkpoint information, like", "text_string=f\"<pre> \\n{toml.dumps(config)} \\n</pre>\", global_step=1 ) print(self.color_tool.cyan(\"The configurations are as follows: \")) print(self.color_tool.cyan(\"=\" *", "mean_score fig, axes = plt.subplots(1, 1, figsize=(6, 6)) display = DetCurveDisplay(fpr=fpr, fnr=1 -", "time from functools import partial from pathlib import Path import toml import torch", "else np.inf self.save_dir = Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute() / config[\"meta\"][\"experiment_name\"] self.checkpoints_dir = self.save_dir / \"checkpoints\" self.logs_dir", "self.optimizer.load_state_dict(checkpoint[\"optimizer\"]) self.scheduler.load_state_dict(checkpoint[\"scheduler\"]) self.scaler.load_state_dict(checkpoint[\"scaler\"]) self.model.load_state_dict(checkpoint[\"model\"]) self.thresholds = checkpoint[\"thresholds\"] if self.rank == 0: print(f\"Model checkpoint", "million.\") def _set_models_to_train_mode(self): self.model.train() def _set_models_to_eval_mode(self): self.model.eval() @staticmethod def get_thresholds(labels, scores): eer_t, eer,", "fnr_1_t, _, _, _ = self.get_thresholds(labels.reshape(-1), predicted.reshape(-1)) f1, _, _, precision, recall =", 
"metrics_list, epoch): \"\"\" Get metrics on validation dataset by paralleling. \"\"\" assert \"ROC_AUC\"", "= checkpoint[\"epoch\"] + 1 self.best_score = checkpoint[\"best_score\"] self.optimizer.load_state_dict(checkpoint[\"optimizer\"]) self.scheduler.load_state_dict(checkpoint[\"scheduler\"]) self.scaler.load_state_dict(checkpoint[\"scaler\"]) self.model.load_state_dict(checkpoint[\"model\"]) self.thresholds =", "import matplotlib.pyplot as plt from joblib import Parallel, delayed from torch.cuda.amp import GradScaler", "score <= self.best_score: self.best_score = score return True else: return False @staticmethod def", "def _print_networks(models: list): print(f\"This project contains {len(models)} models, the number of the parameters", "+= param.numel() print(f\"\\tNetwork {idx}: {params_of_network / 1e6} million.\") params_of_all_networks += params_of_network print(f\"The amount", "import src.util.metrics as metrics from src.util import visualization from src.util.acoustic_utils import stft, istft", "checkpoint.\" self.dist.barrier() # see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank}", "= metrics.compute_thresholds(labels, scores) return eer_t, fpr_1_t, fnr_1_t, eer, fpr_1_fnr, fnr_1_fpr def metrics_visualization(self, labels,", "self._resume_checkpoint() if config[\"meta\"][\"preloaded_model_path\"]: self._preload_model(Path(config[\"preloaded_model_path\"])) if self.rank == 0: prepare_empty_dir([self.checkpoints_dir, self.logs_dir], resume=resume) self.writer =", "precision (AMP) self.use_amp = config[\"meta\"][\"use_amp\"] self.scaler = GradScaler(enabled=self.use_amp) # Acoustics self.acoustic_config = config[\"acoustic\"]", "import Parallel, delayed from torch.cuda.amp import GradScaler from sklearn.metrics import DetCurveDisplay import src.util.metrics", "0: print(f\"Model preloaded successfully from {model_path.as_posix()}.\") def _resume_checkpoint(self): 
\"\"\" Resume experiment from the", "\"model\": self.model.state_dict(), \"thresholds\": self.thresholds } # \"latest_model.tar\" # Contains all checkpoint information, including", "roc_auc_mean def train(self): for epoch in range(self.start_epoch, self.epochs + 1): if self.rank ==", "assert latest_model_path.exists(), f\"{latest_model_path} does not exist, can not load latest checkpoint.\" self.dist.barrier() #", "15}\")) print(\"[0 seconds] Begin training...\") timer = ExecutionTime() self._set_models_to_train_mode() self._train_epoch(epoch) # Only use", "if resume: self._resume_checkpoint() if config[\"meta\"][\"preloaded_model_path\"]: self._preload_model(Path(config[\"preloaded_model_path\"])) if self.rank == 0: prepare_empty_dir([self.checkpoints_dir, self.logs_dir], resume=resume)", "eer, fpr_1_t, fpr_1_fnr, fnr_1_t, fnr_1_fpr = metrics.compute_thresholds(labels, scores) return eer_t, fpr_1_t, fnr_1_t, eer,", "functools import partial from pathlib import Path import toml import torch import colorful", "metric_name == \"ROC_AUC\": roc_auc_mean = mean_score fig, axes = plt.subplots(1, 1, figsize=(6, 6))", "self._set_models_to_train_mode() self._train_epoch(epoch) # Only use the first GPU (process) to the validation. 
if", "mean value of the metric to tensorboard self.writer.add_scalar(f\"Validation/{metric_name}\", mean_score, epoch) if metric_name ==", "can not load latest checkpoint.\" self.dist.barrier() # see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work map_location = {'cuda:%d' %", "begin at {self.start_epoch} epoch.\") def _save_checkpoint(self, epoch, is_best_epoch=False): \"\"\" Save checkpoint to \"<save_dir>/<config", "return roc_auc_mean def train(self): for epoch in range(self.start_epoch, self.epochs + 1): if self.rank", "for i in metrics_list: assert i in metrics.REGISTERED_METRICS.keys(), f\"{i} is not registered, please", "_set_models_to_eval_mode(self): self.model.eval() @staticmethod def get_thresholds(labels, scores): eer_t, eer, fpr_1_t, fpr_1_fnr, fnr_1_t, fnr_1_fpr =", "the validation. if self.rank == 0: if epoch % self.validation_interval == 0: print(f\"[{timer.duration()}", "older one. torch.save(state_dict, (self.checkpoints_dir / \"latest_model.tar\").as_posix()) # \"model_{epoch_number}.tar\" # Contains all checkpoint information,", "score, save_max_metric_score=True): \"\"\" Check if the current model got the best metric score", "roc_auc_mean = 0 for metric_name in metrics_list: mean_score = metrics.REGISTERED_METRICS[metric_name](fpr, tpr) # Add", "f\"The file {model_path.as_posix()} is not exist. 
please check path.\" map_location = {'cuda:%d' %", "= config[\"acoustic\"] # Supported STFT n_fft = self.acoustic_config[\"n_fft\"] hop_length = self.acoustic_config[\"hop_length\"] win_length =", "name>/checkpoints\" directory, which consists of: - the epoch number - the best metric", "the *.tar file \"\"\" model_path = model_path.expanduser().absolute() assert model_path.exists(), f\"The file {model_path.as_posix()} is", "checkpoint will be saved as \"best_model.tar.\" # The newer best-scored checkpoint will overwrite", "self.scaler.load_state_dict(checkpoint[\"scaler\"]) self.model.load_state_dict(checkpoint[\"model\"]) self.thresholds = checkpoint[\"thresholds\"] if self.rank == 0: print(f\"Model checkpoint loaded. Training", "self.validation_interval == 0: print(f\"[{timer.duration()} seconds] Training has finished, validation is in progress...\") if", "== \"ROC_AUC\": roc_auc_mean = mean_score fig, axes = plt.subplots(1, 1, figsize=(6, 6)) display", "metrics.REGISTERED_METRICS[metric_name](fpr, tpr) # Add the mean value of the metric to tensorboard self.writer.add_scalar(f\"Validation/{metric_name}\",", "will overwrite the older one. 
torch.save(state_dict, (self.checkpoints_dir / \"latest_model.tar\").as_posix()) # \"model_{epoch_number}.tar\" # Contains", "please check 'util.metrics' file.\" fpr, tpr, thresholds = metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1)) roc_auc_mean = 0", "import partial from pathlib import Path import toml import torch import colorful import", "is: \") params_of_all_networks = 0 for idx, model in enumerate(models, start=1): params_of_network =", "fpr_1_t, 'fnr_1': fnr_1_t} return roc_auc_mean def train(self): for epoch in range(self.start_epoch, self.epochs +", "model_checkpoint = torch.load(model_path.as_posix(), map_location=map_location) self.model.load_state_dict(model_checkpoint[\"model\"], strict=False) if self.rank == 0: print(f\"Model preloaded successfully", "and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) if self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score): self._save_checkpoint(epoch, is_best_epoch=True) print(f\"[{timer.duration()}", "information, including the optimizer parameters, the model parameters, etc. # New checkpoint will", "eer_t, eer, fpr_1_t, fpr_1_fnr, fnr_1_t, fnr_1_fpr = metrics.compute_thresholds(labels, scores) return eer_t, fpr_1_t, fnr_1_t,", "{ \"epoch\": epoch, \"best_score\": self.best_score, \"optimizer\": self.optimizer.state_dict(), \"scheduler\": self.scheduler.state_dict(), \"scaler\": self.scaler.state_dict(), \"model\": self.model.state_dict(),", "tpr, estimator_name=f'ROC_AUC = {roc_auc_mean}') display.plot(axes) self.writer.add_figure(f\"DetCurve\", fig, epoch) eer_t, fpr_1_t, fnr_1_t, _, _,", "if self.rank == 0: print(f\"Model checkpoint loaded. 
Training will begin at {self.start_epoch} epoch.\")", "Training will begin at {self.start_epoch} epoch.\") def _save_checkpoint(self, epoch, is_best_epoch=False): \"\"\" Save checkpoint", "return False @staticmethod def _print_networks(models: list): print(f\"This project contains {len(models)} models, the number", "n_fft = self.acoustic_config[\"n_fft\"] hop_length = self.acoustic_config[\"hop_length\"] win_length = self.acoustic_config[\"win_length\"] center = self.acoustic_config[\"center\"] self.torch_stft", "{self.start_epoch} epoch.\") def _save_checkpoint(self, epoch, is_best_epoch=False): \"\"\" Save checkpoint to \"<save_dir>/<config name>/checkpoints\" directory,", "config, resume: bool, model, loss_function, optimizer, scheduler): self.color_tool = colorful self.color_tool.use_style(\"solarized\") self.model =", "from torch.cuda.amp import GradScaler from sklearn.metrics import DetCurveDisplay import src.util.metrics as metrics from", "return True elif not save_max_metric_score and score <= self.best_score: self.best_score = score return", "/ \"best_model.tar\").as_posix()) def _is_best_epoch(self, score, save_max_metric_score=True): \"\"\" Check if the current model got", "print(f\"This project contains {len(models)} models, the number of the parameters is: \") params_of_all_networks", "True else: return False @staticmethod def _print_networks(models: list): print(f\"This project contains {len(models)} models,", "self.model.eval() @staticmethod def get_thresholds(labels, scores): eer_t, eer, fpr_1_t, fpr_1_fnr, fnr_1_t, fnr_1_fpr = metrics.compute_thresholds(labels,", "predicted.reshape(-1)) f1, _, _, precision, recall = metrics.get_f1((predicted.reshape(-1) > eer_t).int(), labels.reshape(-1)) self.writer.add_scalar(f\"Validation/F1\", f1,", "\") params_of_all_networks = 0 for idx, model in enumerate(models, start=1): params_of_network = 0", "validation dataset by paralleling. 
\"\"\" assert \"ROC_AUC\" in metrics_list # Check if the", "{epoch} epoch, saving...\")) torch.save(state_dict, (self.checkpoints_dir / \"best_model.tar\").as_posix()) def _is_best_epoch(self, score, save_max_metric_score=True): \"\"\" Check", "= { \"epoch\": epoch, \"best_score\": self.best_score, \"optimizer\": self.optimizer.state_dict(), \"scheduler\": self.scheduler.state_dict(), \"scaler\": self.scaler.state_dict(), \"model\":", "src.util.utils import prepare_empty_dir, ExecutionTime plt.switch_backend('agg') class BaseTrainer: def __init__(self, dist, rank, config, resume:", "/ config[\"meta\"][\"experiment_name\"] self.checkpoints_dir = self.save_dir / \"checkpoints\" self.logs_dir = self.save_dir / \"logs\" self.thresholds", "(epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) if self._is_best_epoch(metric_score, save_max_metric_score=self.save_max_metric_score): self._save_checkpoint(epoch, is_best_epoch=True) print(f\"[{timer.duration()} seconds]", "from the latest checkpoint. \"\"\" latest_model_path = self.checkpoints_dir.expanduser().absolute() / \"latest_model.tar\" assert latest_model_path.exists(), f\"{latest_model_path}", "not exist. please check path.\" map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank}", "Check if the metric is registered in \"util.metrics\" file. 
for i in metrics_list:", "if self.rank == 0: print(self.color_tool.yellow(f\"{'=' * 15} {epoch} epoch {'=' * 15}\")) print(\"[0", "== 0): self._save_checkpoint(epoch) self._set_models_to_eval_mode() metric_score = self._validation_epoch(epoch) if self.save_checkpoint_interval != 0 and (epoch", "'cuda:%d' % self.rank} checkpoint = torch.load(latest_model_path.as_posix(), map_location=map_location) self.start_epoch = checkpoint[\"epoch\"] + 1 self.best_score", "tpr) # Add the mean value of the metric to tensorboard self.writer.add_scalar(f\"Validation/{metric_name}\", mean_score,", "import Path import toml import torch import colorful import numpy as np import", "eer_t, 'fpr_1': fpr_1_t, 'fnr_1': fnr_1_t} return roc_auc_mean def train(self): for epoch in range(self.start_epoch,", "params_of_network += param.numel() print(f\"\\tNetwork {idx}: {params_of_network / 1e6} million.\") params_of_all_networks += params_of_network print(f\"The", "\"\"\" Get metrics on validation dataset by paralleling. 
\"\"\" assert \"ROC_AUC\" in metrics_list", "self.rank == 0: if epoch % self.validation_interval == 0: print(f\"[{timer.duration()} seconds] Training has", "assert self.save_checkpoint_interval >= 1 # Trainer.validation in config self.validation_config = config[\"trainer\"][\"validation\"] self.validation_interval =", "= config[\"trainer\"][\"visualization\"] # In the 'train.py' file, if the 'resume' item is True,", "map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank} model_checkpoint = torch.load(model_path.as_posix(), map_location=map_location) self.model.load_state_dict(model_checkpoint[\"model\"],", "metrics.get_f1((predicted.reshape(-1) > eer_t).int(), labels.reshape(-1)) self.writer.add_scalar(f\"Validation/F1\", f1, epoch) self.writer.add_scalar(f\"Validation/Precision\", precision, epoch) self.writer.add_scalar(f\"Validation/recall\", recall, epoch)", "!= 0 and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) self._set_models_to_eval_mode() metric_score = self._validation_epoch(epoch)", "self._print_networks([self.model]) def _preload_model(self, model_path): \"\"\" Preload model parameters (in \"*.tar\" format) at the", "> eer_t).int(), labels.reshape(-1)) self.writer.add_scalar(f\"Validation/F1\", f1, epoch) self.writer.add_scalar(f\"Validation/Precision\", precision, epoch) self.writer.add_scalar(f\"Validation/recall\", recall, epoch) self.thresholds", "= checkpoint[\"best_score\"] self.optimizer.load_state_dict(checkpoint[\"optimizer\"]) self.scheduler.load_state_dict(checkpoint[\"scheduler\"]) self.scaler.load_state_dict(checkpoint[\"scaler\"]) self.model.load_state_dict(checkpoint[\"model\"]) self.thresholds = checkpoint[\"thresholds\"] if self.rank == 0:", "def get_thresholds(labels, scores): eer_t, eer, fpr_1_t, fpr_1_fnr, fnr_1_t, fnr_1_fpr = metrics.compute_thresholds(labels, scores) return", "GradScaler(enabled=self.use_amp) # Acoustics self.acoustic_config = config[\"acoustic\"] # Supported STFT n_fft = 
self.acoustic_config[\"n_fft\"] hop_length", "with open((self.save_dir / f\"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml\").as_posix(), \"w\") as handle: toml.dump(config, handle) self._print_networks([self.model]) def _preload_model(self,", "params_of_network print(f\"The amount of parameters in the project is {params_of_all_networks / 1e6} million.\")", "the older one. torch.save(state_dict, (self.checkpoints_dir / f\"model_{str(epoch).zfill(4)}.tar\").as_posix()) # If the model get a", "'util.metrics' file.\" fpr, tpr, thresholds = metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1)) roc_auc_mean = 0 for metric_name", "epoch): \"\"\" Get metrics on validation dataset by paralleling. \"\"\" assert \"ROC_AUC\" in", "- the optimizer parameters - the model parameters Args: is_best_epoch (bool): In current", "eer, fpr_1_fnr, fnr_1_fpr def metrics_visualization(self, labels, predicted, metrics_list, epoch): \"\"\" Get metrics on", "saving...\")) torch.save(state_dict, (self.checkpoints_dir / \"best_model.tar\").as_posix()) def _is_best_epoch(self, score, save_max_metric_score=True): \"\"\" Check if the", "\"<save_dir>/<config name>/checkpoints\" directory, which consists of: - the epoch number - the best", "self.writer.add_scalar(f\"Validation/{metric_name}\", mean_score, epoch) if metric_name == \"ROC_AUC\": roc_auc_mean = mean_score fig, axes =", "prepare_empty_dir([self.checkpoints_dir, self.logs_dir], resume=resume) self.writer = visualization.writer(self.logs_dir.as_posix()) self.writer.add_text( tag=\"Configuration\", text_string=f\"<pre> \\n{toml.dumps(config)} \\n</pre>\", global_step=1 )", "% 0: 'cuda:%d' % self.rank} model_checkpoint = torch.load(model_path.as_posix(), map_location=map_location) self.model.load_state_dict(model_checkpoint[\"model\"], strict=False) if self.rank", "self.use_amp = config[\"meta\"][\"use_amp\"] self.scaler = GradScaler(enabled=self.use_amp) # Acoustics self.acoustic_config = config[\"acoustic\"] # Supported", "- tpr, 
estimator_name=f'ROC_AUC = {roc_auc_mean}') display.plot(axes) self.writer.add_figure(f\"DetCurve\", fig, epoch) eer_t, fpr_1_t, fnr_1_t, _,", "in progress...\") if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch)", "self.color_tool = colorful self.color_tool.use_style(\"solarized\") self.model = model self.optimizer = optimizer self.scheduler = scheduler", "if config[\"meta\"][\"preloaded_model_path\"]: self._preload_model(Path(config[\"preloaded_model_path\"])) if self.rank == 0: prepare_empty_dir([self.checkpoints_dir, self.logs_dir], resume=resume) self.writer = visualization.writer(self.logs_dir.as_posix())", "the metric is registered in \"util.metrics\" file. for i in metrics_list: assert i", "0: 'cuda:%d' % self.rank} checkpoint = torch.load(latest_model_path.as_posix(), map_location=map_location) self.start_epoch = checkpoint[\"epoch\"] + 1", "idx, model in enumerate(models, start=1): params_of_network = 0 for param in model.parameters(): params_of_network", "epoch) eer_t, fpr_1_t, fnr_1_t, _, _, _ = self.get_thresholds(labels.reshape(-1), predicted.reshape(-1)) f1, _, _,", "the following args: self.start_epoch = 1 self.best_score = -np.inf if self.save_max_metric_score else np.inf", "the current epoch, # the model checkpoint will be saved as \"best_model.tar.\" #", "visualization.writer(self.logs_dir.as_posix()) self.writer.add_text( tag=\"Configuration\", text_string=f\"<pre> \\n{toml.dumps(config)} \\n</pre>\", global_step=1 ) print(self.color_tool.cyan(\"The configurations are as follows:", "path.\" map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank} model_checkpoint = torch.load(model_path.as_posix(), map_location=map_location)", "print(f\"[{timer.duration()} seconds] Training has finished, validation is in progress...\") if self.save_checkpoint_interval != 0", "self.thresholds = {'eer': 0, 'fpr_1': 0, 'fnr_1': 0} if resume: self._resume_checkpoint() if 
config[\"meta\"][\"preloaded_model_path\"]:", "'resume' item is True, we will update the following args: self.start_epoch = 1", "self.dist = dist # Automatic mixed precision (AMP) self.use_amp = config[\"meta\"][\"use_amp\"] self.scaler =", "self._validation_epoch(epoch) if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) if", "experiment. Args: model_path (Path): The file path of the *.tar file \"\"\" model_path", "information will no overwrite the older one. torch.save(state_dict, (self.checkpoints_dir / f\"model_{str(epoch).zfill(4)}.tar\").as_posix()) # If", "\"<save_dir>/checkpoints/best_model.tar\". \"\"\" print(f\"\\t Saving {epoch} epoch model checkpoint...\") state_dict = { \"epoch\": epoch,", "-np.inf if self.save_max_metric_score else np.inf self.save_dir = Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute() / config[\"meta\"][\"experiment_name\"] self.checkpoints_dir = self.save_dir", "if the current model got the best metric score \"\"\" if save_max_metric_score and", "prepare_empty_dir, ExecutionTime plt.switch_backend('agg') class BaseTrainer: def __init__(self, dist, rank, config, resume: bool, model,", "of the metric to tensorboard self.writer.add_scalar(f\"Validation/{metric_name}\", mean_score, epoch) if metric_name == \"ROC_AUC\": roc_auc_mean", "epoch) self.writer.add_scalar(f\"Validation/Precision\", precision, epoch) self.writer.add_scalar(f\"Validation/recall\", recall, epoch) self.thresholds = {'eer': eer_t, 'fpr_1': fpr_1_t,", "Begin training...\") timer = ExecutionTime() self._set_models_to_train_mode() self._train_epoch(epoch) # Only use the first GPU", "get a best metric score (is_best_epoch=True) in the current epoch, # the model", "torch.load(model_path.as_posix(), map_location=map_location) self.model.load_state_dict(model_checkpoint[\"model\"], strict=False) if self.rank == 0: print(f\"Model preloaded successfully from {model_path.as_posix()}.\")", 
"scheduler): self.color_tool = colorful self.color_tool.use_style(\"solarized\") self.model = model self.optimizer = optimizer self.scheduler =", "epoch.\") def _save_checkpoint(self, epoch, is_best_epoch=False): \"\"\" Save checkpoint to \"<save_dir>/<config name>/checkpoints\" directory, which", "model got the best metric score \"\"\" if save_max_metric_score and score >= self.best_score:", "tpr, thresholds = metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1)) roc_auc_mean = 0 for metric_name in metrics_list: mean_score", "the first GPU (process) to the validation. if self.rank == 0: if epoch", "parameters is: \") params_of_all_networks = 0 for idx, model in enumerate(models, start=1): params_of_network", "import visualization from src.util.acoustic_utils import stft, istft from src.util.utils import prepare_empty_dir, ExecutionTime plt.switch_backend('agg')", "sklearn.metrics import DetCurveDisplay import src.util.metrics as metrics from src.util import visualization from src.util.acoustic_utils", "stft, istft from src.util.utils import prepare_empty_dir, ExecutionTime plt.switch_backend('agg') class BaseTrainer: def __init__(self, dist,", "exist. 
please check path.\" map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank} model_checkpoint", "# Add the mean value of the metric to tensorboard self.writer.add_scalar(f\"Validation/{metric_name}\", mean_score, epoch)", "model get a best metric score (is_best_epoch=True), the checkpoint of model will be", "is_best_epoch=False): \"\"\" Save checkpoint to \"<save_dir>/<config name>/checkpoints\" directory, which consists of: - the", "- the epoch number - the best metric score in history - the", "% self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) self._set_models_to_eval_mode() metric_score = self._validation_epoch(epoch) if self.save_checkpoint_interval != 0", "timer = ExecutionTime() self._set_models_to_train_mode() self._train_epoch(epoch) # Only use the first GPU (process) to", "Contains all checkpoint information, including the optimizer parameters, the model parameters, etc. #", "0 for param in model.parameters(): params_of_network += param.numel() print(f\"\\tNetwork {idx}: {params_of_network / 1e6}", "fnr_1_t, eer, fpr_1_fnr, fnr_1_fpr def metrics_visualization(self, labels, predicted, metrics_list, epoch): \"\"\" Get metrics", "= config[\"trainer\"][\"validation\"] self.validation_interval = self.validation_config[\"validation_interval\"] self.save_max_metric_score = self.validation_config[\"save_max_metric_score\"] assert self.validation_interval >= 1 #", "see https://stackoverflow.com/questions/59760328/how-does-torch-distributed-barrier-work map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank} checkpoint = torch.load(latest_model_path.as_posix(),", "finished, validation is in progress...\") if self.save_checkpoint_interval != 0 and (epoch % self.save_checkpoint_interval", "loss_function, optimizer, scheduler): self.color_tool = colorful self.color_tool.use_style(\"solarized\") self.model = model self.optimizer = optimizer", "the project is {params_of_all_networks / 1e6} million.\") def _set_models_to_train_mode(self): self.model.train() 
def _set_models_to_eval_mode(self): self.model.eval()", "np.inf self.save_dir = Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute() / config[\"meta\"][\"experiment_name\"] self.checkpoints_dir = self.save_dir / \"checkpoints\" self.logs_dir =", "best metric score in history - the optimizer parameters - the model parameters", "= metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1)) roc_auc_mean = 0 for metric_name in metrics_list: mean_score = metrics.REGISTERED_METRICS[metric_name](fpr,", "of the *.tar file \"\"\" model_path = model_path.expanduser().absolute() assert model_path.exists(), f\"The file {model_path.as_posix()}", "/ f\"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml\").as_posix(), \"w\") as handle: toml.dump(config, handle) self._print_networks([self.model]) def _preload_model(self, model_path): \"\"\"", "has finished, validation is in progress...\") if self.save_checkpoint_interval != 0 and (epoch %", "% self.rank} model_checkpoint = torch.load(model_path.as_posix(), map_location=map_location) self.model.load_state_dict(model_checkpoint[\"model\"], strict=False) if self.rank == 0: print(f\"Model", "_print_networks(models: list): print(f\"This project contains {len(models)} models, the number of the parameters is:", "(process) to the validation. if self.rank == 0: if epoch % self.validation_interval ==", "toml.dump(config, handle) self._print_networks([self.model]) def _preload_model(self, model_path): \"\"\" Preload model parameters (in \"*.tar\" format)", "/ 1e6} million.\") params_of_all_networks += params_of_network print(f\"The amount of parameters in the project", "axes = plt.subplots(1, 1, figsize=(6, 6)) display = DetCurveDisplay(fpr=fpr, fnr=1 - tpr, estimator_name=f'ROC_AUC", "New checkpoint will overwrite the older one. 
torch.save(state_dict, (self.checkpoints_dir / \"latest_model.tar\").as_posix()) # \"model_{epoch_number}.tar\"", "metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1)) roc_auc_mean = 0 for metric_name in metrics_list: mean_score = metrics.REGISTERED_METRICS[metric_name](fpr, tpr)", "= self.train_config[\"save_checkpoint_interval\"] self.clip_grad_norm_value = self.train_config[\"clip_grad_norm_value\"] assert self.save_checkpoint_interval >= 1 # Trainer.validation in config", "the {epoch} epoch, saving...\")) torch.save(state_dict, (self.checkpoints_dir / \"best_model.tar\").as_posix()) def _is_best_epoch(self, score, save_max_metric_score=True): \"\"\"", "= Path(config[\"meta\"][\"save_dir\"]).expanduser().absolute() / config[\"meta\"][\"experiment_name\"] self.checkpoints_dir = self.save_dir / \"checkpoints\" self.logs_dir = self.save_dir /", "labels, predicted, metrics_list, epoch): \"\"\" Get metrics on validation dataset by paralleling. \"\"\"", "# Check if the metric is registered in \"util.metrics\" file. for i in", "epoch) self.writer.add_scalar(f\"Validation/recall\", recall, epoch) self.thresholds = {'eer': eer_t, 'fpr_1': fpr_1_t, 'fnr_1': fnr_1_t} return", "parameters, the model parameters, etc. 
# New checkpoint will overwrite the older one.", "DetCurveDisplay import src.util.metrics as metrics from src.util import visualization from src.util.acoustic_utils import stft,", "in config self.visualization_config = config[\"trainer\"][\"visualization\"] # In the 'train.py' file, if the 'resume'", "the model parameters Args: is_best_epoch (bool): In current epoch, if the model get", "in metrics.REGISTERED_METRICS.keys(), f\"{i} is not registered, please check 'util.metrics' file.\" fpr, tpr, thresholds", "epoch) self.thresholds = {'eer': eer_t, 'fpr_1': fpr_1_t, 'fnr_1': fnr_1_t} return roc_auc_mean def train(self):", "= rank self.dist = dist # Automatic mixed precision (AMP) self.use_amp = config[\"meta\"][\"use_amp\"]", "will no overwrite the older one. torch.save(state_dict, (self.checkpoints_dir / f\"model_{str(epoch).zfill(4)}.tar\").as_posix()) # If the", "hop_length=hop_length, win_length=win_length, device=self.rank, center=center) self.istft = partial(istft, n_fft=n_fft, hop_length=hop_length, win_length=win_length, device=self.rank, center=center) #", "self.save_checkpoint_interval == 0): self._save_checkpoint(epoch) self._set_models_to_eval_mode() metric_score = self._validation_epoch(epoch) if self.save_checkpoint_interval != 0 and", "the mean value of the metric to tensorboard self.writer.add_scalar(f\"Validation/{metric_name}\", mean_score, epoch) if metric_name", "fpr, tpr, thresholds = metrics.roc_curve(labels.reshape(-1), predicted.reshape(-1)) roc_auc_mean = 0 for metric_name in metrics_list:", "# Acoustics self.acoustic_config = config[\"acoustic\"] # Supported STFT n_fft = self.acoustic_config[\"n_fft\"] hop_length =", "0, 'fnr_1': 0} if resume: self._resume_checkpoint() if config[\"meta\"][\"preloaded_model_path\"]: self._preload_model(Path(config[\"preloaded_model_path\"])) if self.rank == 0:", "train(self): for epoch in range(self.start_epoch, self.epochs + 1): if self.rank == 0: print(self.color_tool.yellow(f\"{'='", 
"self.acoustic_config[\"n_fft\"] hop_length = self.acoustic_config[\"hop_length\"] win_length = self.acoustic_config[\"win_length\"] center = self.acoustic_config[\"center\"] self.torch_stft = partial(stft,", "will be saved as \"best_model.tar.\" # The newer best-scored checkpoint will overwrite the", "self.checkpoints_dir.expanduser().absolute() / \"latest_model.tar\" assert latest_model_path.exists(), f\"{latest_model_path} does not exist, can not load latest", "'fnr_1': fnr_1_t} return roc_auc_mean def train(self): for epoch in range(self.start_epoch, self.epochs + 1):", "def _resume_checkpoint(self): \"\"\" Resume experiment from the latest checkpoint. \"\"\" latest_model_path = self.checkpoints_dir.expanduser().absolute()", "ExecutionTime() self._set_models_to_train_mode() self._train_epoch(epoch) # Only use the first GPU (process) to the validation.", "# the model checkpoint will be saved as \"best_model.tar.\" # The newer best-scored", "got the best metric score \"\"\" if save_max_metric_score and score >= self.best_score: self.best_score", "if save_max_metric_score and score >= self.best_score: self.best_score = score return True elif not", "the epoch number - the best metric score in history - the optimizer" ]
[ "rate.append(arr[3].replace('\\n','')) print(rate.count('5')+rate.count('4.5')) print(rate.count('4')+rate.count('3.5')) print(rate.count('3')+rate.count('2.5')) print(rate.count('2')+rate.count('1.5')) print(rate.count('1')+rate.count('0.5')) attr = ['五星','四星','三星','二星','一星'] v1 = [3324,1788,1293,553,1653] pie", "f.readlines() for line in lines: arr = line.split(',') if len(arr) == 5: rate.append(arr[3].replace('\\n',''))", "print(rate.count('1')+rate.count('0.5')) attr = ['五星','四星','三星','二星','一星'] v1 = [3324,1788,1293,553,1653] pie = Pie('饼图-星级图示例', title_pos='center', width=900 )", "[3324,1788,1293,553,1653] pie = Pie('饼图-星级图示例', title_pos='center', width=900 ) pie.add(\"7-17\", attr, v1, center=[75,50], is_random=True, radius=[30,75],", "pie = Pie('饼图-星级图示例', title_pos='center', width=900 ) pie.add(\"7-17\", attr, v1, center=[75,50], is_random=True, radius=[30,75], rosetype='area',", "in lines: arr = line.split(',') if len(arr) == 5: rate.append(arr[3].replace('\\n','')) print(rate.count('5')+rate.count('4.5')) print(rate.count('4')+rate.count('3.5')) print(rate.count('3')+rate.count('2.5'))", "Pie rate = [] with open('quan.txt', mode='r',encoding='utf-8') as f: lines = f.readlines() for", "line.split(',') if len(arr) == 5: rate.append(arr[3].replace('\\n','')) print(rate.count('5')+rate.count('4.5')) print(rate.count('4')+rate.count('3.5')) print(rate.count('3')+rate.count('2.5')) print(rate.count('2')+rate.count('1.5')) print(rate.count('1')+rate.count('0.5')) attr =", "5: rate.append(arr[3].replace('\\n','')) print(rate.count('5')+rate.count('4.5')) print(rate.count('4')+rate.count('3.5')) print(rate.count('3')+rate.count('2.5')) print(rate.count('2')+rate.count('1.5')) print(rate.count('1')+rate.count('0.5')) attr = ['五星','四星','三星','二星','一星'] v1 = [3324,1788,1293,553,1653]", "pyecharts import Pie rate = [] with open('quan.txt', mode='r',encoding='utf-8') as f: lines =", "print(rate.count('3')+rate.count('2.5')) print(rate.count('2')+rate.count('1.5')) 
print(rate.count('1')+rate.count('0.5')) attr = ['五星','四星','三星','二星','一星'] v1 = [3324,1788,1293,553,1653] pie = Pie('饼图-星级图示例', title_pos='center',", "f: lines = f.readlines() for line in lines: arr = line.split(',') if len(arr)", "import Pie rate = [] with open('quan.txt', mode='r',encoding='utf-8') as f: lines = f.readlines()", "= [] with open('quan.txt', mode='r',encoding='utf-8') as f: lines = f.readlines() for line in", "['五星','四星','三星','二星','一星'] v1 = [3324,1788,1293,553,1653] pie = Pie('饼图-星级图示例', title_pos='center', width=900 ) pie.add(\"7-17\", attr, v1,", "from pyecharts import ThemeRiver from pyecharts import Pie rate = [] with open('quan.txt',", "import ThemeRiver from pyecharts import Pie rate = [] with open('quan.txt', mode='r',encoding='utf-8') as", "= line.split(',') if len(arr) == 5: rate.append(arr[3].replace('\\n','')) print(rate.count('5')+rate.count('4.5')) print(rate.count('4')+rate.count('3.5')) print(rate.count('3')+rate.count('2.5')) print(rate.count('2')+rate.count('1.5')) print(rate.count('1')+rate.count('0.5')) attr", "title_pos='center', width=900 ) pie.add(\"7-17\", attr, v1, center=[75,50], is_random=True, radius=[30,75], rosetype='area', is_legend_show=False, is_label_show=True )", "open('quan.txt', mode='r',encoding='utf-8') as f: lines = f.readlines() for line in lines: arr =", "ThemeRiver from pyecharts import Pie rate = [] with open('quan.txt', mode='r',encoding='utf-8') as f:", "arr = line.split(',') if len(arr) == 5: rate.append(arr[3].replace('\\n','')) print(rate.count('5')+rate.count('4.5')) print(rate.count('4')+rate.count('3.5')) print(rate.count('3')+rate.count('2.5')) print(rate.count('2')+rate.count('1.5')) print(rate.count('1')+rate.count('0.5'))", "<gh_stars>0 from pyecharts import ThemeRiver from pyecharts import Pie rate = [] with", "if len(arr) == 5: rate.append(arr[3].replace('\\n','')) print(rate.count('5')+rate.count('4.5')) print(rate.count('4')+rate.count('3.5')) print(rate.count('3')+rate.count('2.5')) 
print(rate.count('2')+rate.count('1.5')) print(rate.count('1')+rate.count('0.5')) attr = ['五星','四星','三星','二星','一星']", "= ['五星','四星','三星','二星','一星'] v1 = [3324,1788,1293,553,1653] pie = Pie('饼图-星级图示例', title_pos='center', width=900 ) pie.add(\"7-17\", attr,", "as f: lines = f.readlines() for line in lines: arr = line.split(',') if", "print(rate.count('5')+rate.count('4.5')) print(rate.count('4')+rate.count('3.5')) print(rate.count('3')+rate.count('2.5')) print(rate.count('2')+rate.count('1.5')) print(rate.count('1')+rate.count('0.5')) attr = ['五星','四星','三星','二星','一星'] v1 = [3324,1788,1293,553,1653] pie =", "lines: arr = line.split(',') if len(arr) == 5: rate.append(arr[3].replace('\\n','')) print(rate.count('5')+rate.count('4.5')) print(rate.count('4')+rate.count('3.5')) print(rate.count('3')+rate.count('2.5')) print(rate.count('2')+rate.count('1.5'))", "[] with open('quan.txt', mode='r',encoding='utf-8') as f: lines = f.readlines() for line in lines:", "attr = ['五星','四星','三星','二星','一星'] v1 = [3324,1788,1293,553,1653] pie = Pie('饼图-星级图示例', title_pos='center', width=900 ) pie.add(\"7-17\",", "v1 = [3324,1788,1293,553,1653] pie = Pie('饼图-星级图示例', title_pos='center', width=900 ) pie.add(\"7-17\", attr, v1, center=[75,50],", "pyecharts import ThemeRiver from pyecharts import Pie rate = [] with open('quan.txt', mode='r',encoding='utf-8')", "for line in lines: arr = line.split(',') if len(arr) == 5: rate.append(arr[3].replace('\\n','')) print(rate.count('5')+rate.count('4.5'))", "width=900 ) pie.add(\"7-17\", attr, v1, center=[75,50], is_random=True, radius=[30,75], rosetype='area', is_legend_show=False, is_label_show=True ) pie.render()", "lines = f.readlines() for line in lines: arr = line.split(',') if len(arr) ==", "from pyecharts import Pie rate = [] with open('quan.txt', mode='r',encoding='utf-8') as f: lines", "== 5: rate.append(arr[3].replace('\\n','')) print(rate.count('5')+rate.count('4.5')) print(rate.count('4')+rate.count('3.5')) 
print(rate.count('3')+rate.count('2.5')) print(rate.count('2')+rate.count('1.5')) print(rate.count('1')+rate.count('0.5')) attr = ['五星','四星','三星','二星','一星'] v1 =", "print(rate.count('4')+rate.count('3.5')) print(rate.count('3')+rate.count('2.5')) print(rate.count('2')+rate.count('1.5')) print(rate.count('1')+rate.count('0.5')) attr = ['五星','四星','三星','二星','一星'] v1 = [3324,1788,1293,553,1653] pie = Pie('饼图-星级图示例',", "Pie('饼图-星级图示例', title_pos='center', width=900 ) pie.add(\"7-17\", attr, v1, center=[75,50], is_random=True, radius=[30,75], rosetype='area', is_legend_show=False, is_label_show=True", "len(arr) == 5: rate.append(arr[3].replace('\\n','')) print(rate.count('5')+rate.count('4.5')) print(rate.count('4')+rate.count('3.5')) print(rate.count('3')+rate.count('2.5')) print(rate.count('2')+rate.count('1.5')) print(rate.count('1')+rate.count('0.5')) attr = ['五星','四星','三星','二星','一星'] v1", "= f.readlines() for line in lines: arr = line.split(',') if len(arr) == 5:", "print(rate.count('2')+rate.count('1.5')) print(rate.count('1')+rate.count('0.5')) attr = ['五星','四星','三星','二星','一星'] v1 = [3324,1788,1293,553,1653] pie = Pie('饼图-星级图示例', title_pos='center', width=900", "rate = [] with open('quan.txt', mode='r',encoding='utf-8') as f: lines = f.readlines() for line", "= [3324,1788,1293,553,1653] pie = Pie('饼图-星级图示例', title_pos='center', width=900 ) pie.add(\"7-17\", attr, v1, center=[75,50], is_random=True,", "line in lines: arr = line.split(',') if len(arr) == 5: rate.append(arr[3].replace('\\n','')) print(rate.count('5')+rate.count('4.5')) print(rate.count('4')+rate.count('3.5'))", "mode='r',encoding='utf-8') as f: lines = f.readlines() for line in lines: arr = line.split(',')", "= Pie('饼图-星级图示例', title_pos='center', width=900 ) pie.add(\"7-17\", attr, v1, center=[75,50], is_random=True, radius=[30,75], rosetype='area', is_legend_show=False,", "with open('quan.txt', mode='r',encoding='utf-8') as f: lines = f.readlines() for line in lines: arr" ]
[ "]) app.add_url_rule( '/reset_page', view_func=api.reset_page, methods=['GET', 'POST', ]) API_VER_1 = '/api/v1/' app.add_url_rule( API_VER_1 +", "import api app = api.app # URLs ## API URLs app.add_url_rule('/', view_func=api.index) app.add_url_rule(", "API_VER_1 = '/api/v1/' app.add_url_rule( API_VER_1 + 'profile/<person_id>', view_func=api.profile, methods=['GET', ]) app.add_url_rule( API_VER_1 +", "'/forgot_password', view_func=api.forgot_password, methods=['GET', 'POST', ]) app.add_url_rule( '/reset_page', view_func=api.reset_page, methods=['GET', 'POST', ]) API_VER_1 =", "API_VER_1 + 'score', view_func=api.score, methods=['GET', 'POST', ]) app.add_url_rule( API_VER_1 + 'login_with_fb', view_func=api.login_with_fb, methods=['POST',", "'/login', view_func=api.login, methods=['POST', ]) app.add_url_rule( '/register', view_func=api.register, methods=['POST', ]) app.add_url_rule( '/forgot_password', view_func=api.forgot_password, methods=['GET',", "]) app.add_url_rule( '/forgot_password', view_func=api.forgot_password, methods=['GET', 'POST', ]) app.add_url_rule( '/reset_page', view_func=api.reset_page, methods=['GET', 'POST', ])", "app.add_url_rule( API_VER_1 + 'profile/<person_id>', view_func=api.profile, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'score', view_func=api.score, methods=['GET',", "app.add_url_rule( '/forgot_password', view_func=api.forgot_password, methods=['GET', 'POST', ]) app.add_url_rule( '/reset_page', view_func=api.reset_page, methods=['GET', 'POST', ]) API_VER_1", "URLs app.add_url_rule('/', view_func=api.index) app.add_url_rule( '/login', view_func=api.login, methods=['POST', ]) app.add_url_rule( '/register', view_func=api.register, methods=['POST', ])", "app.add_url_rule( '/register', view_func=api.register, methods=['POST', ]) app.add_url_rule( '/forgot_password', view_func=api.forgot_password, methods=['GET', 'POST', ]) app.add_url_rule( '/reset_page',", "'POST', ]) app.add_url_rule( API_VER_1 + 'login_with_fb', 
view_func=api.login_with_fb, methods=['POST', ]) app.add_url_rule( API_VER_1 + 'leaderboard',", "methods=['GET', ]) app.add_url_rule( API_VER_1 + 'user_scores', view_func=api.user_scores, methods=['GET', ]) if __name__ == '__main__':", "app.add_url_rule('/', view_func=api.index) app.add_url_rule( '/login', view_func=api.login, methods=['POST', ]) app.add_url_rule( '/register', view_func=api.register, methods=['POST', ]) app.add_url_rule(", "URLs ## API URLs app.add_url_rule('/', view_func=api.index) app.add_url_rule( '/login', view_func=api.login, methods=['POST', ]) app.add_url_rule( '/register',", "methods=['GET', 'POST', ]) API_VER_1 = '/api/v1/' app.add_url_rule( API_VER_1 + 'profile/<person_id>', view_func=api.profile, methods=['GET', ])", "'POST', ]) API_VER_1 = '/api/v1/' app.add_url_rule( API_VER_1 + 'profile/<person_id>', view_func=api.profile, methods=['GET', ]) app.add_url_rule(", "app.add_url_rule( API_VER_1 + 'login_with_fb', view_func=api.login_with_fb, methods=['POST', ]) app.add_url_rule( API_VER_1 + 'leaderboard', view_func=api.leaderboard, methods=['GET',", "app.add_url_rule( API_VER_1 + 'leaderboard', view_func=api.leaderboard, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'user_scores', view_func=api.user_scores, methods=['GET',", "= api.app # URLs ## API URLs app.add_url_rule('/', view_func=api.index) app.add_url_rule( '/login', view_func=api.login, methods=['POST',", "api.app # URLs ## API URLs app.add_url_rule('/', view_func=api.index) app.add_url_rule( '/login', view_func=api.login, methods=['POST', ])", "'profile/<person_id>', view_func=api.profile, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'score', view_func=api.score, methods=['GET', 'POST', ]) app.add_url_rule(", "'/reset_page', view_func=api.reset_page, methods=['GET', 'POST', ]) API_VER_1 = '/api/v1/' app.add_url_rule( API_VER_1 + 'profile/<person_id>', view_func=api.profile,", "# URLs ## API URLs app.add_url_rule('/', view_func=api.index) app.add_url_rule( '/login', 
view_func=api.login, methods=['POST', ]) app.add_url_rule(", "methods=['POST', ]) app.add_url_rule( '/forgot_password', view_func=api.forgot_password, methods=['GET', 'POST', ]) app.add_url_rule( '/reset_page', view_func=api.reset_page, methods=['GET', 'POST',", "view_func=api.score, methods=['GET', 'POST', ]) app.add_url_rule( API_VER_1 + 'login_with_fb', view_func=api.login_with_fb, methods=['POST', ]) app.add_url_rule( API_VER_1", "'POST', ]) app.add_url_rule( '/reset_page', view_func=api.reset_page, methods=['GET', 'POST', ]) API_VER_1 = '/api/v1/' app.add_url_rule( API_VER_1", "app.add_url_rule( '/reset_page', view_func=api.reset_page, methods=['GET', 'POST', ]) API_VER_1 = '/api/v1/' app.add_url_rule( API_VER_1 + 'profile/<person_id>',", "app.add_url_rule( '/login', view_func=api.login, methods=['POST', ]) app.add_url_rule( '/register', view_func=api.register, methods=['POST', ]) app.add_url_rule( '/forgot_password', view_func=api.forgot_password,", "## API URLs app.add_url_rule('/', view_func=api.index) app.add_url_rule( '/login', view_func=api.login, methods=['POST', ]) app.add_url_rule( '/register', view_func=api.register,", "methods=['POST', ]) app.add_url_rule( '/register', view_func=api.register, methods=['POST', ]) app.add_url_rule( '/forgot_password', view_func=api.forgot_password, methods=['GET', 'POST', ])", "]) API_VER_1 = '/api/v1/' app.add_url_rule( API_VER_1 + 'profile/<person_id>', view_func=api.profile, methods=['GET', ]) app.add_url_rule( API_VER_1", "view_func=api.login_with_fb, methods=['POST', ]) app.add_url_rule( API_VER_1 + 'leaderboard', view_func=api.leaderboard, methods=['GET', ]) app.add_url_rule( API_VER_1 +", "]) app.add_url_rule( API_VER_1 + 'leaderboard', view_func=api.leaderboard, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'user_scores', view_func=api.user_scores,", "+ 'leaderboard', view_func=api.leaderboard, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'user_scores', view_func=api.user_scores, methods=['GET', ]) if", 
"+ 'login_with_fb', view_func=api.login_with_fb, methods=['POST', ]) app.add_url_rule( API_VER_1 + 'leaderboard', view_func=api.leaderboard, methods=['GET', ]) app.add_url_rule(", "'login_with_fb', view_func=api.login_with_fb, methods=['POST', ]) app.add_url_rule( API_VER_1 + 'leaderboard', view_func=api.leaderboard, methods=['GET', ]) app.add_url_rule( API_VER_1", "'score', view_func=api.score, methods=['GET', 'POST', ]) app.add_url_rule( API_VER_1 + 'login_with_fb', view_func=api.login_with_fb, methods=['POST', ]) app.add_url_rule(", "methods=['GET', 'POST', ]) app.add_url_rule( API_VER_1 + 'login_with_fb', view_func=api.login_with_fb, methods=['POST', ]) app.add_url_rule( API_VER_1 +", "API_VER_1 + 'login_with_fb', view_func=api.login_with_fb, methods=['POST', ]) app.add_url_rule( API_VER_1 + 'leaderboard', view_func=api.leaderboard, methods=['GET', ])", "view_func=api.leaderboard, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'user_scores', view_func=api.user_scores, methods=['GET', ]) if __name__ ==", "]) app.add_url_rule( API_VER_1 + 'score', view_func=api.score, methods=['GET', 'POST', ]) app.add_url_rule( API_VER_1 + 'login_with_fb',", "app = api.app # URLs ## API URLs app.add_url_rule('/', view_func=api.index) app.add_url_rule( '/login', view_func=api.login,", "'/register', view_func=api.register, methods=['POST', ]) app.add_url_rule( '/forgot_password', view_func=api.forgot_password, methods=['GET', 'POST', ]) app.add_url_rule( '/reset_page', view_func=api.reset_page,", "methods=['GET', 'POST', ]) app.add_url_rule( '/reset_page', view_func=api.reset_page, methods=['GET', 'POST', ]) API_VER_1 = '/api/v1/' app.add_url_rule(", "methods=['GET', ]) app.add_url_rule( API_VER_1 + 'score', view_func=api.score, methods=['GET', 'POST', ]) app.add_url_rule( API_VER_1 +", "= '/api/v1/' app.add_url_rule( API_VER_1 + 'profile/<person_id>', view_func=api.profile, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'score',", "methods=['POST', ]) app.add_url_rule( 
API_VER_1 + 'leaderboard', view_func=api.leaderboard, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'user_scores',", "'/api/v1/' app.add_url_rule( API_VER_1 + 'profile/<person_id>', view_func=api.profile, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'score', view_func=api.score,", "view_func=api.register, methods=['POST', ]) app.add_url_rule( '/forgot_password', view_func=api.forgot_password, methods=['GET', 'POST', ]) app.add_url_rule( '/reset_page', view_func=api.reset_page, methods=['GET',", "app.add_url_rule( API_VER_1 + 'score', view_func=api.score, methods=['GET', 'POST', ]) app.add_url_rule( API_VER_1 + 'login_with_fb', view_func=api.login_with_fb,", "'leaderboard', view_func=api.leaderboard, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'user_scores', view_func=api.user_scores, methods=['GET', ]) if __name__", "]) app.add_url_rule( '/register', view_func=api.register, methods=['POST', ]) app.add_url_rule( '/forgot_password', view_func=api.forgot_password, methods=['GET', 'POST', ]) app.add_url_rule(", "api app = api.app # URLs ## API URLs app.add_url_rule('/', view_func=api.index) app.add_url_rule( '/login',", "]) app.add_url_rule( API_VER_1 + 'login_with_fb', view_func=api.login_with_fb, methods=['POST', ]) app.add_url_rule( API_VER_1 + 'leaderboard', view_func=api.leaderboard,", "view_func=api.login, methods=['POST', ]) app.add_url_rule( '/register', view_func=api.register, methods=['POST', ]) app.add_url_rule( '/forgot_password', view_func=api.forgot_password, methods=['GET', 'POST',", "API_VER_1 + 'leaderboard', view_func=api.leaderboard, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'user_scores', view_func=api.user_scores, methods=['GET', ])", "]) app.add_url_rule( API_VER_1 + 'user_scores', view_func=api.user_scores, methods=['GET', ]) if __name__ == '__main__': app.run(", "view_func=api.profile, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'score', view_func=api.score, methods=['GET', 'POST', ]) app.add_url_rule( API_VER_1", 
"view_func=api.index) app.add_url_rule( '/login', view_func=api.login, methods=['POST', ]) app.add_url_rule( '/register', view_func=api.register, methods=['POST', ]) app.add_url_rule( '/forgot_password',", "API_VER_1 + 'profile/<person_id>', view_func=api.profile, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'score', view_func=api.score, methods=['GET', 'POST',", "+ 'profile/<person_id>', view_func=api.profile, methods=['GET', ]) app.add_url_rule( API_VER_1 + 'score', view_func=api.score, methods=['GET', 'POST', ])", "+ 'score', view_func=api.score, methods=['GET', 'POST', ]) app.add_url_rule( API_VER_1 + 'login_with_fb', view_func=api.login_with_fb, methods=['POST', ])", "API URLs app.add_url_rule('/', view_func=api.index) app.add_url_rule( '/login', view_func=api.login, methods=['POST', ]) app.add_url_rule( '/register', view_func=api.register, methods=['POST',", "app.add_url_rule( API_VER_1 + 'user_scores', view_func=api.user_scores, methods=['GET', ]) if __name__ == '__main__': app.run( port=8888,", "API_VER_1 + 'user_scores', view_func=api.user_scores, methods=['GET', ]) if __name__ == '__main__': app.run( port=8888, debug=True,", "+ 'user_scores', view_func=api.user_scores, methods=['GET', ]) if __name__ == '__main__': app.run( port=8888, debug=True, host='0.0.0.0')", "view_func=api.reset_page, methods=['GET', 'POST', ]) API_VER_1 = '/api/v1/' app.add_url_rule( API_VER_1 + 'profile/<person_id>', view_func=api.profile, methods=['GET',", "view_func=api.forgot_password, methods=['GET', 'POST', ]) app.add_url_rule( '/reset_page', view_func=api.reset_page, methods=['GET', 'POST', ]) API_VER_1 = '/api/v1/'" ]
[ "cloudinary.models from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "model_name='profile', name='profile_pic', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile Picture'), ), migrations.AlterField( model_name='project', name='screenshot', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project screenshot'), ),", "name='location', field=models.CharField(max_length=40), ), migrations.AlterField( model_name='profile', name='profile_pic', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile Picture'), ), migrations.AlterField( model_name='project', name='screenshot',", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('awardsapp', '0001_initial'), ] operations =", "migrations.RenameField( model_name='profile', old_name='portfolio_link', new_name='link', ), migrations.AlterField( model_name='profile', name='location', field=models.CharField(max_length=40), ), migrations.AlterField( model_name='profile', name='profile_pic',", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('post_date', models.DateTimeField(auto_now_add=True)), ('design', awardsapp.models.IntegerRangeField()), ('usability', awardsapp.models.IntegerRangeField()), ('content', awardsapp.models.IntegerRangeField()),", "name='screenshot', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project screenshot'), ), migrations.CreateModel( name='Vote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "new_name='link', ), migrations.AlterField( model_name='profile', name='location', field=models.CharField(max_length=40), ), migrations.AlterField( model_name='profile', name='profile_pic', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile Picture'),", 
"django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('awardsapp', '0001_initial'), ] operations = [ migrations.AlterModelOptions(", "Picture'), ), migrations.AlterField( model_name='project', name='screenshot', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project screenshot'), ), migrations.CreateModel( name='Vote', fields=[ ('id',", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('awardsapp', '0001_initial'),", "('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='awardsapp.project')), ('voter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='awardsapp.profile')), ], options={ 'ordering': ['-post_date'], }, ),", "), migrations.AlterField( model_name='profile', name='location', field=models.CharField(max_length=40), ), migrations.AlterField( model_name='profile', name='profile_pic', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile Picture'), ),", "migrations.AlterField( model_name='profile', name='location', field=models.CharField(max_length=40), ), migrations.AlterField( model_name='profile', name='profile_pic', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile Picture'), ), migrations.AlterField(", "Generated by Django 3.1.7 on 2021-04-07 14:07 import awardsapp.models import cloudinary.models from django.db", "[ migrations.AlterModelOptions( name='project', options={'ordering': ['-post_date']}, ), migrations.RenameField( model_name='profile', old_name='portfolio_link', new_name='link', ), migrations.AlterField( model_name='profile',", "), migrations.AlterField( model_name='profile', name='profile_pic', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile Picture'), ), migrations.AlterField( model_name='project', name='screenshot', 
field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project", "options={'ordering': ['-post_date']}, ), migrations.RenameField( model_name='profile', old_name='portfolio_link', new_name='link', ), migrations.AlterField( model_name='profile', name='location', field=models.CharField(max_length=40), ),", "operations = [ migrations.AlterModelOptions( name='project', options={'ordering': ['-post_date']}, ), migrations.RenameField( model_name='profile', old_name='portfolio_link', new_name='link', ),", "('content', awardsapp.models.IntegerRangeField()), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='awardsapp.project')), ('voter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='awardsapp.profile')), ], options={ 'ordering': ['-post_date'],", "name='project', options={'ordering': ['-post_date']}, ), migrations.RenameField( model_name='profile', old_name='portfolio_link', new_name='link', ), migrations.AlterField( model_name='profile', name='location', field=models.CharField(max_length=40),", "), migrations.CreateModel( name='Vote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('post_date', models.DateTimeField(auto_now_add=True)), ('design', awardsapp.models.IntegerRangeField()),", "<gh_stars>0 # Generated by Django 3.1.7 on 2021-04-07 14:07 import awardsapp.models import cloudinary.models", "2021-04-07 14:07 import awardsapp.models import cloudinary.models from django.db import migrations, models import django.db.models.deletion", "('awardsapp', '0001_initial'), ] operations = [ migrations.AlterModelOptions( name='project', options={'ordering': ['-post_date']}, ), migrations.RenameField( model_name='profile',", "['-post_date']}, ), migrations.RenameField( model_name='profile', old_name='portfolio_link', new_name='link', ), migrations.AlterField( model_name='profile', name='location', 
field=models.CharField(max_length=40), ), migrations.AlterField(", "import cloudinary.models from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "name='Vote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('post_date', models.DateTimeField(auto_now_add=True)), ('design', awardsapp.models.IntegerRangeField()), ('usability', awardsapp.models.IntegerRangeField()),", "[ ('awardsapp', '0001_initial'), ] operations = [ migrations.AlterModelOptions( name='project', options={'ordering': ['-post_date']}, ), migrations.RenameField(", "('design', awardsapp.models.IntegerRangeField()), ('usability', awardsapp.models.IntegerRangeField()), ('content', awardsapp.models.IntegerRangeField()), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='awardsapp.project')), ('voter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='awardsapp.profile')),", "'0001_initial'), ] operations = [ migrations.AlterModelOptions( name='project', options={'ordering': ['-post_date']}, ), migrations.RenameField( model_name='profile', old_name='portfolio_link',", "Django 3.1.7 on 2021-04-07 14:07 import awardsapp.models import cloudinary.models from django.db import migrations,", "verbose_name='Project screenshot'), ), migrations.CreateModel( name='Vote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('post_date', models.DateTimeField(auto_now_add=True)),", "on 2021-04-07 14:07 import awardsapp.models import cloudinary.models from django.db import migrations, models import", "awardsapp.models.IntegerRangeField()), ('content', awardsapp.models.IntegerRangeField()), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='awardsapp.project')), ('voter', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='awardsapp.profile')), ], options={ 'ordering':", "model_name='profile', name='location', field=models.CharField(max_length=40), ), migrations.AlterField( model_name='profile', name='profile_pic', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile Picture'), ), migrations.AlterField( model_name='project',", "migrations.CreateModel( name='Vote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('post_date', models.DateTimeField(auto_now_add=True)), ('design', awardsapp.models.IntegerRangeField()), ('usability',", "verbose_name='ID')), ('post_date', models.DateTimeField(auto_now_add=True)), ('design', awardsapp.models.IntegerRangeField()), ('usability', awardsapp.models.IntegerRangeField()), ('content', awardsapp.models.IntegerRangeField()), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='awardsapp.project')),", "= [ ('awardsapp', '0001_initial'), ] operations = [ migrations.AlterModelOptions( name='project', options={'ordering': ['-post_date']}, ),", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('awardsapp', '0001_initial'), ]", "models.DateTimeField(auto_now_add=True)), ('design', awardsapp.models.IntegerRangeField()), ('usability', awardsapp.models.IntegerRangeField()), ('content', awardsapp.models.IntegerRangeField()), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='awardsapp.project')), ('voter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "awardsapp.models.IntegerRangeField()), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='awardsapp.project')), ('voter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='awardsapp.profile')), ], options={ 
'ordering': ['-post_date'], },", "old_name='portfolio_link', new_name='link', ), migrations.AlterField( model_name='profile', name='location', field=models.CharField(max_length=40), ), migrations.AlterField( model_name='profile', name='profile_pic', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile", "migrations.AlterField( model_name='profile', name='profile_pic', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile Picture'), ), migrations.AlterField( model_name='project', name='screenshot', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project screenshot'),", "), migrations.AlterField( model_name='project', name='screenshot', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project screenshot'), ), migrations.CreateModel( name='Vote', fields=[ ('id', models.AutoField(auto_created=True,", "awardsapp.models.IntegerRangeField()), ('usability', awardsapp.models.IntegerRangeField()), ('content', awardsapp.models.IntegerRangeField()), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='awardsapp.project')), ('voter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='awardsapp.profile')), ],", "14:07 import awardsapp.models import cloudinary.models from django.db import migrations, models import django.db.models.deletion class", "class Migration(migrations.Migration): dependencies = [ ('awardsapp', '0001_initial'), ] operations = [ migrations.AlterModelOptions( name='project',", "serialize=False, verbose_name='ID')), ('post_date', models.DateTimeField(auto_now_add=True)), ('design', awardsapp.models.IntegerRangeField()), ('usability', awardsapp.models.IntegerRangeField()), ('content', awardsapp.models.IntegerRangeField()), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes',", "migrations.AlterField( model_name='project', name='screenshot', 
field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project screenshot'), ), migrations.CreateModel( name='Vote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "migrations.AlterModelOptions( name='project', options={'ordering': ['-post_date']}, ), migrations.RenameField( model_name='profile', old_name='portfolio_link', new_name='link', ), migrations.AlterField( model_name='profile', name='location',", "), migrations.RenameField( model_name='profile', old_name='portfolio_link', new_name='link', ), migrations.AlterField( model_name='profile', name='location', field=models.CharField(max_length=40), ), migrations.AlterField( model_name='profile',", "field=models.CharField(max_length=40), ), migrations.AlterField( model_name='profile', name='profile_pic', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile Picture'), ), migrations.AlterField( model_name='project', name='screenshot', field=cloudinary.models.CloudinaryField(max_length=255,", "import awardsapp.models import cloudinary.models from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "3.1.7 on 2021-04-07 14:07 import awardsapp.models import cloudinary.models from django.db import migrations, models", "('usability', awardsapp.models.IntegerRangeField()), ('content', awardsapp.models.IntegerRangeField()), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='awardsapp.project')), ('voter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='awardsapp.profile')), ], options={", "field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile Picture'), ), migrations.AlterField( model_name='project', name='screenshot', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project screenshot'), ), migrations.CreateModel( name='Vote',", "verbose_name='Profile Picture'), ), migrations.AlterField( 
model_name='project', name='screenshot', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project screenshot'), ), migrations.CreateModel( name='Vote', fields=[", "# Generated by Django 3.1.7 on 2021-04-07 14:07 import awardsapp.models import cloudinary.models from", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('awardsapp',", "name='profile_pic', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Profile Picture'), ), migrations.AlterField( model_name='project', name='screenshot', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project screenshot'), ), migrations.CreateModel(", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('post_date', models.DateTimeField(auto_now_add=True)), ('design', awardsapp.models.IntegerRangeField()), ('usability', awardsapp.models.IntegerRangeField()), ('content',", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='awardsapp.project')), ('voter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='awardsapp.profile')), ], options={ 'ordering': ['-post_date'], }, ), ]", "model_name='profile', old_name='portfolio_link', new_name='link', ), migrations.AlterField( model_name='profile', name='location', field=models.CharField(max_length=40), ), migrations.AlterField( model_name='profile', name='profile_pic', field=cloudinary.models.CloudinaryField(max_length=255,", "] operations = [ migrations.AlterModelOptions( name='project', options={'ordering': ['-post_date']}, ), migrations.RenameField( model_name='profile', old_name='portfolio_link', new_name='link',", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('awardsapp', '0001_initial'), ] operations", "import django.db.models.deletion class Migration(migrations.Migration): 
dependencies = [ ('awardsapp', '0001_initial'), ] operations = [", "('post_date', models.DateTimeField(auto_now_add=True)), ('design', awardsapp.models.IntegerRangeField()), ('usability', awardsapp.models.IntegerRangeField()), ('content', awardsapp.models.IntegerRangeField()), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='awardsapp.project')), ('voter',", "awardsapp.models import cloudinary.models from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "by Django 3.1.7 on 2021-04-07 14:07 import awardsapp.models import cloudinary.models from django.db import", "field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project screenshot'), ), migrations.CreateModel( name='Vote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('post_date',", "= [ migrations.AlterModelOptions( name='project', options={'ordering': ['-post_date']}, ), migrations.RenameField( model_name='profile', old_name='portfolio_link', new_name='link', ), migrations.AlterField(", "Migration(migrations.Migration): dependencies = [ ('awardsapp', '0001_initial'), ] operations = [ migrations.AlterModelOptions( name='project', options={'ordering':", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('post_date', models.DateTimeField(auto_now_add=True)), ('design', awardsapp.models.IntegerRangeField()), ('usability', awardsapp.models.IntegerRangeField()), ('content', awardsapp.models.IntegerRangeField()), ('project',", "screenshot'), ), migrations.CreateModel( name='Vote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('post_date', models.DateTimeField(auto_now_add=True)), ('design',", "primary_key=True, serialize=False, verbose_name='ID')), ('post_date', models.DateTimeField(auto_now_add=True)), 
('design', awardsapp.models.IntegerRangeField()), ('usability', awardsapp.models.IntegerRangeField()), ('content', awardsapp.models.IntegerRangeField()), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "dependencies = [ ('awardsapp', '0001_initial'), ] operations = [ migrations.AlterModelOptions( name='project', options={'ordering': ['-post_date']},", "model_name='project', name='screenshot', field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='Project screenshot'), ), migrations.CreateModel( name='Vote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False," ]
[ "[] # limit = 7 # #<EMAIL> # def print_solutions(current_item, knapsack, current_sum): #", "mana_combos = possible_mana_combinations(land_list, deck) >>> result = (ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True)) >>> print(result)", "for x in mana_combos4] flags = ut.flag_unique_items(combo_ids) mana_combos = ut.compress(mana_combos4, flags) #mana_combos =", ">>> result = (ut.repr2(max_avail_cmc, nl=1, strvals=True, nobraces=True)) >>> print(result) 6 \"\"\" avail_mana =", "coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals import utool as", "result = ('valid = %s' % (str(valid),)) >>> print(result) valid = True \"\"\"", "mana_combo) # TODO: phyrexian / hybrid if combined_cost.satisfied_by(combo2): valid = True break return", "== 0 or len(set(co)) == len(co) for co in non_class1] mana_combos2 = ut.compress(mana_combos1,", "= ut.compress(spell_list, flags) # items = [(1, spell.cmc, idx) for idx, spell in", "# if (current_sum + items[current_item] <= limit): # knapsack.append(items[current_item]) # current_sum += items[current_item]", "= ut.compress(mana_combos1, flags) mana_combos3 = [[[c] if isinstance(c, mtgobjs.ManaSet) else c.mana_potential2(deck=deck) for c", ">>> print(result) valid = True \"\"\" mana_costs = [s.mana_cost2 for s in spell_sequence]", "python -m mtgmonte.mtgutils python -m mtgmonte.mtgutils --allexamples \"\"\" import multiprocessing multiprocessing.freeze_support() # for", "current item and go check others # print_solutions(current_item + 1, list(knapsack), current_sum) #", "== '__main__': r\"\"\" CommandLine: python -m mtgmonte.mtgutils python -m mtgmonte.mtgutils --allexamples \"\"\" import", "4, 1, 1, 3] # knapsack = [] # limit = 7 #", "Secrets']) >>> valid = can_cast(spell_sequence, mana_combos) >>> result = ('valid = %s' %", "spell in spell_list] # feasible_spells = ut.compress(spell_list, flags) # items = [(1, spell.cmc,", "% (str(valid),)) >>> print(result) valid = True \"\"\" 
mana_costs = [s.mana_cost2 for s", "avail_mana = [land.mana_potential2(deck=deck, recurse=False) for land in land_list] avail_mana = filter(len, avail_mana) mana_combos1", "go check others # print_solutions(current_item + 1, list(knapsack), current_sum) # #take the current", ">>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island', 'Tundra', 'Plateau'])) >>> land_list = mtgobjs.load_cards(['Mountain', 'Island', 'Flooded", "{R}), ({CC}, {U}, {G}, {B}, {R}), ({CC}, {U}, {U}, {U}, {R}), ({CC}, {U},", "profile = ut.inject2(__name__, '[mtgutils]') # Then check for color considerations def can_cast(spell_sequence, mana_combos):", "# current_sum += items[current_item] # #current item taken go check others # print_solutions(current_item", "six.string_types)] for co in mana_combos1 ] flags = [len(co) == 0 or len(set(co))", "python -m mtgmonte.mtgutils --exec-can_cast:1 Setup: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import *", "__name__ == '__main__': r\"\"\" CommandLine: python -m mtgmonte.mtgutils python -m mtgmonte.mtgutils --allexamples \"\"\"", "mtgobjs.ManaSet) else c.mana_potential2(deck=deck) for c in co] for co in mana_combos2] unflat_combos3 =", "mana_combos) >>> result = ('valid = %s' % (str(valid),)) >>> print(result) valid =", "(str(valid),)) >>> print(result) valid = True \"\"\" mana_costs = [s.mana_cost2 for s in", "= [list(ut.iprod(*co)) for co in mana_combos3] mana_combos4 = ut.flatten(unflat_combos3) #mana_combos4 = [reduce(operator.add, m)", "(current_sum + items[current_item] <= limit): # knapsack.append(items[current_item]) # current_sum += items[current_item] # #current", "+ 1, list(knapsack), current_sum) # #take the current item if the value doesn't", "list(ut.iprod(*avail_mana)) combo_ids = [tuple(sorted(x)) for x in mana_combos4] flags = ut.flag_unique_items(combo_ids) mana_combos =", "<EMAIL> def possible_mana_combinations(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-possible_mana_combinations Example: 
>>> #", "7 # #<EMAIL> # def print_solutions(current_item, knapsack, current_sum): # #if all items have", "mtgobjs avail_mana = [land.mana_potential2(deck=deck, recurse=False) for land in land_list] avail_mana = filter(len, avail_mana)", "if not isinstance(c, six.string_types)] for co in mana_combos1 ] flags = [len(co) ==", "mana_combos1 ] flags = [len(co) == 0 or len(set(co)) == len(co) for co", "* # NOQA >>> from mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island',", "combo]) if total <= max_avail_cmc: cmc_feasible_sequences.append(combo) return cmc_feasible_sequences #def hacky_knapsack_solns(): # # first", "'Flooded Strand', 'Shivan Reef']) >>> mana_combos = possible_mana_combinations(land_list, deck) Example0: >>> # ENABLE_DOCTEST", "-m mtgmonte.mtgutils --exec-can_cast:1 Setup: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * #", "#z = reduce(operator.add, m) #import utool #utool.embed() # avail_mana = [land.mana_potential(deck=deck) for land", "mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Tundra', 'Island', 'Flooded", "-*- from __future__ import absolute_import, division, print_function, unicode_literals import utool as ut import", "-m mtgmonte.mtgutils --exec-can_cast:0 python -m mtgmonte.mtgutils --exec-can_cast:1 Setup: >>> # ENABLE_DOCTEST >>> from", "for idx, spell in enumerate(feasible_spells)] # total_val, subset = ut.knapsack(items, total_avail_mana) # spell_sequence", "2)) # # http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task # # TODO: # # http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix # #items =", "= sum(maxgen_list) return max_avail_cmc def get_cmc_feasible_sequences(spell_list, max_avail_cmc): # Get spells castable on their", "land in land_list] # avail_mana = filter(len, avail_mana) # mana_combos4 = list(ut.iprod(*avail_mana)) 
combo_ids", "TODO: # # http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix # #items = [1,1,3,4,5] # items = [2, 3,", "from __future__ import absolute_import, division, print_function, unicode_literals import utool as ut import itertools", "# feasible_spells = ut.compress(spell_list, flags) # items = [(1, spell.cmc, idx) for idx,", "import six import operator print, rrr, profile = ut.inject2(__name__, '[mtgutils]') # Then check", "#import utool #utool.embed() # avail_mana = [land.mana_potential(deck=deck) for land in land_list] # avail_mana", "= [[[c] if isinstance(c, mtgobjs.ManaSet) else c.mana_potential2(deck=deck) for c in co] for co", "if the value doesn't exceed the limit # if (current_sum + items[current_item] <=", "for spell in spell_list] feasible_spells = ut.compress(spell_list, flags) cmc_feasible_sequences = [] for num", "[len(co) == 0 or len(set(co)) == len(co) for co in non_class1] mana_combos2 =", "= %r' % (mana_combo,)) combo2 = reduce(operator.add, mana_combo) # TODO: phyrexian / hybrid", ">>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Tundra', 'Island', 'Flooded Strand', 'Flooded Strand']) >>> card", "strvals=True, nobraces=True)) >>> print(result) ({CC}, {U}, {G}, {U}, {C}), ({CC}, {U}, {G}, {B},", "for num in range(1, len(feasible_spells) + 1): spell_combos = list(itertools.combinations(feasible_spells, num)) for combo", "combo in spell_combos: total = sum([spell.cmc for spell in combo]) if total <=", "spell in enumerate(feasible_spells)] # total_val, subset = ut.knapsack(items, total_avail_mana) # spell_sequence = ut.take(feasible_spells,", "for mana_combo in mana_combos: # print('mana_combo = %r' % (mana_combo,)) combo2 = reduce(operator.add,", "len(feasible_spells) + 1): spell_combos = list(itertools.combinations(feasible_spells, num)) for combo in spell_combos: total =", "spell in spell_list] feasible_spells = ut.compress(spell_list, flags) cmc_feasible_sequences = [] for num in", "# 
http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix # #items = [1,1,3,4,5] # items = [2, 3, 4, 3,", "in mana_combos2] unflat_combos3 = [list(ut.iprod(*co)) for co in mana_combos3] mana_combos4 = ut.flatten(unflat_combos3) #mana_combos4", "Example0: >>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['White Knight', 'Delver of Secrets']) >>>", "\"\"\" CommandLine: python -m mtgmonte.mtgutils --test-possible_mana_combinations Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils", "given the current mana sources Args: spell_sequence (list): mana_combos (list): Returns: bool: valid", "knapsack.append(items[current_item]) # current_sum += items[current_item] # #current item taken go check others #", "is castable given the current mana sources Args: spell_sequence (list): mana_combos (list): Returns:", "mtgobjs.load_cards(['Ancient Tomb', 'Tundra', 'Island', 'Flooded Strand', 'Flooded Strand']) >>> card = land_list[-1] >>>", "= mtgobjs.load_cards(['White Knight', 'Delver of Secrets']) >>> valid = can_cast(spell_sequence, mana_combos) >>> result", "current_sum): # #if all items have been processed print the solution and return:", "valid CommandLine: python -m mtgmonte.mtgutils --test-can_cast python -m mtgmonte.mtgutils --exec-can_cast:0 python -m mtgmonte.mtgutils", "= [1,1,3,4,5] # items = [2, 3, 4, 3, 3, 5, 4, 1,", "idea that two fetches cant fetch the same land non_class1 = [ [c", "cmc_feasible_sequences #def hacky_knapsack_solns(): # # first determine which spells are castable without color", "def print_solutions(current_item, knapsack, current_sum): # #if all items have been processed print the", "mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island', 'Tundra', 'Plateau'])) >>> land_list = mtgobjs.load_cards(['Mountain', 'Island',", "TODO: phyrexian / hybrid if combined_cost.satisfied_by(combo2): valid = True break return valid <EMAIL>", "operator print, rrr, profile = ut.inject2(__name__, 
'[mtgutils]') # Then check for color considerations", "# print('mana_combo = %r' % (mana_combo,)) combo2 = reduce(operator.add, mana_combo) # TODO: phyrexian", "card = land_list[-1] >>> max_avail_cmc = get_max_avail_cmc(land_list, deck) >>> result = (ut.repr2(max_avail_cmc, nl=1,", "mana_costs = [s.mana_cost2 for s in spell_sequence] combined_cost = sum(mana_costs) valid = False", "import * # NOQA >>> from mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Volcanic", "for land in land_list] avail_mana = filter(len, avail_mana) maxgen_list = [max(map(len, mana)) for", "{B}, {C}), ({CC}, {U}, {G}, {U}, {R}), ({CC}, {U}, {G}, {B}, {R}), ({CC},", "in mana_combos1 ] flags = [len(co) == 0 or len(set(co)) == len(co) for", ">>> max_avail_cmc = get_max_avail_cmc(land_list, deck) >>> result = (ut.repr2(max_avail_cmc, nl=1, strvals=True, nobraces=True)) >>>", "if __name__ == '__main__': r\"\"\" CommandLine: python -m mtgmonte.mtgutils python -m mtgmonte.mtgutils --allexamples", "(ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True)) >>> print(result) ({CC}, {U}, {G}, {U}, {C}), ({CC}, {U},", "= ut.flag_unique_items(combo_ids) mana_combos = ut.compress(mana_combos4, flags) #mana_combos = list(map(tuple, [''.join(c) for c in", "in mana_combos3] mana_combos4 = ut.flatten(unflat_combos3) #mana_combos4 = [reduce(operator.add, m) for m in mana_combos4]", "ut import itertools import six import operator print, rrr, profile = ut.inject2(__name__, '[mtgutils]')", "spell in combo]) if total <= max_avail_cmc: cmc_feasible_sequences.append(combo) return cmc_feasible_sequences #def hacky_knapsack_solns(): #", "total_avail_mana = len(land_list) # flags = [spell.cmc < total_avail_mana for spell in spell_list]", "spell_combos: total = sum([spell.cmc for spell in combo]) if total <= max_avail_cmc: cmc_feasible_sequences.append(combo)", "for co in non_class1] mana_combos2 = ut.compress(mana_combos1, flags) mana_combos3 = [[[c] if isinstance(c,", ">>> from mtgmonte import 
mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island', 'Tundra', 'Plateau'])) >>>", "# NOQA >>> from mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island', 'Tundra',", "phyrexian / hybrid if combined_cost.satisfied_by(combo2): valid = True break return valid <EMAIL> def", "{R}), ({CC}, {U}, {U}, {U}, {R}), ({CC}, {U}, {U}, {B}, {R}), \"\"\" from", "print(result) 6 \"\"\" avail_mana = [land.mana_potential2(deck=deck, recurse=True) for land in land_list] avail_mana =", "land_list] avail_mana = filter(len, avail_mana) maxgen_list = [max(map(len, mana)) for mana in avail_mana]", "= [] # limit = 7 # #<EMAIL> # def print_solutions(current_item, knapsack, current_sum):", "{U}, {C}), ({CC}, {U}, {G}, {B}, {C}), ({CC}, {U}, {U}, {U}, {C}), ({CC},", "for land in land_list] avail_mana = filter(len, avail_mana) mana_combos1 = list(ut.iprod(*avail_mana)) # Encode", "max_avail_cmc def get_cmc_feasible_sequences(spell_list, max_avail_cmc): # Get spells castable on their own flags =", "mana_combos: # print('mana_combo = %r' % (mana_combo,)) combo2 = reduce(operator.add, mana_combo) # TODO:", "mana_combos def get_max_avail_cmc(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-get_max_avail_cmc Example: >>> #", "print(knapsack) # return # #don't take the current item and go check others", "for c in co if not isinstance(c, six.string_types)] for co in mana_combos1 ]", "the current item and go check others # print_solutions(current_item + 1, list(knapsack), current_sum)", "on their own flags = [spell.cmc <= max_avail_cmc for spell in spell_list] feasible_spells", "= False Example1: >>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['Lightning Angel']) >>> valid", "cmc_feasible_sequences.append(combo) return cmc_feasible_sequences #def hacky_knapsack_solns(): # # first determine which spells are castable", "land_list] # avail_mana = filter(len, avail_mana) # mana_combos4 = list(ut.iprod(*avail_mana)) 
combo_ids = [tuple(sorted(x))", "items = [(1, spell.cmc, idx) for idx, spell in enumerate(feasible_spells)] # total_val, subset", "#don't take the current item and go check others # print_solutions(current_item + 1,", "= [spell.cmc <= max_avail_cmc for spell in spell_list] feasible_spells = ut.compress(spell_list, flags) cmc_feasible_sequences", "avail_mana = filter(len, avail_mana) maxgen_list = [max(map(len, mana)) for mana in avail_mana] max_avail_cmc", "import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island', 'Tundra', 'Plateau'])) >>> land_list = mtgobjs.load_cards(['Mountain',", "land in land_list] avail_mana = filter(len, avail_mana) mana_combos1 = list(ut.iprod(*avail_mana)) # Encode the", "from mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island'])) >>>", "result = (ut.repr2(max_avail_cmc, nl=1, strvals=True, nobraces=True)) >>> print(result) 6 \"\"\" avail_mana = [land.mana_potential2(deck=deck,", "# def print_solutions(current_item, knapsack, current_sum): # #if all items have been processed print", "= mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island', 'Tundra', 'Plateau'])) >>> land_list = mtgobjs.load_cards(['Mountain', 'Island', 'Flooded Strand', 'Shivan", "# total_val, subset = ut.knapsack(items, total_avail_mana) # spell_sequence = ut.take(feasible_spells, ut.get_list_column(subset, 2)) #", "ut.compress(mana_combos4, flags) #mana_combos = list(map(tuple, [''.join(c) for c in mana_combos])) return mana_combos def", "= list(map(tuple, [''.join(c) for c in mana_combos])) return mana_combos def get_max_avail_cmc(land_list, deck=None): \"\"\"", "{C}), ({CC}, {U}, {U}, {U}, {C}), ({CC}, {U}, {U}, {B}, {C}), ({CC}, {U},", "({CC}, {U}, {U}, {B}, {R}), \"\"\" from mtgmonte import mtgobjs avail_mana = [land.mana_potential2(deck=deck,", "in land_list] avail_mana = filter(len, avail_mana) maxgen_list = [max(map(len, mana)) for mana in", "{U}, {G}, {B}, {C}), ({CC}, {U}, {U}, {U}, 
{C}), ({CC}, {U}, {U}, {B},", "'Tundra', 'Plateau'])) >>> land_list = mtgobjs.load_cards(['Mountain', 'Island', 'Flooded Strand', 'Shivan Reef']) >>> mana_combos", "# return # #don't take the current item and go check others #", "Angel']) >>> valid = can_cast(spell_sequence, mana_combos) >>> result = ('valid = %s' %", "= [] for num in range(1, len(feasible_spells) + 1): spell_combos = list(itertools.combinations(feasible_spells, num))", "cmc_feasible_sequences = [] for num in range(1, len(feasible_spells) + 1): spell_combos = list(itertools.combinations(feasible_spells,", "+ items[current_item] <= limit): # knapsack.append(items[current_item]) # current_sum += items[current_item] # #current item", "knapsack, current_sum): # #if all items have been processed print the solution and", "'Sunken Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Island', 'Flooded Strand', 'Flooded Strand',", "items = [2, 3, 4, 3, 3, 5, 4, 1, 1, 3] #", "list(itertools.combinations(feasible_spells, num)) for combo in spell_combos: total = sum([spell.cmc for spell in combo])", "land_list[-1] >>> mana_combos = possible_mana_combinations(land_list, deck) >>> result = (ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True))", "limit # if (current_sum + items[current_item] <= limit): # knapsack.append(items[current_item]) # current_sum +=", "return cmc_feasible_sequences #def hacky_knapsack_solns(): # # first determine which spells are castable without", "rrr, profile = ut.inject2(__name__, '[mtgutils]') # Then check for color considerations def can_cast(spell_sequence,", "deck) Example0: >>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['White Knight', 'Delver of Secrets'])", "len(land_list) # flags = [spell.cmc < total_avail_mana for spell in spell_list] # feasible_spells", "True break return valid <EMAIL> def possible_mana_combinations(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils", "mana_combos3] mana_combos4 = 
ut.flatten(unflat_combos3) #mana_combos4 = [reduce(operator.add, m) for m in mana_combos4] #z", "m) for m in mana_combos4] #z = reduce(operator.add, m) #import utool #utool.embed() #", "item and go check others # print_solutions(current_item + 1, list(knapsack), current_sum) # #take", "ut.flatten(unflat_combos3) #mana_combos4 = [reduce(operator.add, m) for m in mana_combos4] #z = reduce(operator.add, m)", "for mana in avail_mana] max_avail_cmc = sum(maxgen_list) return max_avail_cmc def get_cmc_feasible_sequences(spell_list, max_avail_cmc): #", "idx) for idx, spell in enumerate(feasible_spells)] # total_val, subset = ut.knapsack(items, total_avail_mana) #", "castable given the current mana sources Args: spell_sequence (list): mana_combos (list): Returns: bool:", "'Flooded Strand', 'Flooded Strand', 'Shivan Reef']) >>> card = land_list[-1] >>> mana_combos =", "get_cmc_feasible_sequences(spell_list, max_avail_cmc): # Get spells castable on their own flags = [spell.cmc <=", "unicode_literals import utool as ut import itertools import six import operator print, rrr,", "= ut.compress(mana_combos4, flags) #mana_combos = list(map(tuple, [''.join(c) for c in mana_combos])) return mana_combos", "mtgmonte.mtgutils --allexamples \"\"\" import multiprocessing multiprocessing.freeze_support() # for win32 import utool as ut", "filter(len, avail_mana) # mana_combos4 = list(ut.iprod(*avail_mana)) combo_ids = [tuple(sorted(x)) for x in mana_combos4]", "mana_combos4 = list(ut.iprod(*avail_mana)) combo_ids = [tuple(sorted(x)) for x in mana_combos4] flags = ut.flag_unique_items(combo_ids)", "import * # NOQA >>> from mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical", "current_sum += items[current_item] # #current item taken go check others # print_solutions(current_item +", "{C}), ({CC}, {U}, {U}, {B}, {C}), ({CC}, {U}, {G}, {U}, {R}), ({CC}, {U},", "4, 3, 3, 5, 4, 1, 1, 3] # knapsack = [] #", "print(result) valid = False Example1: >>> # ENABLE_DOCTEST >>> 
spell_sequence = mtgobjs.load_cards(['Lightning Angel'])", "feasible_spells = ut.compress(spell_list, flags) # items = [(1, spell.cmc, idx) for idx, spell", "others # print_solutions(current_item + 1, list(knapsack), current_sum) # #take the current item if", "= possible_mana_combinations(land_list, deck) Example0: >>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['White Knight', 'Delver", "return max_avail_cmc def get_cmc_feasible_sequences(spell_list, max_avail_cmc): # Get spells castable on their own flags", "in spell_combos: total = sum([spell.cmc for spell in combo]) if total <= max_avail_cmc:", "utool as ut import itertools import six import operator print, rrr, profile =", "\"\"\" from mtgmonte import mtgobjs avail_mana = [land.mana_potential2(deck=deck, recurse=False) for land in land_list]", "land_list] avail_mana = filter(len, avail_mana) mana_combos1 = list(ut.iprod(*avail_mana)) # Encode the idea that", "print('mana_combo = %r' % (mana_combo,)) combo2 = reduce(operator.add, mana_combo) # TODO: phyrexian /", "CommandLine: python -m mtgmonte.mtgutils --test-possible_mana_combinations Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import", "3, 3, 5, 4, 1, 1, 3] # knapsack = [] # limit", "= [land.mana_potential2(deck=deck, recurse=True) for land in land_list] avail_mana = filter(len, avail_mana) maxgen_list =", "current_sum) # #take the current item if the value doesn't exceed the limit", "valid = True \"\"\" mana_costs = [s.mana_cost2 for s in spell_sequence] combined_cost =", ") #print_solutions(0, knapsack, 0) if __name__ == '__main__': r\"\"\" CommandLine: python -m mtgmonte.mtgutils", "flags) mana_combos3 = [[[c] if isinstance(c, mtgobjs.ManaSet) else c.mana_potential2(deck=deck) for c in co]", "mana_combo in mana_combos: # print('mana_combo = %r' % (mana_combo,)) combo2 = reduce(operator.add, mana_combo)", "mana_combos1 = list(ut.iprod(*avail_mana)) # Encode the idea that two fetches cant fetch the", "= [spell.cmc < 
total_avail_mana for spell in spell_list] # feasible_spells = ut.compress(spell_list, flags)", "taken go check others # print_solutions(current_item + 1, knapsack, current_sum ) #print_solutions(0, knapsack,", "-m mtgmonte.mtgutils --test-get_max_avail_cmc Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * #", "3] # knapsack = [] # limit = 7 # #<EMAIL> # def", "('valid = %s' % (str(valid),)) >>> print(result) valid = True \"\"\" mana_costs =", "# Get spells castable on their own flags = [spell.cmc <= max_avail_cmc for", "considerations def can_cast(spell_sequence, mana_combos): \"\"\" Returns if a spell sequence is castable given", "utool #utool.embed() # avail_mana = [land.mana_potential(deck=deck) for land in land_list] # avail_mana =", "in spell_list] feasible_spells = ut.compress(spell_list, flags) cmc_feasible_sequences = [] for num in range(1,", "flags) cmc_feasible_sequences = [] for num in range(1, len(feasible_spells) + 1): spell_combos =", "Knight', 'Delver of Secrets']) >>> valid = can_cast(spell_sequence, mana_combos) >>> result = ('valid", "valid = True break return valid <EMAIL> def possible_mana_combinations(land_list, deck=None): \"\"\" CommandLine: python", "in enumerate(feasible_spells)] # total_val, subset = ut.knapsack(items, total_avail_mana) # spell_sequence = ut.take(feasible_spells, ut.get_list_column(subset,", "print_function, unicode_literals import utool as ut import itertools import six import operator print,", "= [land.mana_potential(deck=deck) for land in land_list] # avail_mana = filter(len, avail_mana) # mana_combos4", "def possible_mana_combinations(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-possible_mana_combinations Example: >>> # ENABLE_DOCTEST", "if (current_sum + items[current_item] <= limit): # knapsack.append(items[current_item]) # current_sum += items[current_item] #", "mtgobjs.load_cards(['Mountain', 'Island', 'Flooded Strand', 'Shivan Reef']) >>> mana_combos = 
possible_mana_combinations(land_list, deck) Example0: >>>", "land_list = mtgobjs.load_cards(['Mountain', 'Island', 'Flooded Strand', 'Shivan Reef']) >>> mana_combos = possible_mana_combinations(land_list, deck)", "# ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['Lightning Angel']) >>> valid = can_cast(spell_sequence, mana_combos) >>>", "valid = False for mana_combo in mana_combos: # print('mana_combo = %r' % (mana_combo,))", "(list): mana_combos (list): Returns: bool: valid CommandLine: python -m mtgmonte.mtgutils --test-can_cast python -m", ">>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Island', 'Flooded Strand', 'Flooded Strand', 'Shivan Reef']) >>>", "in land_list] avail_mana = filter(len, avail_mana) mana_combos1 = list(ut.iprod(*avail_mana)) # Encode the idea", "[1,1,3,4,5] # items = [2, 3, 4, 3, 3, 5, 4, 1, 1,", "mtgobjs.load_cards(['Lightning Angel']) >>> valid = can_cast(spell_sequence, mana_combos) >>> result = ('valid = %s'", "--allexamples \"\"\" import multiprocessing multiprocessing.freeze_support() # for win32 import utool as ut #", "r\"\"\" CommandLine: python -m mtgmonte.mtgutils python -m mtgmonte.mtgutils --allexamples \"\"\" import multiprocessing multiprocessing.freeze_support()", "[[[c] if isinstance(c, mtgobjs.ManaSet) else c.mana_potential2(deck=deck) for c in co] for co in", "mana_combos (list): Returns: bool: valid CommandLine: python -m mtgmonte.mtgutils --test-can_cast python -m mtgmonte.mtgutils", "('valid = %s' % (str(valid),)) >>> print(result) valid = False Example1: >>> #", "for color considerations def can_cast(spell_sequence, mana_combos): \"\"\" Returns if a spell sequence is", "for c in co] for co in mana_combos2] unflat_combos3 = [list(ut.iprod(*co)) for co", "{U}, {U}, {B}, {R}), \"\"\" from mtgmonte import mtgobjs avail_mana = [land.mana_potential2(deck=deck, recurse=False)", "# Then check for color considerations def can_cast(spell_sequence, mana_combos): \"\"\" Returns if a", "#mana_combos = list(map(tuple, 
[''.join(c) for c in mana_combos])) return mana_combos def get_max_avail_cmc(land_list, deck=None):", "list(map(tuple, [''.join(c) for c in mana_combos])) return mana_combos def get_max_avail_cmc(land_list, deck=None): \"\"\" CommandLine:", "in mana_combos4] flags = ut.flag_unique_items(combo_ids) mana_combos = ut.compress(mana_combos4, flags) #mana_combos = list(map(tuple, [''.join(c)", "are castable without color consideration # # make knapsack items # total_avail_mana =", "[list(ut.iprod(*co)) for co in mana_combos3] mana_combos4 = ut.flatten(unflat_combos3) #mana_combos4 = [reduce(operator.add, m) for", "spells castable on their own flags = [spell.cmc <= max_avail_cmc for spell in", "cant fetch the same land non_class1 = [ [c for c in co", "Returns: bool: valid CommandLine: python -m mtgmonte.mtgutils --test-can_cast python -m mtgmonte.mtgutils --exec-can_cast:0 python", "'Plateau'])) >>> land_list = mtgobjs.load_cards(['Mountain', 'Island', 'Flooded Strand', 'Shivan Reef']) >>> mana_combos =", "in combo]) if total <= max_avail_cmc: cmc_feasible_sequences.append(combo) return cmc_feasible_sequences #def hacky_knapsack_solns(): # #", "same land non_class1 = [ [c for c in co if not isinstance(c,", ">>> print(result) ({CC}, {U}, {G}, {U}, {C}), ({CC}, {U}, {G}, {B}, {C}), ({CC},", "without color consideration # # make knapsack items # total_avail_mana = len(land_list) #", "if combined_cost.satisfied_by(combo2): valid = True break return valid <EMAIL> def possible_mana_combinations(land_list, deck=None): \"\"\"", "land non_class1 = [ [c for c in co if not isinstance(c, six.string_types)]", "<= max_avail_cmc: cmc_feasible_sequences.append(combo) return cmc_feasible_sequences #def hacky_knapsack_solns(): # # first determine which spells", "have been processed print the solution and return: # if current_item == len(items):", "python -m mtgmonte.mtgutils --test-get_max_avail_cmc Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import *", "1, list(knapsack), 
current_sum) # #take the current item if the value doesn't exceed", ">>> result = ('valid = %s' % (str(valid),)) >>> print(result) valid = True", "total_avail_mana) # spell_sequence = ut.take(feasible_spells, ut.get_list_column(subset, 2)) # # http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task # # TODO:", "been processed print the solution and return: # if current_item == len(items): #", "non_class1] mana_combos2 = ut.compress(mana_combos1, flags) mana_combos3 = [[[c] if isinstance(c, mtgobjs.ManaSet) else c.mana_potential2(deck=deck)", "in mana_combos: # print('mana_combo = %r' % (mana_combo,)) combo2 = reduce(operator.add, mana_combo) #", "-m mtgmonte.mtgutils --allexamples \"\"\" import multiprocessing multiprocessing.freeze_support() # for win32 import utool as", "[c for c in co if not isinstance(c, six.string_types)] for co in mana_combos1", "filter(len, avail_mana) mana_combos1 = list(ut.iprod(*avail_mana)) # Encode the idea that two fetches cant", "[s.mana_cost2 for s in spell_sequence] combined_cost = sum(mana_costs) valid = False for mana_combo", "possible_mana_combinations(land_list, deck) >>> result = (ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True)) >>> print(result) ({CC}, {U},", "False Example1: >>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['Lightning Angel']) >>> valid =", "of Secrets']) >>> valid = can_cast(spell_sequence, mana_combos) >>> result = ('valid = %s'", "the solution and return: # if current_item == len(items): # print(knapsack) # return", "knapsack, current_sum ) #print_solutions(0, knapsack, 0) if __name__ == '__main__': r\"\"\" CommandLine: python", "check others # print_solutions(current_item + 1, list(knapsack), current_sum) # #take the current item", "c.mana_potential2(deck=deck) for c in co] for co in mana_combos2] unflat_combos3 = [list(ut.iprod(*co)) for", "%s' % (str(valid),)) >>> print(result) valid = True \"\"\" mana_costs = [s.mana_cost2 for", "= True break return 
valid <EMAIL> def possible_mana_combinations(land_list, deck=None): \"\"\" CommandLine: python -m", "# #don't take the current item and go check others # print_solutions(current_item +", "'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Tundra', 'Island', 'Flooded Strand', 'Flooded Strand']) >>>", "as ut import itertools import six import operator print, rrr, profile = ut.inject2(__name__,", "{U}, {U}, {U}, {R}), ({CC}, {U}, {U}, {B}, {R}), \"\"\" from mtgmonte import", "print_solutions(current_item + 1, knapsack, current_sum ) #print_solutions(0, knapsack, 0) if __name__ == '__main__':", "mana in avail_mana] max_avail_cmc = sum(maxgen_list) return max_avail_cmc def get_cmc_feasible_sequences(spell_list, max_avail_cmc): # Get", "first determine which spells are castable without color consideration # # make knapsack", "'__main__': r\"\"\" CommandLine: python -m mtgmonte.mtgutils python -m mtgmonte.mtgutils --allexamples \"\"\" import multiprocessing", "fetch the same land non_class1 = [ [c for c in co if", "Returns if a spell sequence is castable given the current mana sources Args:", "isinstance(c, six.string_types)] for co in mana_combos1 ] flags = [len(co) == 0 or", "\"\"\" import multiprocessing multiprocessing.freeze_support() # for win32 import utool as ut # NOQA", "the current item if the value doesn't exceed the limit # if (current_sum", "http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task # # TODO: # # http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix # #items = [1,1,3,4,5] # items", "# # first determine which spells are castable without color consideration # #", "hybrid if combined_cost.satisfied_by(combo2): valid = True break return valid <EMAIL> def possible_mana_combinations(land_list, deck=None):", "recurse=False) for land in land_list] avail_mana = filter(len, avail_mana) mana_combos1 = list(ut.iprod(*avail_mana)) #", "'Flooded Strand', 'Shivan Reef']) 
>>> card = land_list[-1] >>> mana_combos = possible_mana_combinations(land_list, deck)", "{U}, {B}, {C}), ({CC}, {U}, {G}, {U}, {R}), ({CC}, {U}, {G}, {B}, {R}),", "for m in mana_combos4] #z = reduce(operator.add, m) #import utool #utool.embed() # avail_mana", "python -m mtgmonte.mtgutils --test-possible_mana_combinations Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import *", "+ 1, knapsack, current_sum ) #print_solutions(0, knapsack, 0) if __name__ == '__main__': r\"\"\"", "[spell.cmc <= max_avail_cmc for spell in spell_list] feasible_spells = ut.compress(spell_list, flags) cmc_feasible_sequences =", "the limit # if (current_sum + items[current_item] <= limit): # knapsack.append(items[current_item]) # current_sum", "mana_combos2] unflat_combos3 = [list(ut.iprod(*co)) for co in mana_combos3] mana_combos4 = ut.flatten(unflat_combos3) #mana_combos4 =", "print_solutions(current_item, knapsack, current_sum): # #if all items have been processed print the solution", "mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island', 'Tundra', 'Plateau'])) >>> land_list =", "< total_avail_mana for spell in spell_list] # feasible_spells = ut.compress(spell_list, flags) # items", "# # http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix # #items = [1,1,3,4,5] # items = [2, 3, 4,", "= [len(co) == 0 or len(set(co)) == len(co) for co in non_class1] mana_combos2", "(list): Returns: bool: valid CommandLine: python -m mtgmonte.mtgutils --test-can_cast python -m mtgmonte.mtgutils --exec-can_cast:0", "return: # if current_item == len(items): # print(knapsack) # return # #don't take", ">>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb',", "= mtgobjs.load_cards(['Ancient Tomb', 'Tundra', 'Island', 'Flooded Strand', 'Flooded Strand']) >>> card = land_list[-1]", "land_list = mtgobjs.load_cards(['Ancient Tomb', 'Island', 
'Flooded Strand', 'Flooded Strand', 'Shivan Reef']) >>> card", "{U}, {U}, {R}), ({CC}, {U}, {U}, {B}, {R}), \"\"\" from mtgmonte import mtgobjs", "#mana_combos4 = [reduce(operator.add, m) for m in mana_combos4] #z = reduce(operator.add, m) #import", "break return valid <EMAIL> def possible_mana_combinations(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-possible_mana_combinations", "spell.cmc, idx) for idx, spell in enumerate(feasible_spells)] # total_val, subset = ut.knapsack(items, total_avail_mana)", "feasible_spells = ut.compress(spell_list, flags) cmc_feasible_sequences = [] for num in range(1, len(feasible_spells) +", "{U}, {U}, {U}, {C}), ({CC}, {U}, {U}, {B}, {C}), ({CC}, {U}, {G}, {U},", "max_avail_cmc: cmc_feasible_sequences.append(combo) return cmc_feasible_sequences #def hacky_knapsack_solns(): # # first determine which spells are", "mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island', 'Tundra', 'Plateau'])) >>> land_list = mtgobjs.load_cards(['Mountain', 'Island', 'Flooded Strand', 'Shivan Reef'])", "'Tundra', 'Island', 'Flooded Strand', 'Flooded Strand']) >>> card = land_list[-1] >>> max_avail_cmc =", "# print_solutions(current_item + 1, list(knapsack), current_sum) # #take the current item if the", "spell_sequence (list): mana_combos (list): Returns: bool: valid CommandLine: python -m mtgmonte.mtgutils --test-can_cast python", "Tomb', 'Tundra', 'Island', 'Flooded Strand', 'Flooded Strand']) >>> card = land_list[-1] >>> max_avail_cmc", "0) if __name__ == '__main__': r\"\"\" CommandLine: python -m mtgmonte.mtgutils python -m mtgmonte.mtgutils", "avail_mana) # mana_combos4 = list(ut.iprod(*avail_mana)) combo_ids = [tuple(sorted(x)) for x in mana_combos4] flags", "num in range(1, len(feasible_spells) + 1): spell_combos = list(itertools.combinations(feasible_spells, num)) for combo in", "max_avail_cmc = get_max_avail_cmc(land_list, deck) >>> result = (ut.repr2(max_avail_cmc, nl=1, strvals=True, nobraces=True)) >>> 
print(result)", "num)) for combo in spell_combos: total = sum([spell.cmc for spell in combo]) if", "sum(maxgen_list) return max_avail_cmc def get_cmc_feasible_sequences(spell_list, max_avail_cmc): # Get spells castable on their own", "# #if all items have been processed print the solution and return: #", "max_avail_cmc = sum(maxgen_list) return max_avail_cmc def get_cmc_feasible_sequences(spell_list, max_avail_cmc): # Get spells castable on", "(mana_combo,)) combo2 = reduce(operator.add, mana_combo) # TODO: phyrexian / hybrid if combined_cost.satisfied_by(combo2): valid", "mana_combos2 = ut.compress(mana_combos1, flags) mana_combos3 = [[[c] if isinstance(c, mtgobjs.ManaSet) else c.mana_potential2(deck=deck) for", "for co in mana_combos2] unflat_combos3 = [list(ut.iprod(*co)) for co in mana_combos3] mana_combos4 =", "# items = [(1, spell.cmc, idx) for idx, spell in enumerate(feasible_spells)] # total_val,", "({CC}, {U}, {U}, {U}, {R}), ({CC}, {U}, {U}, {B}, {R}), \"\"\" from mtgmonte", "bool: valid CommandLine: python -m mtgmonte.mtgutils --test-can_cast python -m mtgmonte.mtgutils --exec-can_cast:0 python -m", "%s' % (str(valid),)) >>> print(result) valid = False Example1: >>> # ENABLE_DOCTEST >>>", "flags = [spell.cmc <= max_avail_cmc for spell in spell_list] feasible_spells = ut.compress(spell_list, flags)", "#utool.embed() # avail_mana = [land.mana_potential(deck=deck) for land in land_list] # avail_mana = filter(len,", "the value doesn't exceed the limit # if (current_sum + items[current_item] <= limit):", "a spell sequence is castable given the current mana sources Args: spell_sequence (list):", ">>> result = ('valid = %s' % (str(valid),)) >>> print(result) valid = False", "# ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['White Knight', 'Delver of Secrets']) >>> valid =", "which spells are castable without color consideration # # make knapsack items #", "# avail_mana = [land.mana_potential(deck=deck) for land in land_list] # avail_mana = filter(len, 
avail_mana)", "1, 1, 3] # knapsack = [] # limit = 7 # #<EMAIL>", "mana)) for mana in avail_mana] max_avail_cmc = sum(maxgen_list) return max_avail_cmc def get_cmc_feasible_sequences(spell_list, max_avail_cmc):", "total_avail_mana for spell in spell_list] # feasible_spells = ut.compress(spell_list, flags) # items =", "= False for mana_combo in mana_combos: # print('mana_combo = %r' % (mana_combo,)) combo2", "# NOQA >>> from mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken", "print(result) ({CC}, {U}, {G}, {U}, {C}), ({CC}, {U}, {G}, {B}, {C}), ({CC}, {U},", "[''.join(c) for c in mana_combos])) return mana_combos def get_max_avail_cmc(land_list, deck=None): \"\"\" CommandLine: python", "http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix # #items = [1,1,3,4,5] # items = [2, 3, 4, 3, 3,", "--exec-can_cast:0 python -m mtgmonte.mtgutils --exec-can_cast:1 Setup: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import", "= [tuple(sorted(x)) for x in mana_combos4] flags = ut.flag_unique_items(combo_ids) mana_combos = ut.compress(mana_combos4, flags)", "ut.flag_unique_items(combo_ids) mana_combos = ut.compress(mana_combos4, flags) #mana_combos = list(map(tuple, [''.join(c) for c in mana_combos]))", "for co in mana_combos3] mana_combos4 = ut.flatten(unflat_combos3) #mana_combos4 = [reduce(operator.add, m) for m", "def get_max_avail_cmc(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-get_max_avail_cmc Example: >>> # ENABLE_DOCTEST", ">>> print(result) valid = False Example1: >>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['Lightning", "--test-get_max_avail_cmc Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * # NOQA >>>", "'Flooded Strand']) >>> card = land_list[-1] >>> max_avail_cmc = get_max_avail_cmc(land_list, deck) >>> result", "'Delver of Secrets']) >>> valid = can_cast(spell_sequence, mana_combos) >>> result = ('valid =", 
"mana_combos4 = ut.flatten(unflat_combos3) #mana_combos4 = [reduce(operator.add, m) for m in mana_combos4] #z =", "can_cast(spell_sequence, mana_combos) >>> result = ('valid = %s' % (str(valid),)) >>> print(result) valid", "flags) #mana_combos = list(map(tuple, [''.join(c) for c in mana_combos])) return mana_combos def get_max_avail_cmc(land_list,", "Reef']) >>> card = land_list[-1] >>> mana_combos = possible_mana_combinations(land_list, deck) >>> result =", "determine which spells are castable without color consideration # # make knapsack items", "valid = can_cast(spell_sequence, mana_combos) >>> result = ('valid = %s' % (str(valid),)) >>>", "make knapsack items # total_avail_mana = len(land_list) # flags = [spell.cmc < total_avail_mana", "Reef']) >>> mana_combos = possible_mana_combinations(land_list, deck) Example0: >>> # ENABLE_DOCTEST >>> spell_sequence =", "flags = ut.flag_unique_items(combo_ids) mana_combos = ut.compress(mana_combos4, flags) #mana_combos = list(map(tuple, [''.join(c) for c", "ut.knapsack(items, total_avail_mana) # spell_sequence = ut.take(feasible_spells, ut.get_list_column(subset, 2)) # # http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task # #", "ut.compress(mana_combos1, flags) mana_combos3 = [[[c] if isinstance(c, mtgobjs.ManaSet) else c.mana_potential2(deck=deck) for c in", "\"\"\" mana_costs = [s.mana_cost2 for s in spell_sequence] combined_cost = sum(mana_costs) valid =", "# avail_mana = filter(len, avail_mana) # mana_combos4 = list(ut.iprod(*avail_mana)) combo_ids = [tuple(sorted(x)) for", "import utool as ut import itertools import six import operator print, rrr, profile", "can_cast(spell_sequence, mana_combos): \"\"\" Returns if a spell sequence is castable given the current", "'Island', 'Flooded Strand', 'Shivan Reef']) >>> mana_combos = possible_mana_combinations(land_list, deck) Example0: >>> #", "from mtgmonte.mtgutils import * # NOQA >>> from mtgmonte import mtgobjs >>> deck", "current_item 
== len(items): # print(knapsack) # return # #don't take the current item", "print_solutions(current_item + 1, list(knapsack), current_sum) # #take the current item if the value", "# first determine which spells are castable without color consideration # # make", ">>> land_list = mtgobjs.load_cards(['Mountain', 'Island', 'Flooded Strand', 'Shivan Reef']) >>> mana_combos = possible_mana_combinations(land_list,", "in land_list] # avail_mana = filter(len, avail_mana) # mana_combos4 = list(ut.iprod(*avail_mana)) combo_ids =", "= mtgobjs.load_cards(['Ancient Tomb', 'Island', 'Flooded Strand', 'Flooded Strand', 'Shivan Reef']) >>> card =", "spell_list] # feasible_spells = ut.compress(spell_list, flags) # items = [(1, spell.cmc, idx) for", ">>> spell_sequence = mtgobjs.load_cards(['White Knight', 'Delver of Secrets']) >>> valid = can_cast(spell_sequence, mana_combos)", "reduce(operator.add, mana_combo) # TODO: phyrexian / hybrid if combined_cost.satisfied_by(combo2): valid = True break", "knapsack, 0) if __name__ == '__main__': r\"\"\" CommandLine: python -m mtgmonte.mtgutils python -m", "own flags = [spell.cmc <= max_avail_cmc for spell in spell_list] feasible_spells = ut.compress(spell_list,", "processed print the solution and return: # if current_item == len(items): # print(knapsack)", "\"\"\" avail_mana = [land.mana_potential2(deck=deck, recurse=True) for land in land_list] avail_mana = filter(len, avail_mana)", "for spell in spell_list] # feasible_spells = ut.compress(spell_list, flags) # items = [(1,", "= can_cast(spell_sequence, mana_combos) >>> result = ('valid = %s' % (str(valid),)) >>> print(result)", ">>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['Lightning Angel']) >>> valid = can_cast(spell_sequence, mana_combos)", "python -m mtgmonte.mtgutils --allexamples \"\"\" import multiprocessing multiprocessing.freeze_support() # for win32 import utool", "\"\"\" Returns if a spell sequence is castable given the current mana sources", "fetches cant fetch 
the same land non_class1 = [ [c for c in", "= get_max_avail_cmc(land_list, deck) >>> result = (ut.repr2(max_avail_cmc, nl=1, strvals=True, nobraces=True)) >>> print(result) 6", "= ut.compress(spell_list, flags) cmc_feasible_sequences = [] for num in range(1, len(feasible_spells) + 1):", "mtgmonte.mtgutils --test-get_max_avail_cmc Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * # NOQA", "exceed the limit # if (current_sum + items[current_item] <= limit): # knapsack.append(items[current_item]) #", "NOQA >>> from mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island', 'Tundra', 'Plateau']))", "deck) >>> result = (ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True)) >>> print(result) ({CC}, {U}, {G},", "({CC}, {U}, {U}, {B}, {C}), ({CC}, {U}, {G}, {U}, {R}), ({CC}, {U}, {G},", "sequence is castable given the current mana sources Args: spell_sequence (list): mana_combos (list):", "the same land non_class1 = [ [c for c in co if not", ">>> valid = can_cast(spell_sequence, mana_combos) >>> result = ('valid = %s' % (str(valid),))", "= [land.mana_potential2(deck=deck, recurse=False) for land in land_list] avail_mana = filter(len, avail_mana) mana_combos1 =", "spell_list] feasible_spells = ut.compress(spell_list, flags) cmc_feasible_sequences = [] for num in range(1, len(feasible_spells)", "= [2, 3, 4, 3, 3, 5, 4, 1, 1, 3] # knapsack", "Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * # NOQA >>> from", ">>> print(result) 6 \"\"\" avail_mana = [land.mana_potential2(deck=deck, recurse=True) for land in land_list] avail_mana", "'Island', 'Flooded Strand', 'Flooded Strand', 'Shivan Reef']) >>> card = land_list[-1] >>> mana_combos", "ut.get_list_column(subset, 2)) # # http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task # # TODO: # # http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix # #items", "land in land_list] avail_mana = 
filter(len, avail_mana) maxgen_list = [max(map(len, mana)) for mana", "= ut.take(feasible_spells, ut.get_list_column(subset, 2)) # # http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task # # TODO: # # http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix", "1, 3] # knapsack = [] # limit = 7 # #<EMAIL> #", "{U}, {C}), ({CC}, {U}, {U}, {B}, {C}), ({CC}, {U}, {G}, {U}, {R}), ({CC},", "c in co] for co in mana_combos2] unflat_combos3 = [list(ut.iprod(*co)) for co in", "reduce(operator.add, m) #import utool #utool.embed() # avail_mana = [land.mana_potential(deck=deck) for land in land_list]", "# Encode the idea that two fetches cant fetch the same land non_class1", "Strand', 'Flooded Strand']) >>> card = land_list[-1] >>> max_avail_cmc = get_max_avail_cmc(land_list, deck) >>>", "get_max_avail_cmc(land_list, deck) >>> result = (ut.repr2(max_avail_cmc, nl=1, strvals=True, nobraces=True)) >>> print(result) 6 \"\"\"", "nobraces=True)) >>> print(result) ({CC}, {U}, {G}, {U}, {C}), ({CC}, {U}, {G}, {B}, {C}),", "spell_combos = list(itertools.combinations(feasible_spells, num)) for combo in spell_combos: total = sum([spell.cmc for spell", "+= items[current_item] # #current item taken go check others # print_solutions(current_item + 1,", "ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['Lightning Angel']) >>> valid = can_cast(spell_sequence, mana_combos) >>> result", "[] for num in range(1, len(feasible_spells) + 1): spell_combos = list(itertools.combinations(feasible_spells, num)) for", "= [reduce(operator.add, m) for m in mana_combos4] #z = reduce(operator.add, m) #import utool", "= len(land_list) # flags = [spell.cmc < total_avail_mana for spell in spell_list] #", "six import operator print, rrr, profile = ut.inject2(__name__, '[mtgutils]') # Then check for", "Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Tundra', 'Island', 'Flooded Strand', 'Flooded Strand'])", 
"all items have been processed print the solution and return: # if current_item", "] flags = [len(co) == 0 or len(set(co)) == len(co) for co in", "#take the current item if the value doesn't exceed the limit # if", "= ('valid = %s' % (str(valid),)) >>> print(result) valid = False Example1: >>>", "mtgmonte.mtgutils --exec-can_cast:0 python -m mtgmonte.mtgutils --exec-can_cast:1 Setup: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils", "for c in mana_combos])) return mana_combos def get_max_avail_cmc(land_list, deck=None): \"\"\" CommandLine: python -m", "mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient", "# ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * # NOQA >>> from mtgmonte import", "else c.mana_potential2(deck=deck) for c in co] for co in mana_combos2] unflat_combos3 = [list(ut.iprod(*co))", "knapsack = [] # limit = 7 # #<EMAIL> # def print_solutions(current_item, knapsack,", "mana_combos): \"\"\" Returns if a spell sequence is castable given the current mana", "len(set(co)) == len(co) for co in non_class1] mana_combos2 = ut.compress(mana_combos1, flags) mana_combos3 =", "mana_combos3 = [[[c] if isinstance(c, mtgobjs.ManaSet) else c.mana_potential2(deck=deck) for c in co] for", "<= max_avail_cmc for spell in spell_list] feasible_spells = ut.compress(spell_list, flags) cmc_feasible_sequences = []", "castable on their own flags = [spell.cmc <= max_avail_cmc for spell in spell_list]", "= [(1, spell.cmc, idx) for idx, spell in enumerate(feasible_spells)] # total_val, subset =", "current mana sources Args: spell_sequence (list): mana_combos (list): Returns: bool: valid CommandLine: python", "# flags = [spell.cmc < total_avail_mana for spell in spell_list] # feasible_spells =", "import multiprocessing multiprocessing.freeze_support() # for win32 import utool as ut # NOQA ut.doctest_funcs()", "flags) # items = [(1, spell.cmc, idx) for idx, spell in 
enumerate(feasible_spells)] #", "+ 1): spell_combos = list(itertools.combinations(feasible_spells, num)) for combo in spell_combos: total = sum([spell.cmc", "# #items = [1,1,3,4,5] # items = [2, 3, 4, 3, 3, 5,", "mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island'])) >>> land_list", ">>> card = land_list[-1] >>> mana_combos = possible_mana_combinations(land_list, deck) >>> result = (ut.repr2(mana_combos,", "possible_mana_combinations(land_list, deck) Example0: >>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['White Knight', 'Delver of", "mana_combos4] flags = ut.flag_unique_items(combo_ids) mana_combos = ut.compress(mana_combos4, flags) #mana_combos = list(map(tuple, [''.join(c) for", "in mana_combos])) return mana_combos def get_max_avail_cmc(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-get_max_avail_cmc", "total_val, subset = ut.knapsack(items, total_avail_mana) # spell_sequence = ut.take(feasible_spells, ut.get_list_column(subset, 2)) # #", "subset = ut.knapsack(items, total_avail_mana) # spell_sequence = ut.take(feasible_spells, ut.get_list_column(subset, 2)) # # http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task", "mtgmonte.mtgutils --test-can_cast python -m mtgmonte.mtgutils --exec-can_cast:0 python -m mtgmonte.mtgutils --exec-can_cast:1 Setup: >>> #", "3, 4, 3, 3, 5, 4, 1, 1, 3] # knapsack = []", "# if current_item == len(items): # print(knapsack) # return # #don't take the", "# total_avail_mana = len(land_list) # flags = [spell.cmc < total_avail_mana for spell in", "({CC}, {U}, {G}, {B}, {R}), ({CC}, {U}, {U}, {U}, {R}), ({CC}, {U}, {U},", "mana_combos = ut.compress(mana_combos4, flags) #mana_combos = list(map(tuple, [''.join(c) for c in mana_combos])) return", "[spell.cmc < total_avail_mana for spell in spell_list] # feasible_spells = ut.compress(spell_list, flags) #", "ut.inject2(__name__, '[mtgutils]') # Then 
check for color considerations def can_cast(spell_sequence, mana_combos): \"\"\" Returns", "mtgmonte.mtgutils import * # NOQA >>> from mtgmonte import mtgobjs >>> deck =", "# limit = 7 # #<EMAIL> # def print_solutions(current_item, knapsack, current_sum): # #if", "#print_solutions(0, knapsack, 0) if __name__ == '__main__': r\"\"\" CommandLine: python -m mtgmonte.mtgutils python", "#def hacky_knapsack_solns(): # # first determine which spells are castable without color consideration", "# # make knapsack items # total_avail_mana = len(land_list) # flags = [spell.cmc", "deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-possible_mana_combinations Example: >>> # ENABLE_DOCTEST >>> from", "= ut.inject2(__name__, '[mtgutils]') # Then check for color considerations def can_cast(spell_sequence, mana_combos): \"\"\"", "combo_ids = [tuple(sorted(x)) for x in mana_combos4] flags = ut.flag_unique_items(combo_ids) mana_combos = ut.compress(mana_combos4,", "result = ('valid = %s' % (str(valid),)) >>> print(result) valid = False Example1:", "co] for co in mana_combos2] unflat_combos3 = [list(ut.iprod(*co)) for co in mana_combos3] mana_combos4", "Args: spell_sequence (list): mana_combos (list): Returns: bool: valid CommandLine: python -m mtgmonte.mtgutils --test-can_cast", ">>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['White Knight', 'Delver of Secrets']) >>> valid", "mtgmonte.mtgutils --test-possible_mana_combinations Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * # NOQA", "== len(items): # print(knapsack) # return # #don't take the current item and", "__future__ import absolute_import, division, print_function, unicode_literals import utool as ut import itertools import", "def get_cmc_feasible_sequences(spell_list, max_avail_cmc): # Get spells castable on their own flags = [spell.cmc", "go check others # print_solutions(current_item + 1, knapsack, current_sum ) #print_solutions(0, knapsack, 0)", "items[current_item] # #current 
item taken go check others # print_solutions(current_item + 1, knapsack,", "#current item taken go check others # print_solutions(current_item + 1, knapsack, current_sum )", "mtgmonte import mtgobjs avail_mana = [land.mana_potential2(deck=deck, recurse=False) for land in land_list] avail_mana =", "= mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Tundra', 'Island',", "--exec-can_cast:1 Setup: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * # NOQA >>>", "Setup: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * # NOQA >>> from", "= filter(len, avail_mana) mana_combos1 = list(ut.iprod(*avail_mana)) # Encode the idea that two fetches", "knapsack items # total_avail_mana = len(land_list) # flags = [spell.cmc < total_avail_mana for", "= [max(map(len, mana)) for mana in avail_mana] max_avail_cmc = sum(maxgen_list) return max_avail_cmc def", "from mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island', 'Tundra', 'Plateau'])) >>> land_list", "CommandLine: python -m mtgmonte.mtgutils --test-get_max_avail_cmc Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import", "= (ut.repr2(max_avail_cmc, nl=1, strvals=True, nobraces=True)) >>> print(result) 6 \"\"\" avail_mana = [land.mana_potential2(deck=deck, recurse=True)", "maxgen_list = [max(map(len, mana)) for mana in avail_mana] max_avail_cmc = sum(maxgen_list) return max_avail_cmc", "Then check for color considerations def can_cast(spell_sequence, mana_combos): \"\"\" Returns if a spell", "{U}, {R}), ({CC}, {U}, {U}, {B}, {R}), \"\"\" from mtgmonte import mtgobjs avail_mana", "mtgobjs.load_cards(['White Knight', 'Delver of Secrets']) >>> valid = can_cast(spell_sequence, mana_combos) >>> result =", "current_sum ) #print_solutions(0, knapsack, 0) if __name__ == '__main__': r\"\"\" CommandLine: python -m", "# make knapsack items # total_avail_mana = len(land_list) # flags = [spell.cmc <", 
"avail_mana) mana_combos1 = list(ut.iprod(*avail_mana)) # Encode the idea that two fetches cant fetch", "limit): # knapsack.append(items[current_item]) # current_sum += items[current_item] # #current item taken go check", "Strand', 'Shivan Reef']) >>> card = land_list[-1] >>> mana_combos = possible_mana_combinations(land_list, deck) >>>", "utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals import utool as ut", "avail_mana = filter(len, avail_mana) # mana_combos4 = list(ut.iprod(*avail_mana)) combo_ids = [tuple(sorted(x)) for x", "sum([spell.cmc for spell in combo]) if total <= max_avail_cmc: cmc_feasible_sequences.append(combo) return cmc_feasible_sequences #def", "== len(co) for co in non_class1] mana_combos2 = ut.compress(mana_combos1, flags) mana_combos3 = [[[c]", "= mtgobjs.load_cards(['Mountain', 'Island', 'Flooded Strand', 'Shivan Reef']) >>> mana_combos = possible_mana_combinations(land_list, deck) Example0:", "color consideration # # make knapsack items # total_avail_mana = len(land_list) # flags", "* # NOQA >>> from mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island',", "= mtgobjs.load_cards(['Lightning Angel']) >>> valid = can_cast(spell_sequence, mana_combos) >>> result = ('valid =", "#if all items have been processed print the solution and return: # if", "print the solution and return: # if current_item == len(items): # print(knapsack) #", "Strand', 'Shivan Reef']) >>> mana_combos = possible_mana_combinations(land_list, deck) Example0: >>> # ENABLE_DOCTEST >>>", "if a spell sequence is castable given the current mana sources Args: spell_sequence", "[ [c for c in co if not isinstance(c, six.string_types)] for co in", "6 \"\"\" avail_mana = [land.mana_potential2(deck=deck, recurse=True) for land in land_list] avail_mana = filter(len,", "avail_mana) maxgen_list = [max(map(len, mana)) for mana in avail_mana] max_avail_cmc = sum(maxgen_list) return", "'Shivan Reef']) >>> card = 
land_list[-1] >>> mana_combos = possible_mana_combinations(land_list, deck) >>> result", "# #take the current item if the value doesn't exceed the limit #", "import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island'])) >>> land_list =", "avail_mana] max_avail_cmc = sum(maxgen_list) return max_avail_cmc def get_cmc_feasible_sequences(spell_list, max_avail_cmc): # Get spells castable", "[2, 3, 4, 3, 3, 5, 4, 1, 1, 3] # knapsack =", "Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Island', 'Flooded Strand', 'Flooded Strand', 'Shivan", ">>> mana_combos = possible_mana_combinations(land_list, deck) Example0: >>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['White", "avail_mana = [land.mana_potential(deck=deck) for land in land_list] # avail_mana = filter(len, avail_mana) #", "{B}, {R}), ({CC}, {U}, {U}, {U}, {R}), ({CC}, {U}, {U}, {B}, {R}), \"\"\"", "if total <= max_avail_cmc: cmc_feasible_sequences.append(combo) return cmc_feasible_sequences #def hacky_knapsack_solns(): # # first determine", ">>> from mtgmonte.mtgutils import * # NOQA >>> from mtgmonte import mtgobjs >>>", "{U}, {G}, {B}, {R}), ({CC}, {U}, {U}, {U}, {R}), ({CC}, {U}, {U}, {B},", "= ut.knapsack(items, total_avail_mana) # spell_sequence = ut.take(feasible_spells, ut.get_list_column(subset, 2)) # # http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task #", "not isinstance(c, six.string_types)] for co in mana_combos1 ] flags = [len(co) == 0", "spell_sequence = mtgobjs.load_cards(['Lightning Angel']) >>> valid = can_cast(spell_sequence, mana_combos) >>> result = ('valid", "check others # print_solutions(current_item + 1, knapsack, current_sum ) #print_solutions(0, knapsack, 0) if", "co in mana_combos2] unflat_combos3 = [list(ut.iprod(*co)) for co in mana_combos3] mana_combos4 = ut.flatten(unflat_combos3)", "(str(valid),)) >>> print(result) valid = False Example1: >>> # ENABLE_DOCTEST >>> 
spell_sequence =", "% (mana_combo,)) combo2 = reduce(operator.add, mana_combo) # TODO: phyrexian / hybrid if combined_cost.satisfied_by(combo2):", "--test-can_cast python -m mtgmonte.mtgutils --exec-can_cast:0 python -m mtgmonte.mtgutils --exec-can_cast:1 Setup: >>> # ENABLE_DOCTEST", "in avail_mana] max_avail_cmc = sum(maxgen_list) return max_avail_cmc def get_cmc_feasible_sequences(spell_list, max_avail_cmc): # Get spells", "s in spell_sequence] combined_cost = sum(mana_costs) valid = False for mana_combo in mana_combos:", "= [ [c for c in co if not isinstance(c, six.string_types)] for co", "the current mana sources Args: spell_sequence (list): mana_combos (list): Returns: bool: valid CommandLine:", "#items = [1,1,3,4,5] # items = [2, 3, 4, 3, 3, 5, 4,", "in non_class1] mana_combos2 = ut.compress(mana_combos1, flags) mana_combos3 = [[[c] if isinstance(c, mtgobjs.ManaSet) else", "# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals import", "castable without color consideration # # make knapsack items # total_avail_mana = len(land_list)", "# #<EMAIL> # def print_solutions(current_item, knapsack, current_sum): # #if all items have been", "ut.take(feasible_spells, ut.get_list_column(subset, 2)) # # http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task # # TODO: # # http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix #", "{U}, {U}, {C}), ({CC}, {U}, {U}, {B}, {C}), ({CC}, {U}, {G}, {U}, {R}),", "recurse=True) for land in land_list] avail_mana = filter(len, avail_mana) maxgen_list = [max(map(len, mana))", "{U}, {G}, {U}, {R}), ({CC}, {U}, {G}, {B}, {R}), ({CC}, {U}, {U}, {U},", "card = land_list[-1] >>> mana_combos = possible_mana_combinations(land_list, deck) >>> result = (ut.repr2(mana_combos, nl=1,", "mtgmonte.mtgutils python -m mtgmonte.mtgutils --allexamples \"\"\" import multiprocessing multiprocessing.freeze_support() # for win32 
import", "{U}, {B}, {R}), \"\"\" from mtgmonte import mtgobjs avail_mana = [land.mana_potential2(deck=deck, recurse=False) for", "spell sequence is castable given the current mana sources Args: spell_sequence (list): mana_combos", "'Shivan Reef']) >>> mana_combos = possible_mana_combinations(land_list, deck) Example0: >>> # ENABLE_DOCTEST >>> spell_sequence", "{B}, {C}), ({CC}, {U}, {U}, {U}, {C}), ({CC}, {U}, {U}, {B}, {C}), ({CC},", "list(knapsack), current_sum) # #take the current item if the value doesn't exceed the", "= reduce(operator.add, mana_combo) # TODO: phyrexian / hybrid if combined_cost.satisfied_by(combo2): valid = True", "mana_combos4] #z = reduce(operator.add, m) #import utool #utool.embed() # avail_mana = [land.mana_potential(deck=deck) for", "if isinstance(c, mtgobjs.ManaSet) else c.mana_potential2(deck=deck) for c in co] for co in mana_combos2]", "solution and return: # if current_item == len(items): # print(knapsack) # return #", "current item if the value doesn't exceed the limit # if (current_sum +", "# spell_sequence = ut.take(feasible_spells, ut.get_list_column(subset, 2)) # # http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task # # TODO: #", "sources Args: spell_sequence (list): mana_combos (list): Returns: bool: valid CommandLine: python -m mtgmonte.mtgutils", "non_class1 = [ [c for c in co if not isinstance(c, six.string_types)] for", ">>> spell_sequence = mtgobjs.load_cards(['Lightning Angel']) >>> valid = can_cast(spell_sequence, mana_combos) >>> result =", "# http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task # # TODO: # # http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix # #items = [1,1,3,4,5] #", "in co] for co in mana_combos2] unflat_combos3 = [list(ut.iprod(*co)) for co in mana_combos3]", "1): spell_combos = list(itertools.combinations(feasible_spells, num)) for combo in spell_combos: total = sum([spell.cmc for", 
"for land in land_list] # avail_mana = filter(len, avail_mana) # mana_combos4 = list(ut.iprod(*avail_mana))", "list(ut.iprod(*avail_mana)) # Encode the idea that two fetches cant fetch the same land", "%r' % (mana_combo,)) combo2 = reduce(operator.add, mana_combo) # TODO: phyrexian / hybrid if", "spell_sequence] combined_cost = sum(mana_costs) valid = False for mana_combo in mana_combos: # print('mana_combo", "'Sunken Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Tundra', 'Island', 'Flooded Strand', 'Flooded", "[tuple(sorted(x)) for x in mana_combos4] flags = ut.flag_unique_items(combo_ids) mana_combos = ut.compress(mana_combos4, flags) #mana_combos", "= ('valid = %s' % (str(valid),)) >>> print(result) valid = True \"\"\" mana_costs", "import mtgobjs avail_mana = [land.mana_potential2(deck=deck, recurse=False) for land in land_list] avail_mana = filter(len,", "{G}, {B}, {R}), ({CC}, {U}, {U}, {U}, {R}), ({CC}, {U}, {U}, {B}, {R}),", "range(1, len(feasible_spells) + 1): spell_combos = list(itertools.combinations(feasible_spells, num)) for combo in spell_combos: total", "= land_list[-1] >>> mana_combos = possible_mana_combinations(land_list, deck) >>> result = (ut.repr2(mana_combos, nl=1, strvals=True,", "# # http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task # # TODO: # # http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix # #items = [1,1,3,4,5]", "filter(len, avail_mana) maxgen_list = [max(map(len, mana)) for mana in avail_mana] max_avail_cmc = sum(maxgen_list)", "spells are castable without color consideration # # make knapsack items # total_avail_mana", "# knapsack.append(items[current_item]) # current_sum += items[current_item] # #current item taken go check others", "spell_sequence = mtgobjs.load_cards(['White Knight', 'Delver of Secrets']) >>> valid = can_cast(spell_sequence, mana_combos) >>>", "if current_item == len(items): # print(knapsack) # return 
# #don't take the current", "combo2 = reduce(operator.add, mana_combo) # TODO: phyrexian / hybrid if combined_cost.satisfied_by(combo2): valid =", "nl=1, strvals=True, nobraces=True)) >>> print(result) ({CC}, {U}, {G}, {U}, {C}), ({CC}, {U}, {G},", "deck) >>> result = (ut.repr2(max_avail_cmc, nl=1, strvals=True, nobraces=True)) >>> print(result) 6 \"\"\" avail_mana", "= True \"\"\" mana_costs = [s.mana_cost2 for s in spell_sequence] combined_cost = sum(mana_costs)", "or len(set(co)) == len(co) for co in non_class1] mana_combos2 = ut.compress(mana_combos1, flags) mana_combos3", "import itertools import six import operator print, rrr, profile = ut.inject2(__name__, '[mtgutils]') #", "{R}), ({CC}, {U}, {U}, {B}, {R}), \"\"\" from mtgmonte import mtgobjs avail_mana =", "idx, spell in enumerate(feasible_spells)] # total_val, subset = ut.knapsack(items, total_avail_mana) # spell_sequence =", "'[mtgutils]') # Then check for color considerations def can_cast(spell_sequence, mana_combos): \"\"\" Returns if", "= %s' % (str(valid),)) >>> print(result) valid = False Example1: >>> # ENABLE_DOCTEST", "len(items): # print(knapsack) # return # #don't take the current item and go", "[reduce(operator.add, m) for m in mana_combos4] #z = reduce(operator.add, m) #import utool #utool.embed()", "= list(itertools.combinations(feasible_spells, num)) for combo in spell_combos: total = sum([spell.cmc for spell in", "mana_combos = possible_mana_combinations(land_list, deck) Example0: >>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['White Knight',", "Island', 'Sunken Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Tundra', 'Island', 'Flooded Strand',", "= sum(mana_costs) valid = False for mana_combo in mana_combos: # print('mana_combo = %r'", "max_avail_cmc): # Get spells castable on their own flags = [spell.cmc <= max_avail_cmc", "result = (ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True)) >>> print(result) ({CC}, {U}, {G}, {U}, {C}),", 
"max_avail_cmc for spell in spell_list] feasible_spells = ut.compress(spell_list, flags) cmc_feasible_sequences = [] for", "enumerate(feasible_spells)] # total_val, subset = ut.knapsack(items, total_avail_mana) # spell_sequence = ut.take(feasible_spells, ut.get_list_column(subset, 2))", "total <= max_avail_cmc: cmc_feasible_sequences.append(combo) return cmc_feasible_sequences #def hacky_knapsack_solns(): # # first determine which", "# mana_combos4 = list(ut.iprod(*avail_mana)) combo_ids = [tuple(sorted(x)) for x in mana_combos4] flags =", "consideration # # make knapsack items # total_avail_mana = len(land_list) # flags =", "for combo in spell_combos: total = sum([spell.cmc for spell in combo]) if total", "return # #don't take the current item and go check others # print_solutions(current_item", "hacky_knapsack_solns(): # # first determine which spells are castable without color consideration #", ">>> from mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island']))", "others # print_solutions(current_item + 1, knapsack, current_sum ) #print_solutions(0, knapsack, 0) if __name__", "= (ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True)) >>> print(result) ({CC}, {U}, {G}, {U}, {C}), ({CC},", "CommandLine: python -m mtgmonte.mtgutils python -m mtgmonte.mtgutils --allexamples \"\"\" import multiprocessing multiprocessing.freeze_support() #", "Example1: >>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['Lightning Angel']) >>> valid = can_cast(spell_sequence,", "value doesn't exceed the limit # if (current_sum + items[current_item] <= limit): #", "3, 5, 4, 1, 1, 3] # knapsack = [] # limit =", "land_list = mtgobjs.load_cards(['Ancient Tomb', 'Tundra', 'Island', 'Flooded Strand', 'Flooded Strand']) >>> card =", "return valid <EMAIL> def possible_mana_combinations(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-possible_mana_combinations Example:", "/ hybrid if 
combined_cost.satisfied_by(combo2): valid = True break return valid <EMAIL> def possible_mana_combinations(land_list,", "{U}, {G}, {U}, {C}), ({CC}, {U}, {G}, {B}, {C}), ({CC}, {U}, {U}, {U},", "\"\"\" CommandLine: python -m mtgmonte.mtgutils --test-get_max_avail_cmc Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils", ">>> card = land_list[-1] >>> max_avail_cmc = get_max_avail_cmc(land_list, deck) >>> result = (ut.repr2(max_avail_cmc,", "in mana_combos4] #z = reduce(operator.add, m) #import utool #utool.embed() # avail_mana = [land.mana_potential(deck=deck)", "Encode the idea that two fetches cant fetch the same land non_class1 =", "division, print_function, unicode_literals import utool as ut import itertools import six import operator", "isinstance(c, mtgobjs.ManaSet) else c.mana_potential2(deck=deck) for c in co] for co in mana_combos2] unflat_combos3", "x in mana_combos4] flags = ut.flag_unique_items(combo_ids) mana_combos = ut.compress(mana_combos4, flags) #mana_combos = list(map(tuple,", "= mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Island', 'Flooded", "Strand']) >>> card = land_list[-1] >>> max_avail_cmc = get_max_avail_cmc(land_list, deck) >>> result =", "co if not isinstance(c, six.string_types)] for co in mana_combos1 ] flags = [len(co)", "co in mana_combos1 ] flags = [len(co) == 0 or len(set(co)) == len(co)", "for co in mana_combos1 ] flags = [len(co) == 0 or len(set(co)) ==", "combined_cost.satisfied_by(combo2): valid = True break return valid <EMAIL> def possible_mana_combinations(land_list, deck=None): \"\"\" CommandLine:", "combined_cost = sum(mana_costs) valid = False for mana_combo in mana_combos: # print('mana_combo =", "limit = 7 # #<EMAIL> # def print_solutions(current_item, knapsack, current_sum): # #if all", "deck = mtgobjs.Deck(mtgobjs.load_cards(['Volcanic Island', 'Tundra', 'Plateau'])) >>> land_list = mtgobjs.load_cards(['Mountain', 'Island', 
'Flooded Strand',", "ut.compress(spell_list, flags) # items = [(1, spell.cmc, idx) for idx, spell in enumerate(feasible_spells)]", "valid <EMAIL> def possible_mana_combinations(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-possible_mana_combinations Example: >>>", "and go check others # print_solutions(current_item + 1, list(knapsack), current_sum) # #take the", "deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-get_max_avail_cmc Example: >>> # ENABLE_DOCTEST >>> from", "their own flags = [spell.cmc <= max_avail_cmc for spell in spell_list] feasible_spells =", "[(1, spell.cmc, idx) for idx, spell in enumerate(feasible_spells)] # total_val, subset = ut.knapsack(items,", "ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['White Knight', 'Delver of Secrets']) >>> valid = can_cast(spell_sequence,", "# knapsack = [] # limit = 7 # #<EMAIL> # def print_solutions(current_item,", "check for color considerations def can_cast(spell_sequence, mana_combos): \"\"\" Returns if a spell sequence", "{U}, {U}, {B}, {C}), ({CC}, {U}, {G}, {U}, {R}), ({CC}, {U}, {G}, {B},", "# items = [2, 3, 4, 3, 3, 5, 4, 1, 1, 3]", "Strand', 'Flooded Strand', 'Shivan Reef']) >>> card = land_list[-1] >>> mana_combos = possible_mana_combinations(land_list,", "mana sources Args: spell_sequence (list): mana_combos (list): Returns: bool: valid CommandLine: python -m", "# # TODO: # # http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix # #items = [1,1,3,4,5] # items =", "({CC}, {U}, {G}, {U}, {C}), ({CC}, {U}, {G}, {B}, {C}), ({CC}, {U}, {U},", "in spell_sequence] combined_cost = sum(mana_costs) valid = False for mana_combo in mana_combos: #", "and return: # if current_item == len(items): # print(knapsack) # return # #don't", "#<EMAIL> # def print_solutions(current_item, knapsack, current_sum): # #if all items have been processed", "land_list[-1] >>> max_avail_cmc = get_max_avail_cmc(land_list, deck) >>> 
result = (ut.repr2(max_avail_cmc, nl=1, strvals=True, nobraces=True))", "items[current_item] <= limit): # knapsack.append(items[current_item]) # current_sum += items[current_item] # #current item taken", "# #current item taken go check others # print_solutions(current_item + 1, knapsack, current_sum", "item taken go check others # print_solutions(current_item + 1, knapsack, current_sum ) #print_solutions(0,", "Get spells castable on their own flags = [spell.cmc <= max_avail_cmc for spell", "mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Island', 'Flooded Strand',", "mana_combos])) return mana_combos def get_max_avail_cmc(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-get_max_avail_cmc Example:", ">>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * # NOQA >>> from mtgmonte", "color considerations def can_cast(spell_sequence, mana_combos): \"\"\" Returns if a spell sequence is castable", "c in co if not isinstance(c, six.string_types)] for co in mana_combos1 ] flags", "python -m mtgmonte.mtgutils --test-can_cast python -m mtgmonte.mtgutils --exec-can_cast:0 python -m mtgmonte.mtgutils --exec-can_cast:1 Setup:", "mtgmonte.mtgutils --exec-can_cast:1 Setup: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * # NOQA", "possible_mana_combinations(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-possible_mana_combinations Example: >>> # ENABLE_DOCTEST >>>", "that two fetches cant fetch the same land non_class1 = [ [c for", "deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Island',", "from mtgmonte import mtgobjs avail_mana = [land.mana_potential2(deck=deck, recurse=False) for land in land_list] avail_mana", "items have been processed print the solution and return: # if current_item ==", "len(co) for co in non_class1] 
mana_combos2 = ut.compress(mana_combos1, flags) mana_combos3 = [[[c] if", "flags = [len(co) == 0 or len(set(co)) == len(co) for co in non_class1]", "0 or len(set(co)) == len(co) for co in non_class1] mana_combos2 = ut.compress(mana_combos1, flags)", "-*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals import utool", "= 7 # #<EMAIL> # def print_solutions(current_item, knapsack, current_sum): # #if all items", "{B}, {R}), \"\"\" from mtgmonte import mtgobjs avail_mana = [land.mana_potential2(deck=deck, recurse=False) for land", "strvals=True, nobraces=True)) >>> print(result) 6 \"\"\" avail_mana = [land.mana_potential2(deck=deck, recurse=True) for land in", "{U}, {R}), ({CC}, {U}, {G}, {B}, {R}), ({CC}, {U}, {U}, {U}, {R}), ({CC},", "[land.mana_potential(deck=deck) for land in land_list] # avail_mana = filter(len, avail_mana) # mana_combos4 =", "= %s' % (str(valid),)) >>> print(result) valid = True \"\"\" mana_costs = [s.mana_cost2", "% (str(valid),)) >>> print(result) valid = False Example1: >>> # ENABLE_DOCTEST >>> spell_sequence", "= sum([spell.cmc for spell in combo]) if total <= max_avail_cmc: cmc_feasible_sequences.append(combo) return cmc_feasible_sequences", "--test-possible_mana_combinations Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * # NOQA >>>", "Tomb', 'Island', 'Flooded Strand', 'Flooded Strand', 'Shivan Reef']) >>> card = land_list[-1] >>>", "nl=1, strvals=True, nobraces=True)) >>> print(result) 6 \"\"\" avail_mana = [land.mana_potential2(deck=deck, recurse=True) for land", "in range(1, len(feasible_spells) + 1): spell_combos = list(itertools.combinations(feasible_spells, num)) for combo in spell_combos:", "<= limit): # knapsack.append(items[current_item]) # current_sum += items[current_item] # #current item taken go", "the idea that two fetches cant fetch the same land non_class1 = [", "{G}, {U}, {R}), ({CC}, {U}, {G}, {B}, {R}), ({CC}, {U}, {U}, {U}, {R}),", "items # 
total_avail_mana = len(land_list) # flags = [spell.cmc < total_avail_mana for spell", "-m mtgmonte.mtgutils --test-possible_mana_combinations Example: >>> # ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * #", "avail_mana = filter(len, avail_mana) mana_combos1 = list(ut.iprod(*avail_mana)) # Encode the idea that two", "1, knapsack, current_sum ) #print_solutions(0, knapsack, 0) if __name__ == '__main__': r\"\"\" CommandLine:", "= [s.mana_cost2 for s in spell_sequence] combined_cost = sum(mana_costs) valid = False for", "-m mtgmonte.mtgutils python -m mtgmonte.mtgutils --allexamples \"\"\" import multiprocessing multiprocessing.freeze_support() # for win32", "avail_mana = [land.mana_potential2(deck=deck, recurse=True) for land in land_list] avail_mana = filter(len, avail_mana) maxgen_list", "NOQA >>> from mtgmonte import mtgobjs >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow',", "import operator print, rrr, profile = ut.inject2(__name__, '[mtgutils]') # Then check for color", "nobraces=True)) >>> print(result) 6 \"\"\" avail_mana = [land.mana_potential2(deck=deck, recurse=True) for land in land_list]", "ut.compress(spell_list, flags) cmc_feasible_sequences = [] for num in range(1, len(feasible_spells) + 1): spell_combos", "m) #import utool #utool.embed() # avail_mana = [land.mana_potential(deck=deck) for land in land_list] #", "[land.mana_potential2(deck=deck, recurse=False) for land in land_list] avail_mana = filter(len, avail_mana) mana_combos1 = list(ut.iprod(*avail_mana))", "item if the value doesn't exceed the limit # if (current_sum + items[current_item]", "co in non_class1] mana_combos2 = ut.compress(mana_combos1, flags) mana_combos3 = [[[c] if isinstance(c, mtgobjs.ManaSet)", "valid = False Example1: >>> # ENABLE_DOCTEST >>> spell_sequence = mtgobjs.load_cards(['Lightning Angel']) >>>", "'Island', 'Flooded Strand', 'Flooded Strand']) >>> card = land_list[-1] >>> max_avail_cmc = get_max_avail_cmc(land_list,", "in co if not 
isinstance(c, six.string_types)] for co in mana_combos1 ] flags =", "= ut.flatten(unflat_combos3) #mana_combos4 = [reduce(operator.add, m) for m in mana_combos4] #z = reduce(operator.add,", "in spell_list] # feasible_spells = ut.compress(spell_list, flags) # items = [(1, spell.cmc, idx)", "{C}), ({CC}, {U}, {G}, {U}, {R}), ({CC}, {U}, {G}, {B}, {R}), ({CC}, {U},", "two fetches cant fetch the same land non_class1 = [ [c for c", "for spell in combo]) if total <= max_avail_cmc: cmc_feasible_sequences.append(combo) return cmc_feasible_sequences #def hacky_knapsack_solns():", "= list(ut.iprod(*avail_mana)) # Encode the idea that two fetches cant fetch the same", "print, rrr, profile = ut.inject2(__name__, '[mtgutils]') # Then check for color considerations def", ">>> result = (ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True)) >>> print(result) ({CC}, {U}, {G}, {U},", "= filter(len, avail_mana) maxgen_list = [max(map(len, mana)) for mana in avail_mana] max_avail_cmc =", "total = sum([spell.cmc for spell in combo]) if total <= max_avail_cmc: cmc_feasible_sequences.append(combo) return", "[max(map(len, mana)) for mana in avail_mana] max_avail_cmc = sum(maxgen_list) return max_avail_cmc def get_cmc_feasible_sequences(spell_list,", "# print(knapsack) # return # #don't take the current item and go check", "return mana_combos def get_max_avail_cmc(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-get_max_avail_cmc Example: >>>", "m in mana_combos4] #z = reduce(operator.add, m) #import utool #utool.embed() # avail_mana =", "(ut.repr2(max_avail_cmc, nl=1, strvals=True, nobraces=True)) >>> print(result) 6 \"\"\" avail_mana = [land.mana_potential2(deck=deck, recurse=True) for", "'Flooded Strand', 'Flooded Strand']) >>> card = land_list[-1] >>> max_avail_cmc = get_max_avail_cmc(land_list, deck)", "# print_solutions(current_item + 1, knapsack, current_sum ) #print_solutions(0, knapsack, 0) if __name__ ==", "{G}, {U}, {C}), ({CC}, {U}, {G}, {B}, 
{C}), ({CC}, {U}, {U}, {U}, {C}),", "Island', 'Sunken Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Island', 'Flooded Strand', 'Flooded", "unflat_combos3 = [list(ut.iprod(*co)) for co in mana_combos3] mana_combos4 = ut.flatten(unflat_combos3) #mana_combos4 = [reduce(operator.add,", "= reduce(operator.add, m) #import utool #utool.embed() # avail_mana = [land.mana_potential(deck=deck) for land in", "CommandLine: python -m mtgmonte.mtgutils --test-can_cast python -m mtgmonte.mtgutils --exec-can_cast:0 python -m mtgmonte.mtgutils --exec-can_cast:1", "print(result) valid = True \"\"\" mana_costs = [s.mana_cost2 for s in spell_sequence] combined_cost", "itertools import six import operator print, rrr, profile = ut.inject2(__name__, '[mtgutils]') # Then", "({CC}, {U}, {G}, {U}, {R}), ({CC}, {U}, {G}, {B}, {R}), ({CC}, {U}, {U},", "= possible_mana_combinations(land_list, deck) >>> result = (ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True)) >>> print(result) ({CC},", "= filter(len, avail_mana) # mana_combos4 = list(ut.iprod(*avail_mana)) combo_ids = [tuple(sorted(x)) for x in", "Island', 'Tundra', 'Plateau'])) >>> land_list = mtgobjs.load_cards(['Mountain', 'Island', 'Flooded Strand', 'Shivan Reef']) >>>", "-m mtgmonte.mtgutils --test-can_cast python -m mtgmonte.mtgutils --exec-can_cast:0 python -m mtgmonte.mtgutils --exec-can_cast:1 Setup: >>>", "sum(mana_costs) valid = False for mana_combo in mana_combos: # print('mana_combo = %r' %", "get_max_avail_cmc(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils --test-get_max_avail_cmc Example: >>> # ENABLE_DOCTEST >>>", ">>> mana_combos = possible_mana_combinations(land_list, deck) >>> result = (ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True)) >>>", "{G}, {B}, {C}), ({CC}, {U}, {U}, {U}, {C}), ({CC}, {U}, {U}, {B}, {C}),", "# TODO: # # http://stackoverflow.com/questions/30554290/how-to-derive-all-solutions-from-knapsack-dp-matrix # #items = [1,1,3,4,5] # items = 
[2,", "= list(ut.iprod(*avail_mana)) combo_ids = [tuple(sorted(x)) for x in mana_combos4] flags = ut.flag_unique_items(combo_ids) mana_combos", "False for mana_combo in mana_combos: # print('mana_combo = %r' % (mana_combo,)) combo2 =", "[land.mana_potential2(deck=deck, recurse=True) for land in land_list] avail_mana = filter(len, avail_mana) maxgen_list = [max(map(len,", "flags = [spell.cmc < total_avail_mana for spell in spell_list] # feasible_spells = ut.compress(spell_list,", "{C}), ({CC}, {U}, {G}, {B}, {C}), ({CC}, {U}, {U}, {U}, {C}), ({CC}, {U},", "'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Island', 'Flooded Strand', 'Flooded Strand', 'Shivan Reef'])", "ENABLE_DOCTEST >>> from mtgmonte.mtgutils import * # NOQA >>> from mtgmonte import mtgobjs", "for s in spell_sequence] combined_cost = sum(mana_costs) valid = False for mana_combo in", "absolute_import, division, print_function, unicode_literals import utool as ut import itertools import six import", "({CC}, {U}, {U}, {U}, {C}), ({CC}, {U}, {U}, {B}, {C}), ({CC}, {U}, {G},", "{R}), \"\"\" from mtgmonte import mtgobjs avail_mana = [land.mana_potential2(deck=deck, recurse=False) for land in", "doesn't exceed the limit # if (current_sum + items[current_item] <= limit): # knapsack.append(items[current_item])", "python -m mtgmonte.mtgutils --exec-can_cast:0 python -m mtgmonte.mtgutils --exec-can_cast:1 Setup: >>> # ENABLE_DOCTEST >>>", "True \"\"\" mana_costs = [s.mana_cost2 for s in spell_sequence] combined_cost = sum(mana_costs) valid", "deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island'])) >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Tundra',", "import absolute_import, division, print_function, unicode_literals import utool as ut import itertools import six", "take the current item and go check others # print_solutions(current_item + 1, list(knapsack),", "co in mana_combos3] mana_combos4 = ut.flatten(unflat_combos3) #mana_combos4 = 
[reduce(operator.add, m) for m in", "5, 4, 1, 1, 3] # knapsack = [] # limit = 7", "mtgobjs.load_cards(['Ancient Tomb', 'Island', 'Flooded Strand', 'Flooded Strand', 'Shivan Reef']) >>> card = land_list[-1]", "c in mana_combos])) return mana_combos def get_max_avail_cmc(land_list, deck=None): \"\"\" CommandLine: python -m mtgmonte.mtgutils", "# TODO: phyrexian / hybrid if combined_cost.satisfied_by(combo2): valid = True break return valid", "({CC}, {U}, {G}, {B}, {C}), ({CC}, {U}, {U}, {U}, {C}), ({CC}, {U}, {U},", "= land_list[-1] >>> max_avail_cmc = get_max_avail_cmc(land_list, deck) >>> result = (ut.repr2(max_avail_cmc, nl=1, strvals=True,", "spell_sequence = ut.take(feasible_spells, ut.get_list_column(subset, 2)) # # http://stackoverflow.com/questions/30007102/number-of-all-combinations-in-knapsack-task # # TODO: # #", "def can_cast(spell_sequence, mana_combos): \"\"\" Returns if a spell sequence is castable given the" ]
[ "<reponame>himanshu-dutta/torchlit def to_device(data, device): if isinstance(data, (list, tuple)): return [to_device(x, device) for x", "device): if isinstance(data, (list, tuple)): return [to_device(x, device) for x in data] return", "to_device(data, device): if isinstance(data, (list, tuple)): return [to_device(x, device) for x in data]", "def to_device(data, device): if isinstance(data, (list, tuple)): return [to_device(x, device) for x in", "isinstance(data, (list, tuple)): return [to_device(x, device) for x in data] return data.to(device, non_blocking=True)", "if isinstance(data, (list, tuple)): return [to_device(x, device) for x in data] return data.to(device," ]
[ "= self.images.reshape(len(self.images), -1) example = self[0] if flatten: self.input_dim = example[0].shape[0] self.target_dim =", "split == \"val\" if name == \"mnist\": dataset = datasets.MNIST( data_dir, train=load_train, download=True,", "torchvision import datasets, transforms DEFAULT_DATA_DIR = \"/is/rg/al/Projects/prob-models/data/\" class ReconstructionDataset(torch.utils.data.Dataset): def __init__( self, name,", "transform=transforms.ToTensor(), ) else: raise ValueError(\"Unknown dataset name {name}\") self.images = torch.stack([x[0] for x", "= int(train_split * len(self.images)) rng = np.random.RandomState(45) idxs = rng.permutation(len(self.images)) if split ==", "for x in dataset], axis=0) if split == \"train\" or split == \"val\":", "None: data_dir = DEFAULT_DATA_DIR load_train = split == \"train\" or split == \"val\"", "numpy as np import torch from torchvision import datasets, transforms DEFAULT_DATA_DIR = \"/is/rg/al/Projects/prob-models/data/\"", "== \"train\": train_idxs = idxs[:train_samples] self.images = self.images[train_idxs] else: val_idxs = idxs[train_samples:] self.images", "flatten: self.input_dim = example[0].shape[0] self.target_dim = example[1].shape[0] else: self.input_dim = example[0] self.target_dim =", "if flatten: self.images = self.images.reshape(len(self.images), -1) example = self[0] if flatten: self.input_dim =", "(\"train\", \"val\", \"test\") if data_dir is None: data_dir = DEFAULT_DATA_DIR load_train = split", "data_dir is None: data_dir = DEFAULT_DATA_DIR load_train = split == \"train\" or split", "= torch.stack([x[0] for x in dataset], axis=0) if split == \"train\" or split", ") elif name == \"fashion-mnist\": dataset = datasets.FashionMNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), )", "self.images, self.images def __len__(self): return len(self.images) def __getitem__(self, idx): img = self.images[idx] return", "= DEFAULT_DATA_DIR load_train = split == \"train\" or split == \"val\" 
if name", "download=True, transform=transforms.ToTensor(), ) elif name == \"fashion-mnist\": dataset = datasets.FashionMNIST( data_dir, train=load_train, download=True,", "assert split in (\"train\", \"val\", \"test\") if data_dir is None: data_dir = DEFAULT_DATA_DIR", "dataset], axis=0) if split == \"train\" or split == \"val\": train_samples = int(train_split", "def __len__(self): return len(self.images) def __getitem__(self, idx): img = self.images[idx] return img, img", "def shape(self): return self._shape def to_tensors(self): return self.images, self.images def __len__(self): return len(self.images)", "def to_tensors(self): return self.images, self.images def __len__(self): return len(self.images) def __getitem__(self, idx): img", "datasets.MNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) elif name == \"fashion-mnist\": dataset = datasets.FashionMNIST(", "dataset name {name}\") self.images = torch.stack([x[0] for x in dataset], axis=0) if split", "elif name == \"fashion-mnist\": dataset = datasets.FashionMNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) else:", "data_dir = DEFAULT_DATA_DIR load_train = split == \"train\" or split == \"val\" if", "split == \"train\" or split == \"val\" if name == \"mnist\": dataset =", "name == \"fashion-mnist\": dataset = datasets.FashionMNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) else: raise", "import datasets, transforms DEFAULT_DATA_DIR = \"/is/rg/al/Projects/prob-models/data/\" class ReconstructionDataset(torch.utils.data.Dataset): def __init__( self, name, split=\"train\",", "= datasets.MNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) elif name == \"fashion-mnist\": dataset =", "self.images def __len__(self): return len(self.images) def __getitem__(self, idx): img = self.images[idx] return img,", "== \"val\": train_samples = int(train_split * len(self.images)) rng = 
np.random.RandomState(45) idxs = rng.permutation(len(self.images))", "rng = np.random.RandomState(45) idxs = rng.permutation(len(self.images)) if split == \"train\": train_idxs = idxs[:train_samples]", "= idxs[train_samples:] self.images = self.images[val_idxs] self._shape = self.images.shape[1:] if flatten: self.images = self.images.reshape(len(self.images),", "split == \"train\": train_idxs = idxs[:train_samples] self.images = self.images[train_idxs] else: val_idxs = idxs[train_samples:]", "dataset = datasets.FashionMNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) else: raise ValueError(\"Unknown dataset name", "transform=transforms.ToTensor(), ) elif name == \"fashion-mnist\": dataset = datasets.FashionMNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(),", "@property def shape(self): return self._shape def to_tensors(self): return self.images, self.images def __len__(self): return", "self._shape def to_tensors(self): return self.images, self.images def __len__(self): return len(self.images) def __getitem__(self, idx):", "example = self[0] if flatten: self.input_dim = example[0].shape[0] self.target_dim = example[1].shape[0] else: self.input_dim", "example[1].shape[0] else: self.input_dim = example[0] self.target_dim = example[1] @property def shape(self): return self._shape", "val_idxs = idxs[train_samples:] self.images = self.images[val_idxs] self._shape = self.images.shape[1:] if flatten: self.images =", "self.target_dim = example[1].shape[0] else: self.input_dim = example[0] self.target_dim = example[1] @property def shape(self):", "transforms DEFAULT_DATA_DIR = \"/is/rg/al/Projects/prob-models/data/\" class ReconstructionDataset(torch.utils.data.Dataset): def __init__( self, name, split=\"train\", flatten=True, train_split=0.8,", "if split == \"train\": train_idxs = idxs[:train_samples] self.images = self.images[train_idxs] else: val_idxs =", "return self.images, self.images def __len__(self): 
return len(self.images) def __getitem__(self, idx): img = self.images[idx]", "idxs[train_samples:] self.images = self.images[val_idxs] self._shape = self.images.shape[1:] if flatten: self.images = self.images.reshape(len(self.images), -1)", "= self.images[val_idxs] self._shape = self.images.shape[1:] if flatten: self.images = self.images.reshape(len(self.images), -1) example =", "= example[0].shape[0] self.target_dim = example[1].shape[0] else: self.input_dim = example[0] self.target_dim = example[1] @property", "or split == \"val\" if name == \"mnist\": dataset = datasets.MNIST( data_dir, train=load_train,", "ReconstructionDataset(torch.utils.data.Dataset): def __init__( self, name, split=\"train\", flatten=True, train_split=0.8, data_dir=None ): assert split in", "import torch from torchvision import datasets, transforms DEFAULT_DATA_DIR = \"/is/rg/al/Projects/prob-models/data/\" class ReconstructionDataset(torch.utils.data.Dataset): def", "= datasets.FashionMNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) else: raise ValueError(\"Unknown dataset name {name}\")", "train=load_train, download=True, transform=transforms.ToTensor(), ) else: raise ValueError(\"Unknown dataset name {name}\") self.images = torch.stack([x[0]", "raise ValueError(\"Unknown dataset name {name}\") self.images = torch.stack([x[0] for x in dataset], axis=0)", "name {name}\") self.images = torch.stack([x[0] for x in dataset], axis=0) if split ==", "self.images = self.images[val_idxs] self._shape = self.images.shape[1:] if flatten: self.images = self.images.reshape(len(self.images), -1) example", "self._shape = self.images.shape[1:] if flatten: self.images = self.images.reshape(len(self.images), -1) example = self[0] if", "idxs[:train_samples] self.images = self.images[train_idxs] else: val_idxs = idxs[train_samples:] self.images = self.images[val_idxs] self._shape =", "\"mnist\": dataset = datasets.MNIST( data_dir, train=load_train, download=True, 
transform=transforms.ToTensor(), ) elif name == \"fashion-mnist\":", "= example[1].shape[0] else: self.input_dim = example[0] self.target_dim = example[1] @property def shape(self): return", "train=load_train, download=True, transform=transforms.ToTensor(), ) elif name == \"fashion-mnist\": dataset = datasets.FashionMNIST( data_dir, train=load_train,", "as np import torch from torchvision import datasets, transforms DEFAULT_DATA_DIR = \"/is/rg/al/Projects/prob-models/data/\" class", "== \"fashion-mnist\": dataset = datasets.FashionMNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) else: raise ValueError(\"Unknown", "= np.random.RandomState(45) idxs = rng.permutation(len(self.images)) if split == \"train\": train_idxs = idxs[:train_samples] self.images", "self.images.reshape(len(self.images), -1) example = self[0] if flatten: self.input_dim = example[0].shape[0] self.target_dim = example[1].shape[0]", "flatten=True, train_split=0.8, data_dir=None ): assert split in (\"train\", \"val\", \"test\") if data_dir is", "example[0].shape[0] self.target_dim = example[1].shape[0] else: self.input_dim = example[0] self.target_dim = example[1] @property def", "example[1] @property def shape(self): return self._shape def to_tensors(self): return self.images, self.images def __len__(self):", "shape(self): return self._shape def to_tensors(self): return self.images, self.images def __len__(self): return len(self.images) def", "np import torch from torchvision import datasets, transforms DEFAULT_DATA_DIR = \"/is/rg/al/Projects/prob-models/data/\" class ReconstructionDataset(torch.utils.data.Dataset):", "return self._shape def to_tensors(self): return self.images, self.images def __len__(self): return len(self.images) def __getitem__(self,", "in (\"train\", \"val\", \"test\") if data_dir is None: data_dir = DEFAULT_DATA_DIR load_train =", "data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) else: raise ValueError(\"Unknown 
dataset name {name}\") self.images =", "train_split=0.8, data_dir=None ): assert split in (\"train\", \"val\", \"test\") if data_dir is None:", "split == \"val\": train_samples = int(train_split * len(self.images)) rng = np.random.RandomState(45) idxs =", "= self[0] if flatten: self.input_dim = example[0].shape[0] self.target_dim = example[1].shape[0] else: self.input_dim =", "== \"mnist\": dataset = datasets.MNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) elif name ==", "load_train = split == \"train\" or split == \"val\" if name == \"mnist\":", "= example[1] @property def shape(self): return self._shape def to_tensors(self): return self.images, self.images def", "self[0] if flatten: self.input_dim = example[0].shape[0] self.target_dim = example[1].shape[0] else: self.input_dim = example[0]", "or split == \"val\": train_samples = int(train_split * len(self.images)) rng = np.random.RandomState(45) idxs", "data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) elif name == \"fashion-mnist\": dataset = datasets.FashionMNIST( data_dir,", "datasets.FashionMNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) else: raise ValueError(\"Unknown dataset name {name}\") self.images", "== \"val\" if name == \"mnist\": dataset = datasets.MNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(),", "self.input_dim = example[0].shape[0] self.target_dim = example[1].shape[0] else: self.input_dim = example[0] self.target_dim = example[1]", "\"train\" or split == \"val\": train_samples = int(train_split * len(self.images)) rng = np.random.RandomState(45)", "torch.stack([x[0] for x in dataset], axis=0) if split == \"train\" or split ==", "DEFAULT_DATA_DIR load_train = split == \"train\" or split == \"val\" if name ==", "__init__( self, name, split=\"train\", flatten=True, train_split=0.8, data_dir=None ): assert split in (\"train\", \"val\",", "\"val\" if name == 
\"mnist\": dataset = datasets.MNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), )", "download=True, transform=transforms.ToTensor(), ) else: raise ValueError(\"Unknown dataset name {name}\") self.images = torch.stack([x[0] for", "\"val\": train_samples = int(train_split * len(self.images)) rng = np.random.RandomState(45) idxs = rng.permutation(len(self.images)) if", "if flatten: self.input_dim = example[0].shape[0] self.target_dim = example[1].shape[0] else: self.input_dim = example[0] self.target_dim", "\"test\") if data_dir is None: data_dir = DEFAULT_DATA_DIR load_train = split == \"train\"", "self.images[val_idxs] self._shape = self.images.shape[1:] if flatten: self.images = self.images.reshape(len(self.images), -1) example = self[0]", "self.images[train_idxs] else: val_idxs = idxs[train_samples:] self.images = self.images[val_idxs] self._shape = self.images.shape[1:] if flatten:", "DEFAULT_DATA_DIR = \"/is/rg/al/Projects/prob-models/data/\" class ReconstructionDataset(torch.utils.data.Dataset): def __init__( self, name, split=\"train\", flatten=True, train_split=0.8, data_dir=None", "= \"/is/rg/al/Projects/prob-models/data/\" class ReconstructionDataset(torch.utils.data.Dataset): def __init__( self, name, split=\"train\", flatten=True, train_split=0.8, data_dir=None ):", "np.random.RandomState(45) idxs = rng.permutation(len(self.images)) if split == \"train\": train_idxs = idxs[:train_samples] self.images =", "import numpy as np import torch from torchvision import datasets, transforms DEFAULT_DATA_DIR =", "name == \"mnist\": dataset = datasets.MNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) elif name", "self, name, split=\"train\", flatten=True, train_split=0.8, data_dir=None ): assert split in (\"train\", \"val\", \"test\")", "else: raise ValueError(\"Unknown dataset name {name}\") self.images = torch.stack([x[0] for x in dataset],", "\"train\" or split == \"val\" if name == \"mnist\": 
dataset = datasets.MNIST( data_dir,", "if name == \"mnist\": dataset = datasets.MNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) elif", "{name}\") self.images = torch.stack([x[0] for x in dataset], axis=0) if split == \"train\"", "torch from torchvision import datasets, transforms DEFAULT_DATA_DIR = \"/is/rg/al/Projects/prob-models/data/\" class ReconstructionDataset(torch.utils.data.Dataset): def __init__(", "if split == \"train\" or split == \"val\": train_samples = int(train_split * len(self.images))", "self.images = self.images[train_idxs] else: val_idxs = idxs[train_samples:] self.images = self.images[val_idxs] self._shape = self.images.shape[1:]", "self.images = self.images.reshape(len(self.images), -1) example = self[0] if flatten: self.input_dim = example[0].shape[0] self.target_dim", "\"fashion-mnist\": dataset = datasets.FashionMNIST( data_dir, train=load_train, download=True, transform=transforms.ToTensor(), ) else: raise ValueError(\"Unknown dataset", "\"/is/rg/al/Projects/prob-models/data/\" class ReconstructionDataset(torch.utils.data.Dataset): def __init__( self, name, split=\"train\", flatten=True, train_split=0.8, data_dir=None ): assert", "else: val_idxs = idxs[train_samples:] self.images = self.images[val_idxs] self._shape = self.images.shape[1:] if flatten: self.images", "== \"train\" or split == \"val\" if name == \"mnist\": dataset = datasets.MNIST(", "in dataset], axis=0) if split == \"train\" or split == \"val\": train_samples =", "class ReconstructionDataset(torch.utils.data.Dataset): def __init__( self, name, split=\"train\", flatten=True, train_split=0.8, data_dir=None ): assert split", "def __init__( self, name, split=\"train\", flatten=True, train_split=0.8, data_dir=None ): assert split in (\"train\",", ") else: raise ValueError(\"Unknown dataset name {name}\") self.images = torch.stack([x[0] for x in", "dataset = datasets.MNIST( data_dir, train=load_train, download=True, 
transform=transforms.ToTensor(), ) elif name == \"fashion-mnist\": dataset", "name, split=\"train\", flatten=True, train_split=0.8, data_dir=None ): assert split in (\"train\", \"val\", \"test\") if", "int(train_split * len(self.images)) rng = np.random.RandomState(45) idxs = rng.permutation(len(self.images)) if split == \"train\":", "self.input_dim = example[0] self.target_dim = example[1] @property def shape(self): return self._shape def to_tensors(self):", "flatten: self.images = self.images.reshape(len(self.images), -1) example = self[0] if flatten: self.input_dim = example[0].shape[0]", "self.target_dim = example[1] @property def shape(self): return self._shape def to_tensors(self): return self.images, self.images", "axis=0) if split == \"train\" or split == \"val\": train_samples = int(train_split *", "): assert split in (\"train\", \"val\", \"test\") if data_dir is None: data_dir =", "if data_dir is None: data_dir = DEFAULT_DATA_DIR load_train = split == \"train\" or", "split == \"train\" or split == \"val\": train_samples = int(train_split * len(self.images)) rng", "\"val\", \"test\") if data_dir is None: data_dir = DEFAULT_DATA_DIR load_train = split ==", "ValueError(\"Unknown dataset name {name}\") self.images = torch.stack([x[0] for x in dataset], axis=0) if", "= rng.permutation(len(self.images)) if split == \"train\": train_idxs = idxs[:train_samples] self.images = self.images[train_idxs] else:", "\"train\": train_idxs = idxs[:train_samples] self.images = self.images[train_idxs] else: val_idxs = idxs[train_samples:] self.images =", "train_samples = int(train_split * len(self.images)) rng = np.random.RandomState(45) idxs = rng.permutation(len(self.images)) if split", "= self.images[train_idxs] else: val_idxs = idxs[train_samples:] self.images = self.images[val_idxs] self._shape = self.images.shape[1:] if", "= example[0] self.target_dim = example[1] @property def shape(self): return self._shape def to_tensors(self): return", "self.images.shape[1:] if 
flatten: self.images = self.images.reshape(len(self.images), -1) example = self[0] if flatten: self.input_dim", "split=\"train\", flatten=True, train_split=0.8, data_dir=None ): assert split in (\"train\", \"val\", \"test\") if data_dir", "data_dir=None ): assert split in (\"train\", \"val\", \"test\") if data_dir is None: data_dir", "-1) example = self[0] if flatten: self.input_dim = example[0].shape[0] self.target_dim = example[1].shape[0] else:", "= self.images.shape[1:] if flatten: self.images = self.images.reshape(len(self.images), -1) example = self[0] if flatten:", "is None: data_dir = DEFAULT_DATA_DIR load_train = split == \"train\" or split ==", "= split == \"train\" or split == \"val\" if name == \"mnist\": dataset", "else: self.input_dim = example[0] self.target_dim = example[1] @property def shape(self): return self._shape def", "== \"train\" or split == \"val\": train_samples = int(train_split * len(self.images)) rng =", "example[0] self.target_dim = example[1] @property def shape(self): return self._shape def to_tensors(self): return self.images,", "train_idxs = idxs[:train_samples] self.images = self.images[train_idxs] else: val_idxs = idxs[train_samples:] self.images = self.images[val_idxs]", "= idxs[:train_samples] self.images = self.images[train_idxs] else: val_idxs = idxs[train_samples:] self.images = self.images[val_idxs] self._shape", "x in dataset], axis=0) if split == \"train\" or split == \"val\": train_samples", "idxs = rng.permutation(len(self.images)) if split == \"train\": train_idxs = idxs[:train_samples] self.images = self.images[train_idxs]", "* len(self.images)) rng = np.random.RandomState(45) idxs = rng.permutation(len(self.images)) if split == \"train\": train_idxs", "self.images = torch.stack([x[0] for x in dataset], axis=0) if split == \"train\" or", "to_tensors(self): return self.images, self.images def __len__(self): return len(self.images) def __getitem__(self, idx): img =", "split in (\"train\", \"val\", \"test\") if data_dir 
is None: data_dir = DEFAULT_DATA_DIR load_train", "datasets, transforms DEFAULT_DATA_DIR = \"/is/rg/al/Projects/prob-models/data/\" class ReconstructionDataset(torch.utils.data.Dataset): def __init__( self, name, split=\"train\", flatten=True,", "rng.permutation(len(self.images)) if split == \"train\": train_idxs = idxs[:train_samples] self.images = self.images[train_idxs] else: val_idxs", "from torchvision import datasets, transforms DEFAULT_DATA_DIR = \"/is/rg/al/Projects/prob-models/data/\" class ReconstructionDataset(torch.utils.data.Dataset): def __init__( self,", "len(self.images)) rng = np.random.RandomState(45) idxs = rng.permutation(len(self.images)) if split == \"train\": train_idxs =" ]
[ "import date # In[2]: stocks = pd.read_csv(\"stocklist.csv\") # In[3]: stocks # In[4]: expiry", "] # In[5]: futureframe = [] for dt in expiry : expiry_dt =", "import pandas as pd import nsepy as ns from datetime import date #", "in stocks.iterrows(): try : Symbol = name['SYMBOL'] df = ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31), futures= True, expiry_date", "futures= True, expiry_date = expiry_dt ) df['turnover_cr'] = df['Turnover']/1000000000000 futureframe.append(df) except: print (\"error", "# In[3]: stocks # In[4]: expiry = date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29), date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28) ] # In[5]: futureframe", "= [] for dt in expiry : expiry_dt = dt for idx,name in", "date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28) ] # In[5]: futureframe = [] for dt in expiry : expiry_dt", "stocks = pd.read_csv(\"stocklist.csv\") # In[3]: stocks # In[4]: expiry = date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29), date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28) ]", "expiry : expiry_dt = dt for idx,name in stocks.iterrows(): try : Symbol =", "for idx,name in stocks.iterrows(): try : Symbol = name['SYMBOL'] df = ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31), futures=", "ns from datetime import date # In[2]: stocks = pd.read_csv(\"stocklist.csv\") # In[3]: stocks", "in expiry : expiry_dt = dt for idx,name in stocks.iterrows(): try : Symbol", ": Symbol = name['SYMBOL'] df = ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31), futures= True, expiry_date = expiry_dt )", "datetime import date # In[2]: stocks = pd.read_csv(\"stocklist.csv\") # In[3]: stocks # In[4]:", "df = ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31), futures= True, 
expiry_date = expiry_dt ) df['turnover_cr'] = df['Turnover']/1000000000000 futureframe.append(df)", "at symbol\", name['SYMBOL']) # In[6]: futures = pd.concat(futureframe) futures.to_csv('futuresdata.csv') # In[ ]: #", "futureframe.append(df) except: print (\"error at symbol\", name['SYMBOL']) # In[6]: futures = pd.concat(futureframe) futures.to_csv('futuresdata.csv')", "= ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31), futures= True, expiry_date = expiry_dt ) df['turnover_cr'] = df['Turnover']/1000000000000 futureframe.append(df) except:", "as pd import nsepy as ns from datetime import date # In[2]: stocks", "utf-8 # In[1]: import pandas as pd import nsepy as ns from datetime", "pd import nsepy as ns from datetime import date # In[2]: stocks =", "nsepy as ns from datetime import date # In[2]: stocks = pd.read_csv(\"stocklist.csv\") #", "for dt in expiry : expiry_dt = dt for idx,name in stocks.iterrows(): try", "= date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29), date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28) ] # In[5]: futureframe = [] for dt in expiry", "expiry_date = expiry_dt ) df['turnover_cr'] = df['Turnover']/1000000000000 futureframe.append(df) except: print (\"error at symbol\",", "In[3]: stocks # In[4]: expiry = date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29), date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28) ] # In[5]: futureframe =", "coding: utf-8 # In[1]: import pandas as pd import nsepy as ns from", "[] for dt in expiry : expiry_dt = dt for idx,name in stocks.iterrows():", "# In[2]: stocks = pd.read_csv(\"stocklist.csv\") # In[3]: stocks # In[4]: expiry = date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29),", "# In[1]: import pandas as pd import nsepy as ns from datetime import", "= name['SYMBOL'] df = 
ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31), futures= True, expiry_date = expiry_dt ) df['turnover_cr'] =", ") df['turnover_cr'] = df['Turnover']/1000000000000 futureframe.append(df) except: print (\"error at symbol\", name['SYMBOL']) # In[6]:", "df['turnover_cr'] = df['Turnover']/1000000000000 futureframe.append(df) except: print (\"error at symbol\", name['SYMBOL']) # In[6]: futures", "from datetime import date # In[2]: stocks = pd.read_csv(\"stocklist.csv\") # In[3]: stocks #", "expiry_dt = dt for idx,name in stocks.iterrows(): try : Symbol = name['SYMBOL'] df", "ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31), futures= True, expiry_date = expiry_dt ) df['turnover_cr'] = df['Turnover']/1000000000000 futureframe.append(df) except: print", "except: print (\"error at symbol\", name['SYMBOL']) # In[6]: futures = pd.concat(futureframe) futures.to_csv('futuresdata.csv') #", "= pd.read_csv(\"stocklist.csv\") # In[3]: stocks # In[4]: expiry = date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29), date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28) ] #", "# In[6]: futures = pd.concat(futureframe) futures.to_csv('futuresdata.csv') # In[ ]: # In[7]: # In[", "In[1]: import pandas as pd import nsepy as ns from datetime import date", "dt for idx,name in stocks.iterrows(): try : Symbol = name['SYMBOL'] df = ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31),", "try : Symbol = name['SYMBOL'] df = ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31), futures= True, expiry_date = expiry_dt", "expiry = date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29), date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28) ] # In[5]: futureframe = [] for dt in", "In[5]: futureframe = [] for dt in expiry : expiry_dt = dt for", "pandas as pd import nsepy as ns from 
datetime import date # In[2]:", "In[2]: stocks = pd.read_csv(\"stocklist.csv\") # In[3]: stocks # In[4]: expiry = date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29), date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28)", "import nsepy as ns from datetime import date # In[2]: stocks = pd.read_csv(\"stocklist.csv\")", "name['SYMBOL']) # In[6]: futures = pd.concat(futureframe) futures.to_csv('futuresdata.csv') # In[ ]: # In[7]: #", "expiry_dt ) df['turnover_cr'] = df['Turnover']/1000000000000 futureframe.append(df) except: print (\"error at symbol\", name['SYMBOL']) #", "date # In[2]: stocks = pd.read_csv(\"stocklist.csv\") # In[3]: stocks # In[4]: expiry =", "(\"error at symbol\", name['SYMBOL']) # In[6]: futures = pd.concat(futureframe) futures.to_csv('futuresdata.csv') # In[ ]:", "df['Turnover']/1000000000000 futureframe.append(df) except: print (\"error at symbol\", name['SYMBOL']) # In[6]: futures = pd.concat(futureframe)", "idx,name in stocks.iterrows(): try : Symbol = name['SYMBOL'] df = ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31), futures= True,", "stocks.iterrows(): try : Symbol = name['SYMBOL'] df = ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31), futures= True, expiry_date =", "# In[4]: expiry = date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29), date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28) ] # In[5]: futureframe = [] for", "# coding: utf-8 # In[1]: import pandas as pd import nsepy as ns", "= df['Turnover']/1000000000000 futureframe.append(df) except: print (\"error at symbol\", name['SYMBOL']) # In[6]: futures =", "In[6]: futures = pd.concat(futureframe) futures.to_csv('futuresdata.csv') # In[ ]: # In[7]: # In[ ]:", "pd.read_csv(\"stocklist.csv\") # In[3]: stocks # In[4]: expiry = 
date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29), date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28) ] # In[5]:", "date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29), date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28) ] # In[5]: futureframe = [] for dt in expiry :", "dt in expiry : expiry_dt = dt for idx,name in stocks.iterrows(): try :", ": expiry_dt = dt for idx,name in stocks.iterrows(): try : Symbol = name['SYMBOL']", "Symbol = name['SYMBOL'] df = ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31), futures= True, expiry_date = expiry_dt ) df['turnover_cr']", "symbol\", name['SYMBOL']) # In[6]: futures = pd.concat(futureframe) futures.to_csv('futuresdata.csv') # In[ ]: # In[7]:", "name['SYMBOL'] df = ns.get_history(symbol=Symbol,start=date(2016,11,1),end=date(2017,12,31), futures= True, expiry_date = expiry_dt ) df['turnover_cr'] = df['Turnover']/1000000000000", "print (\"error at symbol\", name['SYMBOL']) # In[6]: futures = pd.concat(futureframe) futures.to_csv('futuresdata.csv') # In[", "stocks # In[4]: expiry = date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29), date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28) ] # In[5]: futureframe = []", "as ns from datetime import date # In[2]: stocks = pd.read_csv(\"stocklist.csv\") # In[3]:", "= dt for idx,name in stocks.iterrows(): try : Symbol = name['SYMBOL'] df =", "In[4]: expiry = date(2017,2,23),date(2017,3,30),date(2017,4,27),date(2017,5,25),date(2017,6,29), date(2017,7,27),date(2017,8,31),date(2017,9,28),date(2017,10,26),date(2017,11,30),date(2017,12,28) ] # In[5]: futureframe = [] for dt", "= expiry_dt ) df['turnover_cr'] = df['Turnover']/1000000000000 futureframe.append(df) except: print (\"error at symbol\", name['SYMBOL'])", "True, expiry_date = expiry_dt ) 
df['turnover_cr'] = df['Turnover']/1000000000000 futureframe.append(df) except: print (\"error at", "futureframe = [] for dt in expiry : expiry_dt = dt for idx,name", "# In[5]: futureframe = [] for dt in expiry : expiry_dt = dt" ]
[ "# get OmniSci credentials dfcreds = pd.DataFrame() dfcreds = get_credentials(omnisci_keyfile) # connect to", "to OmniSci, allowing time for the instance to wake connection = wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'],", "'ga:pageTitle'}, {'name': 'ga:pagePath'}, {'name': 'ga:referralPath'}, {'name': 'ga:date'}, {'name': 'ga:cityID'} ], 'dimensionFilterClauses': [ {'filters':", "'dimensionFilterClauses': [ {'filters': [ {'dimensionName': 'ga:pageTitle', 'operator': 'PARTIAL', 'expressions': ['blog']} ]} ]} ]}", "= parse_source(df) df = parse_city(df) return df # Load CSV to dataframe def", "header, dimension in zip(dimensionHeaders, dimensions): print (header + ': ' + dimension) for", "parameters for Google API SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest') VIEW_ID = '93521025'", "VIEW_ID, 'dateRanges': [ {'startDate': start_date, 'endDate': 'today'} ], 'pageSize': 10000, 'metrics': [ {'expression':", "dimension in zip(dimensionHeaders, dimensions): print (header + ': ' + dimension) for i,", "'dimensions': [ {'name': 'ga:pageTitle'}, {'name': 'ga:pagePath'}, {'name': 'ga:referralPath'}, {'name': 'ga:date'}, {'name': 'ga:cityID'} ],", "'AdWords API Location Criteria 2018-09-04.csv' # parameters for OmniSci Cloud mapdhost = 'use2-api.mapd.cloud'", "response.get('reports', []): columnHeader = report.get('columnHeader', {}) dimensionHeaders = columnHeader.get('dimensions', []) metricHeaders = columnHeader.get('metricHeader',", "= json_normalize(reports['data']['rows']) data_dimensions = pd.DataFrame(data['dimensions'].tolist()) data_metrics = pd.DataFrame(data['metrics'].tolist()) data_metrics = data_metrics.applymap(lambda x: x['values'])", "ANALYTICS FUNCTIONS def initialize_analyticsreporting(): \"\"\"Initializes the analyticsreporting service object. 
Returns: analytics an authorized", "= get_report(analytics, reportbody) # format the data into the columnar tables OmniSci wants", "(' + str(i) + ')') for metricHeader, value in zip(metricHeaders, values.get('values')): print (metricHeader.get('name')", "path to client_secrets.json file. start_date = '2017-04-01' tables_and_files = [ #blog post views", "# GOOGLE ANALYTICS FUNCTIONS def initialize_analyticsreporting(): \"\"\"Initializes the analyticsreporting service object. Returns: analytics", "data = json_normalize(reports['data']['rows']) data_dimensions = pd.DataFrame(data['dimensions'].tolist()) data_metrics = pd.DataFrame(data['metrics'].tolist()) data_metrics = data_metrics.applymap(lambda x:", "set the column names format_str_col(dfcity, {'geo_city_code'}) df = pd.merge(df, dfcity, on=['geo_city_code'], how='left') return", "credentials, and authorize HTTP object with them. # If the credentials don't exist", "dimension) for i, values in enumerate(dateRangeValues): print ('Date range (' + str(i) +", "blog landing page' elif url == 'www.omnisci.com/blog': source = 'omnisci blog landing page'", "build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI) return analytics def get_report(analytics, bodycontent): # Use the Analytics", "= format_data(response) # save the dataframe to a file output_to_csv(df, csv_file) # create", "def parse_data(csvfile, dtcols, intcols, floatcols, strcols, renamings, tfrmt): df = pd.read_csv(csvfile) df.reset_index(drop=True, inplace=True)", "'tag' elif 'author' in url: source = 'author' elif url == 'www.mapd.com/blog': source", "= 'mapd blog landing page' elif url == 'www.omnisci.com/blog': source = 'omnisci blog", "rows: dimensions = row.get('dimensions', []) dateRangeValues = row.get('metrics', []) for header, dimension in", "as pd from pandas.io.json import json_normalize from omnisci_utils import get_credentials from omnisci_utils import", "# Set up a Flow object to be used if we need to", "omnisci_utils import 
get_credentials from omnisci_utils import wake_and_connect_to_mapd from omnisci_utils import drop_table_mapd from omnisci_utils", "Criteria 2018-09-04.csv' # parameters for OmniSci Cloud mapdhost = 'use2-api.mapd.cloud' mapdport = 443", "metricHeader, value in zip(metricHeaders, values.get('values')): print (metricHeader.get('name') + ': ' + value) def", "httplib2 from oauth2client import client from oauth2client import file from oauth2client import tools", "are invalid run through the native client # flow. The Storage object will", "the column names format_str_col(dfcity, {'geo_city_code'}) df = pd.merge(df, dfcity, on=['geo_city_code'], how='left') return df", "[]) for header, dimension in zip(dimensionHeaders, dimensions): print (header + ': ' +", "('Date range (' + str(i) + ')') for metricHeader, value in zip(metricHeaders, values.get('values')):", "reports = response['reports'][0] columnHeader = reports['columnHeader']['dimensions'] metricHeader = reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns = columnHeader for", "storage, flags) http = credentials.authorize(http=httplib2.Http()) # Build the service object. analytics = build('analytics',", "TEXT ENCODING DICT(8), city_canonical_name TEXT ENCODING DICT(8), country_code TEXT ENCODING DICT(8));', { 'reportRequests':", "TEXT ENCODING DICT(8), country_code TEXT ENCODING DICT(8));', { 'reportRequests': [ { 'viewId': VIEW_ID,", "'PARTIAL', 'expressions': ['blog']} ]} ]} ]} ) ] # GOOGLE ANALYTICS FUNCTIONS def", "dtcols, intcols, floatcols, strcols, renamings, tfrmt): df = pd.read_csv(csvfile) df.reset_index(drop=True, inplace=True) format_date_cols(df, dtcols,", "a file output_to_csv(df, csv_file) # create the new dataframe from the file contents", "'author' in url: source = 'author' elif url == 'www.mapd.com/blog': source = 'mapd", "service object. \"\"\" # Parse command-line arguments. 
parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags", "= 'mapd blog landing page' elif url == 'www.mapd.com/blog/': source = 'mapd blog", "on=['geo_city_code'], how='left') return df def parse_geo_data(df): df = parse_source(df) df = parse_city(df) return", "EXISTS techsup_ga_blogvisits (blog_title TEXT ENCODING DICT(8), blog_url TEXT ENCODING DICT(8), referral_path TEXT ENCODING", "= get_credentials(omnisci_keyfile) # connect to OmniSci, allowing time for the instance to wake", "= 'omnisci blog landing page' elif 'community.mapd.com' in url: source = 'mapd community", "format_int_col from parsing_utils import format_str_col from parsing_utils import format_flt_col file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/' file_geocodes", "Service Object to query the Analytics Reporting API V4. return analytics.reports().batchGet( body=bodycontent).execute() def", "tfrmt) #force the column containing datetime values to be recast from strings to", "= pd.DataFrame(data['metrics'].tolist()) data_metrics = data_metrics.applymap(lambda x: x['values']) data_metrics = pd.DataFrame(data_metrics[0].tolist()) result = pd.concat([data_dimensions,", "ericgrant \"\"\" import argparse from apiclient.discovery import build import httplib2 from oauth2client import", "dfcity.columns = ['geo_city_code', 'city_name', 'city_canonical_name', 'city_parent_code', 'country_code', 'city_target_type', 'city_status'] # set the column", "API V4. 
return analytics.reports().batchGet( body=bodycontent).execute() def print_response(response): \"\"\"Parses and prints the Analytics Reporting", "df = parse_data(csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format) print ('loading dataframe into", "wants df = format_data(response) # save the dataframe to a file output_to_csv(df, csv_file)", "source(url): if 'tag' in url: source = 'tag' elif 'author' in url: source", "('loading dataframe into table ' + os_table) drop_table_mapd(connection, os_table) #drop the old table", "Created on Mon Oct 15 15:48:38 2018 @author: ericgrant \"\"\" import argparse from", "TEXT ENCODING DICT(8), blog_url TEXT ENCODING DICT(8), referral_path TEXT ENCODING DICT(8), c1_timestamp TIMESTAMP,", "source = 'omnisci blog landing page' elif 'community.mapd.com' in url: source = 'mapd", "1) df = df.drop('city_target_type', 1) df = df.drop('city_status', 1) return df def wake_up_omnisci():", "= 'tag' elif 'author' in url: source = 'author' elif url == 'www.mapd.com/blog':", "i, values in enumerate(dateRangeValues): print ('Date range (' + str(i) + ')') for", "SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest') VIEW_ID = '93521025' CLIENT_SECRETS_PATH = file_path +", "'https' mapddbname = 'mapd' mapduser = 'mapd' omnisci_keyfile = file_path + 'omnisci_keys.json' wait_interval", "Prepare credentials, and authorize HTTP object with them. # If the credentials don't", "columnHeader = reports['columnHeader']['dimensions'] metricHeader = reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns = columnHeader for metric in metricHeader:", "1) df = df.drop('city_status', 1) return df def wake_up_omnisci(): # get OmniSci credentials", "= build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI) return analytics def get_report(analytics, bodycontent): # Use the", "file. 
storage = file.Storage('analyticsreporting.dat') credentials = storage.get() if credentials is None or credentials.invalid:", "columnar tables OmniSci wants df = format_data(response) # save the dataframe to a", "flow. The Storage object will ensure that if successful the good # credentials", "Oct 15 15:48:38 2018 @author: ericgrant \"\"\" import argparse from apiclient.discovery import build", "drop_table_mapd(connection, os_table) #drop the old table connection.execute(creationstring) connection.load_table(os_table, df) #load the new table", "containing datetime values to be recast from strings to timestamps format_int_col(df, intcols) format_str_col(df,", "from oauth2client import tools import pandas as pd from pandas.io.json import json_normalize from", "= storage.get() if credentials is None or credentials.invalid: credentials = tools.run_flow(flow, storage, flags)", "tools.run_flow(flow, storage, flags) http = credentials.authorize(http=httplib2.Http()) # Build the service object. analytics =", "tables_and_files = [ #blog post views ('techsup_ga_blogvisits', file_path + 'techsup_ga_blogvisits.csv', {'c1_timestamp'}, {}, {'time_on_page',", "from oauth2client import file from oauth2client import tools import pandas as pd from", "or are invalid run through the native client # flow. 
The Storage object", "[]) metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []) rows = report.get('data', {}).get('rows', []) for row", "rename_cols, time_format, creationstring, reportbody in tables_and_files: # connect to Google Analytics analytics =", "'mapd blog landing page' elif url == 'www.omnisci.com/blog': source = 'omnisci blog landing", "parse_source(df) df = parse_city(df) return df # Load CSV to dataframe def parse_data(csvfile,", "columnHeader for metric in metricHeader: columns.append(metric['name']) data = json_normalize(reports['data']['rows']) data_dimensions = pd.DataFrame(data['dimensions'].tolist()) data_metrics", "= row.get('metrics', []) for header, dimension in zip(dimensionHeaders, dimensions): print (header + ':", "')') for metricHeader, value in zip(metricHeaders, values.get('values')): print (metricHeader.get('name') + ': ' +", "x: x['values']) data_metrics = pd.DataFrame(data_metrics[0].tolist()) result = pd.concat([data_dimensions, data_metrics], axis=1, ignore_index=True) result.columns =", "value in zip(metricHeaders, values.get('values')): print (metricHeader.get('name') + ': ' + value) def format_data(response):", "= ['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest') VIEW_ID = '93521025' CLIENT_SECRETS_PATH = file_path + 'client_secrets.json'", "rename_cols, time_format) print ('loading dataframe into table ' + os_table) drop_table_mapd(connection, os_table) #drop", "= columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []) rows = report.get('data', {}).get('rows', []) for row in rows:", "Returns: analytics an authorized analyticsreporting service object. \"\"\" # Parse command-line arguments. 
parser", "ENCODING DICT(8), blog_url TEXT ENCODING DICT(8), referral_path TEXT ENCODING DICT(8), c1_timestamp TIMESTAMP, unique_pageviews", "'ga:uniquePageviews'}, {'expression': 'ga:timeOnPage'} ], 'dimensions': [ {'name': 'ga:pageTitle'}, {'name': 'ga:pagePath'}, {'name': 'ga:referralPath'}, {'name':", "service object. Returns: analytics an authorized analyticsreporting service object. \"\"\" # Parse command-line", "and prints the Analytics Reporting API V4 response\"\"\" for report in response.get('reports', []):", "= '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/' file_geocodes = file_path + 'AdWords API Location Criteria 2018-09-04.csv' # parameters", "the analyticsreporting service object. Returns: analytics an authorized analyticsreporting service object. \"\"\" #", "[ {'startDate': start_date, 'endDate': 'today'} ], 'pageSize': 10000, 'metrics': [ {'expression': 'ga:uniquePageviews'}, {'expression':", "'www.mapd.com/blog': source = 'mapd blog landing page' elif url == 'www.mapd.com/blog/': source =", "analytics = initialize_analyticsreporting() response = get_report(analytics, reportbody) # format the data into the", "{'name': 'ga:date'}, {'name': 'ga:cityID'} ], 'dimensionFilterClauses': [ {'filters': [ {'dimensionName': 'ga:pageTitle', 'operator': 'PARTIAL',", "\"\"\"Parses and prints the Analytics Reporting API V4 response\"\"\" for report in response.get('reports',", "print_response(response): \"\"\"Parses and prints the Analytics Reporting API V4 response\"\"\" for report in", "('techsup_ga_blogvisits', file_path + 'techsup_ga_blogvisits.csv', {'c1_timestamp'}, {}, {'time_on_page', 'unique_pageviews'}, {}, {'geo_city_code'}, \"%Y%m%d\", 'CREATE TABLE", "= columnHeader for metric in metricHeader: columns.append(metric['name']) data = json_normalize(reports['data']['rows']) data_dimensions = pd.DataFrame(data['dimensions'].tolist())", "response['reports'][0] columnHeader = reports['columnHeader']['dimensions'] metricHeader = 
reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns = columnHeader for metric in", "OmniSci disconnect_mapd(connection) else: print('could not wake OmniSci; exiting') if __name__ == '__main__': main()", "Google API SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest') VIEW_ID = '93521025' CLIENT_SECRETS_PATH =", "flags) http = credentials.authorize(http=httplib2.Http()) # Build the service object. analytics = build('analytics', 'v4',", "client from oauth2client import file from oauth2client import tools import pandas as pd", "Set up a Flow object to be used if we need to authenticate.", "+ str(i) + ')') for metricHeader, value in zip(metricHeaders, values.get('values')): print (metricHeader.get('name') +", "{'expression': 'ga:uniquePageviews'}, {'expression': 'ga:timeOnPage'} ], 'dimensions': [ {'name': 'ga:pageTitle'}, {'name': 'ga:pagePath'}, {'name': 'ga:referralPath'},", "file.Storage('analyticsreporting.dat') credentials = storage.get() if credentials is None or credentials.invalid: credentials = tools.run_flow(flow,", "# Load CSV to dataframe def parse_data(csvfile, dtcols, intcols, floatcols, strcols, renamings, tfrmt):", "'city_canonical_name', 'city_parent_code', 'country_code', 'city_target_type', 'city_status'] # set the column names format_str_col(dfcity, {'geo_city_code'}) df", "Build the service object. 
analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI) return analytics def", "return analytics def get_report(analytics, bodycontent): # Use the Analytics Service Object to query", "pd.DataFrame() dfcreds = get_credentials(omnisci_keyfile) # connect to OmniSci, allowing time for the instance", "\"blog_url\", \"referral_path\", \"c1_timestamp\", \"geo_city_code\", \"unique_pageviews\", \"time_on_page\"] # set the column names return (result)", "discoveryServiceUrl=DISCOVERY_URI) return analytics def get_report(analytics, bodycontent): # Use the Analytics Service Object to", "def parse_geo_data(df): df = parse_source(df) df = parse_city(df) return df # Load CSV", "OmniSci # disconnect from OmniSci disconnect_mapd(connection) else: print('could not wake OmniSci; exiting') if", "format_flt_col(df, floatcols) df = parse_geo_data(df) df = df.drop('geo_city_code', 1) df = df.drop('city_parent_code', 1)", "+ 'omnisci_keys.json' wait_interval = 25 # parameters for Google API SCOPES = ['https://www.googleapis.com/auth/analytics.readonly']", "mapddbname = 'mapd' mapduser = 'mapd' omnisci_keyfile = file_path + 'omnisci_keys.json' wait_interval =", "TEXT ENCODING DICT(8), c1_timestamp TIMESTAMP, unique_pageviews FLOAT, time_on_page FLOAT, source TEXT ENCODING DICT(8),", "CSV to dataframe def parse_data(csvfile, dtcols, intcols, floatcols, strcols, renamings, tfrmt): df =", "from OmniSci disconnect_mapd(connection) else: print('could not wake OmniSci; exiting') if __name__ == '__main__':", "def initialize_analyticsreporting(): \"\"\"Initializes the analyticsreporting service object. Returns: analytics an authorized analyticsreporting service", "file output_to_csv(df, csv_file) # create the new dataframe from the file contents df", "exist or are invalid run through the native client # flow. 
The Storage", "Reporting API V4 response\"\"\" for report in response.get('reports', []): columnHeader = report.get('columnHeader', {})", "don't exist or are invalid run through the native client # flow. The", "a file. storage = file.Storage('analyticsreporting.dat') credentials = storage.get() if credentials is None or", "client_secrets.json file. start_date = '2017-04-01' tables_and_files = [ #blog post views ('techsup_ga_blogvisits', file_path", "df.to_csv(fn, index=False) # OMNISCI FUNCTIONS def source(url): if 'tag' in url: source =", "= parse_geo_data(df) df = df.drop('geo_city_code', 1) df = df.drop('city_parent_code', 1) df = df.drop('city_target_type',", "pd.DataFrame(data_metrics[0].tolist()) result = pd.concat([data_dimensions, data_metrics], axis=1, ignore_index=True) result.columns = [\"blog_title\", \"blog_url\", \"referral_path\", \"c1_timestamp\",", "connect to Google Analytics analytics = initialize_analyticsreporting() response = get_report(analytics, reportbody) # format", "analytics an authorized analyticsreporting service object. \"\"\" # Parse command-line arguments. parser =", "to query the Analytics Reporting API V4. return analytics.reports().batchGet( body=bodycontent).execute() def print_response(response): \"\"\"Parses", "print (header + ': ' + dimension) for i, values in enumerate(dateRangeValues): print", "{'c1_timestamp'}, {}, {'time_on_page', 'unique_pageviews'}, {}, {'geo_city_code'}, \"%Y%m%d\", 'CREATE TABLE IF NOT EXISTS techsup_ga_blogvisits", "timestamps format_int_col(df, intcols) format_str_col(df, strcols) format_flt_col(df, floatcols) df = parse_geo_data(df) df = df.drop('geo_city_code',", "NOT EXISTS techsup_ga_blogvisits (blog_title TEXT ENCODING DICT(8), blog_url TEXT ENCODING DICT(8), referral_path TEXT", "Storage object will ensure that if successful the good # credentials will get", "# path to client_secrets.json file. 
start_date = '2017-04-01' tables_and_files = [ #blog post", "http = credentials.authorize(http=httplib2.Http()) # Build the service object. analytics = build('analytics', 'v4', http=http,", "#drop the old table connection.execute(creationstring) connection.load_table(os_table, df) #load the new table into OmniSci", "object will ensure that if successful the good # credentials will get written", "+ 'AdWords API Location Criteria 2018-09-04.csv' # parameters for OmniSci Cloud mapdhost =", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Mon Oct 15", "invalid run through the native client # flow. The Storage object will ensure", "API V4 response\"\"\" for report in response.get('reports', []): columnHeader = report.get('columnHeader', {}) dimensionHeaders", "column names return (result) def output_to_csv(df, fn): df.to_csv(fn, index=False) # OMNISCI FUNCTIONS def", "them. # If the credentials don't exist or are invalid run through the", "elif url == 'www.omnisci.com/blog': source = 'omnisci blog landing page' elif url ==", "os_table) drop_table_mapd(connection, os_table) #drop the old table connection.execute(creationstring) connection.load_table(os_table, df) #load the new", "csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format, creationstring, reportbody in tables_and_files: # connect", "file from oauth2client import tools import pandas as pd from pandas.io.json import json_normalize", "= ['geo_city_code', 'city_name', 'city_canonical_name', 'city_parent_code', 'country_code', 'city_target_type', 'city_status'] # set the column names", "format_data(response) # save the dataframe to a file output_to_csv(df, csv_file) # create the", "a Flow object to be used if we need to authenticate. 
flow =", "metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []) rows = report.get('data', {}).get('rows', []) for row in", "pd.read_csv(csvfile) df.reset_index(drop=True, inplace=True) format_date_cols(df, dtcols, tfrmt) #force the column containing datetime values to", "return df # Load CSV to dataframe def parse_data(csvfile, dtcols, intcols, floatcols, strcols,", "x['values']) data_metrics = pd.DataFrame(data_metrics[0].tolist()) result = pd.concat([data_dimensions, data_metrics], axis=1, ignore_index=True) result.columns = [\"blog_title\",", "Mon Oct 15 15:48:38 2018 @author: ericgrant \"\"\" import argparse from apiclient.discovery import", "\"unique_pageviews\", \"time_on_page\"] # set the column names return (result) def output_to_csv(df, fn): df.to_csv(fn,", "format_str_col(dfcity, {'geo_city_code'}) df = pd.merge(df, dfcity, on=['geo_city_code'], how='left') return df def parse_geo_data(df): df", "floatcols, strcols, renamings, tfrmt): df = pd.read_csv(csvfile) df.reset_index(drop=True, inplace=True) format_date_cols(df, dtcols, tfrmt) #force", "# create the new dataframe from the file contents df = parse_data(csv_file, dt_cols,", "{'geo_city_code'}) df = pd.merge(df, dfcity, on=['geo_city_code'], how='left') return df def parse_geo_data(df): df =", "'93521025' CLIENT_SECRETS_PATH = file_path + 'client_secrets.json' # path to client_secrets.json file. 
start_date =", "TIMESTAMP, unique_pageviews FLOAT, time_on_page FLOAT, source TEXT ENCODING DICT(8), city_name TEXT ENCODING DICT(8),", "return connection # MAIN def main(): # connect to omnisci connection = wake_up_omnisci()", "# format the data into the columnar tables OmniSci wants df = format_data(response)", "columnHeader = report.get('columnHeader', {}) dimensionHeaders = columnHeader.get('dimensions', []) metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])", "import httplib2 from oauth2client import client from oauth2client import file from oauth2client import", "'ga:date'}, {'name': 'ga:cityID'} ], 'dimensionFilterClauses': [ {'filters': [ {'dimensionName': 'ga:pageTitle', 'operator': 'PARTIAL', 'expressions':", "['blog']} ]} ]} ]} ) ] # GOOGLE ANALYTICS FUNCTIONS def initialize_analyticsreporting(): \"\"\"Initializes", "argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags = parser.parse_args([]) # Set up a Flow object to", "\"referral_path\", \"c1_timestamp\", \"geo_city_code\", \"unique_pageviews\", \"time_on_page\"] # set the column names return (result) def", "1) return df def wake_up_omnisci(): # get OmniSci credentials dfcreds = pd.DataFrame() dfcreds", "{}, {'time_on_page', 'unique_pageviews'}, {}, {'geo_city_code'}, \"%Y%m%d\", 'CREATE TABLE IF NOT EXISTS techsup_ga_blogvisits (blog_title", "columns = columnHeader for metric in metricHeader: columns.append(metric['name']) data = json_normalize(reports['data']['rows']) data_dimensions =", "metricHeader: columns.append(metric['name']) data = json_normalize(reports['data']['rows']) data_dimensions = pd.DataFrame(data['dimensions'].tolist()) data_metrics = pd.DataFrame(data['metrics'].tolist()) data_metrics =", "from the file contents df = parse_data(csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format)", "1) df = df.drop('city_parent_code', 1) df = df.drop('city_target_type', 1) df = 
df.drop('city_status', 1)", "OmniSci, allowing time for the instance to wake connection = wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], mapdhost,", "df = format_data(response) # save the dataframe to a file output_to_csv(df, csv_file) #", "format_str_col from parsing_utils import format_flt_col file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/' file_geocodes = file_path + 'AdWords", "output_to_csv(df, fn): df.to_csv(fn, index=False) # OMNISCI FUNCTIONS def source(url): if 'tag' in url:", "url == 'www.mapd.com/blog/': source = 'omnisci blog landing page' elif 'community.mapd.com' in url:", "create the new dataframe from the file contents df = parse_data(csv_file, dt_cols, int_cols,", "'www.mapd.com/blog/': source = 'omnisci blog landing page' elif 'community.mapd.com' in url: source =", "json_normalize(reports['data']['rows']) data_dimensions = pd.DataFrame(data['dimensions'].tolist()) data_metrics = pd.DataFrame(data['metrics'].tolist()) data_metrics = data_metrics.applymap(lambda x: x['values']) data_metrics", "in url: source = 'author' elif url == 'www.mapd.com/blog': source = 'mapd blog", "[ #blog post views ('techsup_ga_blogvisits', file_path + 'techsup_ga_blogvisits.csv', {'c1_timestamp'}, {}, {'time_on_page', 'unique_pageviews'}, {},", "native client # flow. 
The Storage object will ensure that if successful the", "table into OmniSci # disconnect from OmniSci disconnect_mapd(connection) else: print('could not wake OmniSci;", "-*- \"\"\" Created on Mon Oct 15 15:48:38 2018 @author: ericgrant \"\"\" import", "metric in metricHeader: columns.append(metric['name']) data = json_normalize(reports['data']['rows']) data_dimensions = pd.DataFrame(data['dimensions'].tolist()) data_metrics = pd.DataFrame(data['metrics'].tolist())", "if credentials is None or credentials.invalid: credentials = tools.run_flow(flow, storage, flags) http =", "flags = parser.parse_args([]) # Set up a Flow object to be used if", "[ { 'viewId': VIEW_ID, 'dateRanges': [ {'startDate': start_date, 'endDate': 'today'} ], 'pageSize': 10000,", "message=tools.message_if_missing(CLIENT_SECRETS_PATH)) # Prepare credentials, and authorize HTTP object with them. # If the", "= file_path + 'omnisci_keys.json' wait_interval = 25 # parameters for Google API SCOPES", "= wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], mapdhost, mapddbname) return connection # MAIN def main(): # connect", "HTTP object with them. # If the credentials don't exist or are invalid", "file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/' file_geocodes = file_path + 'AdWords API Location Criteria 2018-09-04.csv' #", "landing page' elif url == 'www.omnisci.com/blog': source = 'omnisci blog landing page' elif", "to client_secrets.json file. 
start_date = '2017-04-01' tables_and_files = [ #blog post views ('techsup_ga_blogvisits',", "df # Load CSV to dataframe def parse_data(csvfile, dtcols, intcols, floatcols, strcols, renamings,", "= df.drop('city_status', 1) return df def wake_up_omnisci(): # get OmniSci credentials dfcreds =", "connection = wake_up_omnisci() if connection != \"RETRY\": # loop through tables and reports", "= pd.read_csv(file_geocodes) dfcity.columns = ['geo_city_code', 'city_name', 'city_canonical_name', 'city_parent_code', 'country_code', 'city_target_type', 'city_status'] # set", "'v4', http=http, discoveryServiceUrl=DISCOVERY_URI) return analytics def get_report(analytics, bodycontent): # Use the Analytics Service", "= pd.DataFrame() dfcreds = get_credentials(omnisci_keyfile) # connect to OmniSci, allowing time for the", "dfcreds['write_key_secret'], mapdhost, mapddbname) return connection # MAIN def main(): # connect to omnisci", "oauth2client import tools import pandas as pd from pandas.io.json import json_normalize from omnisci_utils", "['geo_city_code', 'city_name', 'city_canonical_name', 'city_parent_code', 'country_code', 'city_target_type', 'city_status'] # set the column names format_str_col(dfcity,", "the old table connection.execute(creationstring) connection.load_table(os_table, df) #load the new table into OmniSci #", "and authorize HTTP object with them. # If the credentials don't exist or", "= pd.read_csv(csvfile) df.reset_index(drop=True, inplace=True) format_date_cols(df, dtcols, tfrmt) #force the column containing datetime values", "and reports for os_table, csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format, creationstring, reportbody", "df def wake_up_omnisci(): # get OmniSci credentials dfcreds = pd.DataFrame() dfcreds = get_credentials(omnisci_keyfile)", "creationstring, reportbody in tables_and_files: # connect to Google Analytics analytics = initialize_analyticsreporting() response", "get written back to a file. 
storage = file.Storage('analyticsreporting.dat') credentials = storage.get() if", "page' elif url == 'www.mapd.com/blog/': source = 'omnisci blog landing page' elif 'community.mapd.com'", "file. start_date = '2017-04-01' tables_and_files = [ #blog post views ('techsup_ga_blogvisits', file_path +", "TEXT ENCODING DICT(8), city_name TEXT ENCODING DICT(8), city_canonical_name TEXT ENCODING DICT(8), country_code TEXT", "response = get_report(analytics, reportbody) # format the data into the columnar tables OmniSci", "import client from oauth2client import file from oauth2client import tools import pandas as", "data_dimensions = pd.DataFrame(data['dimensions'].tolist()) data_metrics = pd.DataFrame(data['metrics'].tolist()) data_metrics = data_metrics.applymap(lambda x: x['values']) data_metrics =", "import disconnect_mapd from parsing_utils import format_date_cols from parsing_utils import format_int_col from parsing_utils import", "tables OmniSci wants df = format_data(response) # save the dataframe to a file", "back to a file. 
storage = file.Storage('analyticsreporting.dat') credentials = storage.get() if credentials is", "from omnisci_utils import disconnect_mapd from parsing_utils import format_date_cols from parsing_utils import format_int_col from", "'community.mapd.com' in url: source = 'mapd community forum' elif 'community.omnisci.com' in url: source", "mapdport = 443 mapdprotocol = 'https' mapddbname = 'mapd' mapduser = 'mapd' omnisci_keyfile", "= parse_city(df) return df # Load CSV to dataframe def parse_data(csvfile, dtcols, intcols,", "import get_credentials from omnisci_utils import wake_and_connect_to_mapd from omnisci_utils import drop_table_mapd from omnisci_utils import", "connection.load_table(os_table, df) #load the new table into OmniSci # disconnect from OmniSci disconnect_mapd(connection)", "dfcreds = pd.DataFrame() dfcreds = get_credentials(omnisci_keyfile) # connect to OmniSci, allowing time for", "# connect to OmniSci, allowing time for the instance to wake connection =", "{ 'reportRequests': [ { 'viewId': VIEW_ID, 'dateRanges': [ {'startDate': start_date, 'endDate': 'today'} ],", "the column containing datetime values to be recast from strings to timestamps format_int_col(df,", "connect to omnisci connection = wake_up_omnisci() if connection != \"RETRY\": # loop through", "+ dimension) for i, values in enumerate(dateRangeValues): print ('Date range (' + str(i)", "landing page' elif 'community.mapd.com' in url: source = 'mapd community forum' elif 'community.omnisci.com'", "# save the dataframe to a file output_to_csv(df, csv_file) # create the new", "' + os_table) drop_table_mapd(connection, os_table) #drop the old table connection.execute(creationstring) connection.load_table(os_table, df) #load", "'2017-04-01' tables_and_files = [ #blog post views ('techsup_ga_blogvisits', file_path + 'techsup_ga_blogvisits.csv', {'c1_timestamp'}, {},", "# Build the service object. 
analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI) return analytics", "'ga:pageTitle', 'operator': 'PARTIAL', 'expressions': ['blog']} ]} ]} ]} ) ] # GOOGLE ANALYTICS", "from apiclient.discovery import build import httplib2 from oauth2client import client from oauth2client import", "successful the good # credentials will get written back to a file. storage", "storage.get() if credentials is None or credentials.invalid: credentials = tools.run_flow(flow, storage, flags) http", "format_date_cols(df, dtcols, tfrmt) #force the column containing datetime values to be recast from", "connection != \"RETRY\": # loop through tables and reports for os_table, csv_file, dt_cols,", "= df.drop('city_target_type', 1) df = df.drop('city_status', 1) return df def wake_up_omnisci(): # get", "= df['blog_url'].apply(source) return df def parse_city(df): dfcity = pd.read_csv(file_geocodes) dfcity.columns = ['geo_city_code', 'city_name',", "Analytics Reporting API V4 response\"\"\" for report in response.get('reports', []): columnHeader = report.get('columnHeader',", "float_cols, str_cols, rename_cols, time_format) print ('loading dataframe into table ' + os_table) drop_table_mapd(connection,", "\"%Y%m%d\", 'CREATE TABLE IF NOT EXISTS techsup_ga_blogvisits (blog_title TEXT ENCODING DICT(8), blog_url TEXT", "zip(metricHeaders, values.get('values')): print (metricHeader.get('name') + ': ' + value) def format_data(response): reports =", "wake_up_omnisci() if connection != \"RETRY\": # loop through tables and reports for os_table,", "reportbody) # format the data into the columnar tables OmniSci wants df =", "formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags = parser.parse_args([]) # Set up a Flow object to be", "parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags = parser.parse_args([]) # Set up a Flow", "\"\"\" # Parse command-line arguments. 
parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags = parser.parse_args([])", "= 'omnisci blog landing page' elif url == 'www.mapd.com/blog/': source = 'omnisci blog", "names format_str_col(dfcity, {'geo_city_code'}) df = pd.merge(df, dfcity, on=['geo_city_code'], how='left') return df def parse_geo_data(df):", "= ('https://analyticsreporting.googleapis.com/$discovery/rest') VIEW_ID = '93521025' CLIENT_SECRETS_PATH = file_path + 'client_secrets.json' # path to", "connection.execute(creationstring) connection.load_table(os_table, df) #load the new table into OmniSci # disconnect from OmniSci", "= 'author' elif url == 'www.mapd.com/blog': source = 'mapd blog landing page' elif", "community forum' else: source = 'other / direct' return (source) def parse_source(df): df['source']", "dataframe from the file contents df = parse_data(csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols,", "['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest') VIEW_ID = '93521025' CLIENT_SECRETS_PATH = file_path + 'client_secrets.json' #", "parse_city(df): dfcity = pd.read_csv(file_geocodes) dfcity.columns = ['geo_city_code', 'city_name', 'city_canonical_name', 'city_parent_code', 'country_code', 'city_target_type', 'city_status']", "argparse from apiclient.discovery import build import httplib2 from oauth2client import client from oauth2client", "the Analytics Reporting API V4 response\"\"\" for report in response.get('reports', []): columnHeader =", "'CREATE TABLE IF NOT EXISTS techsup_ga_blogvisits (blog_title TEXT ENCODING DICT(8), blog_url TEXT ENCODING", "url == 'www.omnisci.com/blog': source = 'omnisci blog landing page' elif url == 'www.mapd.com/blog/':", "'pageSize': 10000, 'metrics': [ {'expression': 'ga:uniquePageviews'}, {'expression': 'ga:timeOnPage'} ], 'dimensions': [ {'name': 'ga:pageTitle'},", "import format_flt_col 
file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/' file_geocodes = file_path + 'AdWords API Location Criteria", "data into the columnar tables OmniSci wants df = format_data(response) # save the", "the credentials don't exist or are invalid run through the native client #", "get_report(analytics, bodycontent): # Use the Analytics Service Object to query the Analytics Reporting", "== 'www.mapd.com/blog': source = 'mapd blog landing page' elif url == 'www.mapd.com/blog/': source", "== 'www.omnisci.com/blog': source = 'omnisci blog landing page' elif url == 'www.mapd.com/blog/': source", "apiclient.discovery import build import httplib2 from oauth2client import client from oauth2client import file", "'omnisci community forum' else: source = 'other / direct' return (source) def parse_source(df):", "index=False) # OMNISCI FUNCTIONS def source(url): if 'tag' in url: source = 'tag'", "= pd.DataFrame(data_metrics[0].tolist()) result = pd.concat([data_dimensions, data_metrics], axis=1, ignore_index=True) result.columns = [\"blog_title\", \"blog_url\", \"referral_path\",", "def output_to_csv(df, fn): df.to_csv(fn, index=False) # OMNISCI FUNCTIONS def source(url): if 'tag' in", "= 'mapd' omnisci_keyfile = file_path + 'omnisci_keys.json' wait_interval = 25 # parameters for", "'ga:timeOnPage'} ], 'dimensions': [ {'name': 'ga:pageTitle'}, {'name': 'ga:pagePath'}, {'name': 'ga:referralPath'}, {'name': 'ga:date'}, {'name':", "OmniSci Cloud mapdhost = 'use2-api.mapd.cloud' mapdport = 443 mapdprotocol = 'https' mapddbname =", "source = 'omnisci community forum' else: source = 'other / direct' return (source)", "enumerate(dateRangeValues): print ('Date range (' + str(i) + ')') for metricHeader, value in", "# OMNISCI FUNCTIONS def source(url): if 'tag' in url: source = 'tag' elif", "# credentials will get written back to a file. 
storage = file.Storage('analyticsreporting.dat') credentials", "strcols) format_flt_col(df, floatcols) df = parse_geo_data(df) df = df.drop('geo_city_code', 1) df = df.drop('city_parent_code',", "up a Flow object to be used if we need to authenticate. flow", "= 443 mapdprotocol = 'https' mapddbname = 'mapd' mapduser = 'mapd' omnisci_keyfile =", "connect to OmniSci, allowing time for the instance to wake connection = wake_and_connect_to_mapd(dfcreds['write_key_name'],", "{'time_on_page', 'unique_pageviews'}, {}, {'geo_city_code'}, \"%Y%m%d\", 'CREATE TABLE IF NOT EXISTS techsup_ga_blogvisits (blog_title TEXT", "for metricHeader, value in zip(metricHeaders, values.get('values')): print (metricHeader.get('name') + ': ' + value)", "MAIN def main(): # connect to omnisci connection = wake_up_omnisci() if connection !=", "'community.omnisci.com' in url: source = 'omnisci community forum' else: source = 'other /", "client # flow. The Storage object will ensure that if successful the good", "int_cols, float_cols, str_cols, rename_cols, time_format, creationstring, reportbody in tables_and_files: # connect to Google", "mapduser = 'mapd' omnisci_keyfile = file_path + 'omnisci_keys.json' wait_interval = 25 # parameters", "floatcols) df = parse_geo_data(df) df = df.drop('geo_city_code', 1) df = df.drop('city_parent_code', 1) df", "storage = file.Storage('analyticsreporting.dat') credentials = storage.get() if credentials is None or credentials.invalid: credentials", "pandas.io.json import json_normalize from omnisci_utils import get_credentials from omnisci_utils import wake_and_connect_to_mapd from omnisci_utils", "the new dataframe from the file contents df = parse_data(csv_file, dt_cols, int_cols, float_cols,", "object with them. 
# If the credentials don't exist or are invalid run", "for report in response.get('reports', []): columnHeader = report.get('columnHeader', {}) dimensionHeaders = columnHeader.get('dimensions', [])", "response\"\"\" for report in response.get('reports', []): columnHeader = report.get('columnHeader', {}) dimensionHeaders = columnHeader.get('dimensions',", "(source) def parse_source(df): df['source'] = df['blog_url'].apply(source) return df def parse_city(df): dfcity = pd.read_csv(file_geocodes)", "df def parse_city(df): dfcity = pd.read_csv(file_geocodes) dfcity.columns = ['geo_city_code', 'city_name', 'city_canonical_name', 'city_parent_code', 'country_code',", "ENCODING DICT(8), city_canonical_name TEXT ENCODING DICT(8), country_code TEXT ENCODING DICT(8));', { 'reportRequests': [", "file_path + 'omnisci_keys.json' wait_interval = 25 # parameters for Google API SCOPES =", "command-line arguments. parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags = parser.parse_args([]) # Set up", "dimensionHeaders = columnHeader.get('dimensions', []) metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []) rows = report.get('data', {}).get('rows',", "#force the column containing datetime values to be recast from strings to timestamps", "# If the credentials don't exist or are invalid run through the native", "initialize_analyticsreporting(): \"\"\"Initializes the analyticsreporting service object. 
Returns: analytics an authorized analyticsreporting service object.", "source = 'mapd blog landing page' elif url == 'www.omnisci.com/blog': source = 'omnisci", "[ {'expression': 'ga:uniquePageviews'}, {'expression': 'ga:timeOnPage'} ], 'dimensions': [ {'name': 'ga:pageTitle'}, {'name': 'ga:pagePath'}, {'name':", "credentials = tools.run_flow(flow, storage, flags) http = credentials.authorize(http=httplib2.Http()) # Build the service object.", "forum' else: source = 'other / direct' return (source) def parse_source(df): df['source'] =", "DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest') VIEW_ID = '93521025' CLIENT_SECRETS_PATH = file_path + 'client_secrets.json' # path", "good # credentials will get written back to a file. storage = file.Storage('analyticsreporting.dat')", "is None or credentials.invalid: credentials = tools.run_flow(flow, storage, flags) http = credentials.authorize(http=httplib2.Http()) #", "# loop through tables and reports for os_table, csv_file, dt_cols, int_cols, float_cols, str_cols,", "source = 'mapd community forum' elif 'community.omnisci.com' in url: source = 'omnisci community", "parents=[tools.argparser]) flags = parser.parse_args([]) # Set up a Flow object to be used", "omnisci_utils import disconnect_mapd from parsing_utils import format_date_cols from parsing_utils import format_int_col from parsing_utils", "with them. 
# If the credentials don't exist or are invalid run through", "csv_file) # create the new dataframe from the file contents df = parse_data(csv_file,", "FLOAT, time_on_page FLOAT, source TEXT ENCODING DICT(8), city_name TEXT ENCODING DICT(8), city_canonical_name TEXT", "parse_source(df): df['source'] = df['blog_url'].apply(source) return df def parse_city(df): dfcity = pd.read_csv(file_geocodes) dfcity.columns =", "dimensions = row.get('dimensions', []) dateRangeValues = row.get('metrics', []) for header, dimension in zip(dimensionHeaders,", "+ ')') for metricHeader, value in zip(metricHeaders, values.get('values')): print (metricHeader.get('name') + ': '", "Analytics analytics = initialize_analyticsreporting() response = get_report(analytics, reportbody) # format the data into", "pd.concat([data_dimensions, data_metrics], axis=1, ignore_index=True) result.columns = [\"blog_title\", \"blog_url\", \"referral_path\", \"c1_timestamp\", \"geo_city_code\", \"unique_pageviews\", \"time_on_page\"]", "df.drop('geo_city_code', 1) df = df.drop('city_parent_code', 1) df = df.drop('city_target_type', 1) df = df.drop('city_status',", "into OmniSci # disconnect from OmniSci disconnect_mapd(connection) else: print('could not wake OmniSci; exiting')", "return (result) def output_to_csv(df, fn): df.to_csv(fn, index=False) # OMNISCI FUNCTIONS def source(url): if", "in enumerate(dateRangeValues): print ('Date range (' + str(i) + ')') for metricHeader, value", "intcols) format_str_col(df, strcols) format_flt_col(df, floatcols) df = parse_geo_data(df) df = df.drop('geo_city_code', 1) df", "#blog post views ('techsup_ga_blogvisits', file_path + 'techsup_ga_blogvisits.csv', {'c1_timestamp'}, {}, {'time_on_page', 'unique_pageviews'}, {}, {'geo_city_code'},", "column names format_str_col(dfcity, {'geo_city_code'}) df = pd.merge(df, dfcity, on=['geo_city_code'], how='left') return df def", "wake connection = wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], 
mapdhost, mapddbname) return connection # MAIN def main():", "]} ) ] # GOOGLE ANALYTICS FUNCTIONS def initialize_analyticsreporting(): \"\"\"Initializes the analyticsreporting service", "Parse command-line arguments. parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags = parser.parse_args([]) # Set", "the Analytics Reporting API V4. return analytics.reports().batchGet( body=bodycontent).execute() def print_response(response): \"\"\"Parses and prints", "': ' + value) def format_data(response): reports = response['reports'][0] columnHeader = reports['columnHeader']['dimensions'] metricHeader", "Reporting API V4. return analytics.reports().batchGet( body=bodycontent).execute() def print_response(response): \"\"\"Parses and prints the Analytics", "def source(url): if 'tag' in url: source = 'tag' elif 'author' in url:", "connection = wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], mapdhost, mapddbname) return connection # MAIN def main(): #", "oauth2client import file from oauth2client import tools import pandas as pd from pandas.io.json", "parameters for OmniSci Cloud mapdhost = 'use2-api.mapd.cloud' mapdport = 443 mapdprotocol = 'https'", "{'geo_city_code'}, \"%Y%m%d\", 'CREATE TABLE IF NOT EXISTS techsup_ga_blogvisits (blog_title TEXT ENCODING DICT(8), blog_url", "+ os_table) drop_table_mapd(connection, os_table) #drop the old table connection.execute(creationstring) connection.load_table(os_table, df) #load the", "report.get('columnHeader', {}) dimensionHeaders = columnHeader.get('dimensions', []) metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []) rows =", "parsing_utils import format_date_cols from parsing_utils import format_int_col from parsing_utils import format_str_col from parsing_utils", "ENCODING DICT(8), country_code TEXT ENCODING DICT(8));', { 'reportRequests': [ { 'viewId': VIEW_ID, 'dateRanges':", "'ga:cityID'} ], 
'dimensionFilterClauses': [ {'filters': [ {'dimensionName': 'ga:pageTitle', 'operator': 'PARTIAL', 'expressions': ['blog']} ]}", "be recast from strings to timestamps format_int_col(df, intcols) format_str_col(df, strcols) format_flt_col(df, floatcols) df", "# MAIN def main(): # connect to omnisci connection = wake_up_omnisci() if connection", "\"geo_city_code\", \"unique_pageviews\", \"time_on_page\"] # set the column names return (result) def output_to_csv(df, fn):", "arguments. parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags = parser.parse_args([]) # Set up a", "== 'www.mapd.com/blog/': source = 'omnisci blog landing page' elif 'community.mapd.com' in url: source", "TEXT ENCODING DICT(8), referral_path TEXT ENCODING DICT(8), c1_timestamp TIMESTAMP, unique_pageviews FLOAT, time_on_page FLOAT,", "set the column names return (result) def output_to_csv(df, fn): df.to_csv(fn, index=False) # OMNISCI", "url: source = 'author' elif url == 'www.mapd.com/blog': source = 'mapd blog landing", "in rows: dimensions = row.get('dimensions', []) dateRangeValues = row.get('metrics', []) for header, dimension", "reports['columnHeader']['dimensions'] metricHeader = reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns = columnHeader for metric in metricHeader: columns.append(metric['name']) data", "service object. analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI) return analytics def get_report(analytics, bodycontent):", "df.reset_index(drop=True, inplace=True) format_date_cols(df, dtcols, tfrmt) #force the column containing datetime values to be", "= 'other / direct' return (source) def parse_source(df): df['source'] = df['blog_url'].apply(source) return df", "to be used if we need to authenticate. flow = client.flow_from_clientsecrets( CLIENT_SECRETS_PATH, scope=SCOPES,", "file_path + 'client_secrets.json' # path to client_secrets.json file. 
start_date = '2017-04-01' tables_and_files =", "range (' + str(i) + ')') for metricHeader, value in zip(metricHeaders, values.get('values')): print", "= 'https' mapddbname = 'mapd' mapduser = 'mapd' omnisci_keyfile = file_path + 'omnisci_keys.json'", "import build import httplib2 from oauth2client import client from oauth2client import file from", "dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format) print ('loading dataframe into table ' +", "techsup_ga_blogvisits (blog_title TEXT ENCODING DICT(8), blog_url TEXT ENCODING DICT(8), referral_path TEXT ENCODING DICT(8),", "tables and reports for os_table, csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format, creationstring,", "authorize HTTP object with them. # If the credentials don't exist or are", "(header + ': ' + dimension) for i, values in enumerate(dateRangeValues): print ('Date", "parse_data(csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format) print ('loading dataframe into table '", "(result) def output_to_csv(df, fn): df.to_csv(fn, index=False) # OMNISCI FUNCTIONS def source(url): if 'tag'", "'city_status'] # set the column names format_str_col(dfcity, {'geo_city_code'}) df = pd.merge(df, dfcity, on=['geo_city_code'],", "def parse_source(df): df['source'] = df['blog_url'].apply(source) return df def parse_city(df): dfcity = pd.read_csv(file_geocodes) dfcity.columns", "landing page' elif url == 'www.mapd.com/blog/': source = 'omnisci blog landing page' elif", "\"time_on_page\"] # set the column names return (result) def output_to_csv(df, fn): df.to_csv(fn, index=False)", "'omnisci blog landing page' elif url == 'www.mapd.com/blog/': source = 'omnisci blog landing", "the data into the columnar tables OmniSci wants df = format_data(response) # save", "table ' + os_table) drop_table_mapd(connection, os_table) #drop the old table connection.execute(creationstring) connection.load_table(os_table, df)", "we need to authenticate. 
flow = client.flow_from_clientsecrets( CLIENT_SECRETS_PATH, scope=SCOPES, message=tools.message_if_missing(CLIENT_SECRETS_PATH)) # Prepare credentials,", "dfcity = pd.read_csv(file_geocodes) dfcity.columns = ['geo_city_code', 'city_name', 'city_canonical_name', 'city_parent_code', 'country_code', 'city_target_type', 'city_status'] #", "def wake_up_omnisci(): # get OmniSci credentials dfcreds = pd.DataFrame() dfcreds = get_credentials(omnisci_keyfile) #", "the column names return (result) def output_to_csv(df, fn): df.to_csv(fn, index=False) # OMNISCI FUNCTIONS", "str(i) + ')') for metricHeader, value in zip(metricHeaders, values.get('values')): print (metricHeader.get('name') + ':", "df = df.drop('geo_city_code', 1) df = df.drop('city_parent_code', 1) df = df.drop('city_target_type', 1) df", "DICT(8));', { 'reportRequests': [ { 'viewId': VIEW_ID, 'dateRanges': [ {'startDate': start_date, 'endDate': 'today'}", "= pd.concat([data_dimensions, data_metrics], axis=1, ignore_index=True) result.columns = [\"blog_title\", \"blog_url\", \"referral_path\", \"c1_timestamp\", \"geo_city_code\", \"unique_pageviews\",", "= tools.run_flow(flow, storage, flags) http = credentials.authorize(http=httplib2.Http()) # Build the service object. analytics", "\"\"\" import argparse from apiclient.discovery import build import httplib2 from oauth2client import client", "Analytics Service Object to query the Analytics Reporting API V4. 
return analytics.reports().batchGet( body=bodycontent).execute()", "[ {'filters': [ {'dimensionName': 'ga:pageTitle', 'operator': 'PARTIAL', 'expressions': ['blog']} ]} ]} ]} )", "analytics.reports().batchGet( body=bodycontent).execute() def print_response(response): \"\"\"Parses and prints the Analytics Reporting API V4 response\"\"\"", "[\"blog_title\", \"blog_url\", \"referral_path\", \"c1_timestamp\", \"geo_city_code\", \"unique_pageviews\", \"time_on_page\"] # set the column names return", "= columnHeader.get('dimensions', []) metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []) rows = report.get('data', {}).get('rows', [])", "{ 'viewId': VIEW_ID, 'dateRanges': [ {'startDate': start_date, 'endDate': 'today'} ], 'pageSize': 10000, 'metrics':", "oauth2client import client from oauth2client import file from oauth2client import tools import pandas", "connection # MAIN def main(): # connect to omnisci connection = wake_up_omnisci() if", "'other / direct' return (source) def parse_source(df): df['source'] = df['blog_url'].apply(source) return df def", "int_cols, float_cols, str_cols, rename_cols, time_format) print ('loading dataframe into table ' + os_table)", "' + dimension) for i, values in enumerate(dateRangeValues): print ('Date range (' +", "pd from pandas.io.json import json_normalize from omnisci_utils import get_credentials from omnisci_utils import wake_and_connect_to_mapd", "omnisci_keyfile = file_path + 'omnisci_keys.json' wait_interval = 25 # parameters for Google API", "credentials.invalid: credentials = tools.run_flow(flow, storage, flags) http = credentials.authorize(http=httplib2.Http()) # Build the service", "str_cols, rename_cols, time_format, creationstring, reportbody in tables_and_files: # connect to Google Analytics analytics", "drop_table_mapd from omnisci_utils import disconnect_mapd from parsing_utils import format_date_cols from parsing_utils import format_int_col", "ENCODING DICT(8), c1_timestamp TIMESTAMP, 
unique_pageviews FLOAT, time_on_page FLOAT, source TEXT ENCODING DICT(8), city_name", "landing page' elif url == 'www.mapd.com/blog/': source = 'mapd blog landing page' elif", "city_name TEXT ENCODING DICT(8), city_canonical_name TEXT ENCODING DICT(8), country_code TEXT ENCODING DICT(8));', {", "# Parse command-line arguments. parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags = parser.parse_args([]) #", "API SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest') VIEW_ID = '93521025' CLIENT_SECRETS_PATH = file_path", "in url: source = 'tag' elif 'author' in url: source = 'author' elif", "forum' elif 'community.omnisci.com' in url: source = 'omnisci community forum' else: source =", "GOOGLE ANALYTICS FUNCTIONS def initialize_analyticsreporting(): \"\"\"Initializes the analyticsreporting service object. Returns: analytics an", "#load the new table into OmniSci # disconnect from OmniSci disconnect_mapd(connection) else: print('could", "FLOAT, source TEXT ENCODING DICT(8), city_name TEXT ENCODING DICT(8), city_canonical_name TEXT ENCODING DICT(8),", "for OmniSci Cloud mapdhost = 'use2-api.mapd.cloud' mapdport = 443 mapdprotocol = 'https' mapddbname", "for Google API SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest') VIEW_ID = '93521025' CLIENT_SECRETS_PATH", "'author' elif url == 'www.mapd.com/blog': source = 'mapd blog landing page' elif url", "[]): columnHeader = report.get('columnHeader', {}) dimensionHeaders = columnHeader.get('dimensions', []) metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries',", "print ('loading dataframe into table ' + os_table) drop_table_mapd(connection, os_table) #drop the old", "from omnisci_utils import wake_and_connect_to_mapd from omnisci_utils import drop_table_mapd 
from omnisci_utils import disconnect_mapd from", "= reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns = columnHeader for metric in metricHeader: columns.append(metric['name']) data = json_normalize(reports['data']['rows'])", "IF NOT EXISTS techsup_ga_blogvisits (blog_title TEXT ENCODING DICT(8), blog_url TEXT ENCODING DICT(8), referral_path", "credentials = storage.get() if credentials is None or credentials.invalid: credentials = tools.run_flow(flow, storage,", "to dataframe def parse_data(csvfile, dtcols, intcols, floatcols, strcols, renamings, tfrmt): df = pd.read_csv(csvfile)", "DICT(8), blog_url TEXT ENCODING DICT(8), referral_path TEXT ENCODING DICT(8), c1_timestamp TIMESTAMP, unique_pageviews FLOAT,", "used if we need to authenticate. flow = client.flow_from_clientsecrets( CLIENT_SECRETS_PATH, scope=SCOPES, message=tools.message_if_missing(CLIENT_SECRETS_PATH)) #", "os_table, csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format, creationstring, reportbody in tables_and_files: #", "loop through tables and reports for os_table, csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols,", "!= \"RETRY\": # loop through tables and reports for os_table, csv_file, dt_cols, int_cols,", "str_cols, rename_cols, time_format) print ('loading dataframe into table ' + os_table) drop_table_mapd(connection, os_table)", "row.get('dimensions', []) dateRangeValues = row.get('metrics', []) for header, dimension in zip(dimensionHeaders, dimensions): print", "file_path + 'AdWords API Location Criteria 2018-09-04.csv' # parameters for OmniSci Cloud mapdhost", "'mapd' omnisci_keyfile = file_path + 'omnisci_keys.json' wait_interval = 25 # parameters for Google", "# -*- coding: utf-8 -*- \"\"\" Created on Mon Oct 15 15:48:38 2018", "'omnisci_keys.json' wait_interval = 25 # parameters for Google API SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI", "FUNCTIONS def initialize_analyticsreporting(): 
\"\"\"Initializes the analyticsreporting service object. Returns: analytics an authorized analyticsreporting", "an authorized analyticsreporting service object. \"\"\" # Parse command-line arguments. parser = argparse.ArgumentParser(", "dfcity, on=['geo_city_code'], how='left') return df def parse_geo_data(df): df = parse_source(df) df = parse_city(df)", "parse_city(df) return df # Load CSV to dataframe def parse_data(csvfile, dtcols, intcols, floatcols,", "analyticsreporting service object. Returns: analytics an authorized analyticsreporting service object. \"\"\" # Parse", "= '2017-04-01' tables_and_files = [ #blog post views ('techsup_ga_blogvisits', file_path + 'techsup_ga_blogvisits.csv', {'c1_timestamp'},", "The Storage object will ensure that if successful the good # credentials will", "dataframe def parse_data(csvfile, dtcols, intcols, floatcols, strcols, renamings, tfrmt): df = pd.read_csv(csvfile) df.reset_index(drop=True,", "= initialize_analyticsreporting() response = get_report(analytics, reportbody) # format the data into the columnar", "'city_target_type', 'city_status'] # set the column names format_str_col(dfcity, {'geo_city_code'}) df = pd.merge(df, dfcity,", "def main(): # connect to omnisci connection = wake_up_omnisci() if connection != \"RETRY\":", "import drop_table_mapd from omnisci_utils import disconnect_mapd from parsing_utils import format_date_cols from parsing_utils import", "data_metrics.applymap(lambda x: x['values']) data_metrics = pd.DataFrame(data_metrics[0].tolist()) result = pd.concat([data_dimensions, data_metrics], axis=1, ignore_index=True) result.columns", "[]) for row in rows: dimensions = row.get('dimensions', []) dateRangeValues = row.get('metrics', [])", "= response['reports'][0] columnHeader = reports['columnHeader']['dimensions'] metricHeader = reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns = columnHeader for metric", "time_format) print ('loading dataframe into table ' + os_table) 
drop_table_mapd(connection, os_table) #drop the", "Load CSV to dataframe def parse_data(csvfile, dtcols, intcols, floatcols, strcols, renamings, tfrmt): df", "for the instance to wake connection = wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], mapdhost, mapddbname) return connection", "initialize_analyticsreporting() response = get_report(analytics, reportbody) # format the data into the columnar tables", "@author: ericgrant \"\"\" import argparse from apiclient.discovery import build import httplib2 from oauth2client", "df['blog_url'].apply(source) return df def parse_city(df): dfcity = pd.read_csv(file_geocodes) dfcity.columns = ['geo_city_code', 'city_name', 'city_canonical_name',", "scope=SCOPES, message=tools.message_if_missing(CLIENT_SECRETS_PATH)) # Prepare credentials, and authorize HTTP object with them. # If", "= report.get('columnHeader', {}) dimensionHeaders = columnHeader.get('dimensions', []) metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []) rows", "<reponame>omnisci/pymapd-examples #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Mon Oct", "the Analytics Service Object to query the Analytics Reporting API V4. return analytics.reports().batchGet(", "+ ': ' + dimension) for i, values in enumerate(dateRangeValues): print ('Date range", "get_report(analytics, reportbody) # format the data into the columnar tables OmniSci wants df", "from parsing_utils import format_date_cols from parsing_utils import format_int_col from parsing_utils import format_str_col from", "def format_data(response): reports = response['reports'][0] columnHeader = reports['columnHeader']['dimensions'] metricHeader = reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns =", "(metricHeader.get('name') + ': ' + value) def format_data(response): reports = response['reports'][0] columnHeader =", "Object to query the Analytics Reporting API V4. 
return analytics.reports().batchGet( body=bodycontent).execute() def print_response(response):", "pandas as pd from pandas.io.json import json_normalize from omnisci_utils import get_credentials from omnisci_utils", "+ ': ' + value) def format_data(response): reports = response['reports'][0] columnHeader = reports['columnHeader']['dimensions']", "python3 # -*- coding: utf-8 -*- \"\"\" Created on Mon Oct 15 15:48:38", "utf-8 -*- \"\"\" Created on Mon Oct 15 15:48:38 2018 @author: ericgrant \"\"\"", "{'startDate': start_date, 'endDate': 'today'} ], 'pageSize': 10000, 'metrics': [ {'expression': 'ga:uniquePageviews'}, {'expression': 'ga:timeOnPage'}", "= client.flow_from_clientsecrets( CLIENT_SECRETS_PATH, scope=SCOPES, message=tools.message_if_missing(CLIENT_SECRETS_PATH)) # Prepare credentials, and authorize HTTP object with", "new dataframe from the file contents df = parse_data(csv_file, dt_cols, int_cols, float_cols, str_cols,", "= parse_data(csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format) print ('loading dataframe into table", "omnisci connection = wake_up_omnisci() if connection != \"RETRY\": # loop through tables and", "pd.read_csv(file_geocodes) dfcity.columns = ['geo_city_code', 'city_name', 'city_canonical_name', 'city_parent_code', 'country_code', 'city_target_type', 'city_status'] # set the", "authorized analyticsreporting service object. \"\"\" # Parse command-line arguments. 
parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter,", "strcols, renamings, tfrmt): df = pd.read_csv(csvfile) df.reset_index(drop=True, inplace=True) format_date_cols(df, dtcols, tfrmt) #force the", "format_str_col(df, strcols) format_flt_col(df, floatcols) df = parse_geo_data(df) df = df.drop('geo_city_code', 1) df =", "format_int_col(df, intcols) format_str_col(df, strcols) format_flt_col(df, floatcols) df = parse_geo_data(df) df = df.drop('geo_city_code', 1)", "mapdhost = 'use2-api.mapd.cloud' mapdport = 443 mapdprotocol = 'https' mapddbname = 'mapd' mapduser", "df = df.drop('city_target_type', 1) df = df.drop('city_status', 1) return df def wake_up_omnisci(): #", "df = df.drop('city_status', 1) return df def wake_up_omnisci(): # get OmniSci credentials dfcreds", "], 'dimensionFilterClauses': [ {'filters': [ {'dimensionName': 'ga:pageTitle', 'operator': 'PARTIAL', 'expressions': ['blog']} ]} ]}", "object to be used if we need to authenticate. flow = client.flow_from_clientsecrets( CLIENT_SECRETS_PATH,", "rows = report.get('data', {}).get('rows', []) for row in rows: dimensions = row.get('dimensions', [])", "wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], mapdhost, mapddbname) return connection # MAIN def main(): # connect to", "dataframe into table ' + os_table) drop_table_mapd(connection, os_table) #drop the old table connection.execute(creationstring)", "need to authenticate. 
flow = client.flow_from_clientsecrets( CLIENT_SECRETS_PATH, scope=SCOPES, message=tools.message_if_missing(CLIENT_SECRETS_PATH)) # Prepare credentials, and", "data_metrics = pd.DataFrame(data_metrics[0].tolist()) result = pd.concat([data_dimensions, data_metrics], axis=1, ignore_index=True) result.columns = [\"blog_title\", \"blog_url\",", "DICT(8), referral_path TEXT ENCODING DICT(8), c1_timestamp TIMESTAMP, unique_pageviews FLOAT, time_on_page FLOAT, source TEXT", "credentials don't exist or are invalid run through the native client # flow.", "to timestamps format_int_col(df, intcols) format_str_col(df, strcols) format_flt_col(df, floatcols) df = parse_geo_data(df) df =", "fn): df.to_csv(fn, index=False) # OMNISCI FUNCTIONS def source(url): if 'tag' in url: source", "OmniSci credentials dfcreds = pd.DataFrame() dfcreds = get_credentials(omnisci_keyfile) # connect to OmniSci, allowing", "return analytics.reports().batchGet( body=bodycontent).execute() def print_response(response): \"\"\"Parses and prints the Analytics Reporting API V4", "df = parse_source(df) df = parse_city(df) return df # Load CSV to dataframe", "= 'mapd community forum' elif 'community.omnisci.com' in url: source = 'omnisci community forum'", "result.columns = [\"blog_title\", \"blog_url\", \"referral_path\", \"c1_timestamp\", \"geo_city_code\", \"unique_pageviews\", \"time_on_page\"] # set the column", "axis=1, ignore_index=True) result.columns = [\"blog_title\", \"blog_url\", \"referral_path\", \"c1_timestamp\", \"geo_city_code\", \"unique_pageviews\", \"time_on_page\"] # set", "25 # parameters for Google API SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest') VIEW_ID", "{'name': 'ga:pagePath'}, {'name': 'ga:referralPath'}, {'name': 'ga:date'}, {'name': 'ga:cityID'} ], 'dimensionFilterClauses': [ {'filters': [", "to be recast from strings to timestamps format_int_col(df, intcols) format_str_col(df, 
strcols) format_flt_col(df, floatcols)", "format_flt_col file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/' file_geocodes = file_path + 'AdWords API Location Criteria 2018-09-04.csv'", "reportbody in tables_and_files: # connect to Google Analytics analytics = initialize_analyticsreporting() response =", "source = 'other / direct' return (source) def parse_source(df): df['source'] = df['blog_url'].apply(source) return", "c1_timestamp TIMESTAMP, unique_pageviews FLOAT, time_on_page FLOAT, source TEXT ENCODING DICT(8), city_name TEXT ENCODING", "the service object. analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI) return analytics def get_report(analytics,", "'dateRanges': [ {'startDate': start_date, 'endDate': 'today'} ], 'pageSize': 10000, 'metrics': [ {'expression': 'ga:uniquePageviews'},", "json_normalize from omnisci_utils import get_credentials from omnisci_utils import wake_and_connect_to_mapd from omnisci_utils import drop_table_mapd", "in url: source = 'mapd community forum' elif 'community.omnisci.com' in url: source =", "save the dataframe to a file output_to_csv(df, csv_file) # create the new dataframe", "{'filters': [ {'dimensionName': 'ga:pageTitle', 'operator': 'PARTIAL', 'expressions': ['blog']} ]} ]} ]} ) ]", "datetime values to be recast from strings to timestamps format_int_col(df, intcols) format_str_col(df, strcols)", "DICT(8), c1_timestamp TIMESTAMP, unique_pageviews FLOAT, time_on_page FLOAT, source TEXT ENCODING DICT(8), city_name TEXT", "flow = client.flow_from_clientsecrets( CLIENT_SECRETS_PATH, scope=SCOPES, message=tools.message_if_missing(CLIENT_SECRETS_PATH)) # Prepare credentials, and authorize HTTP object", "import json_normalize from omnisci_utils import get_credentials from omnisci_utils import wake_and_connect_to_mapd from omnisci_utils import", "[]) dateRangeValues = row.get('metrics', []) for header, dimension in zip(dimensionHeaders, dimensions): print (header", "import argparse from 
apiclient.discovery import build import httplib2 from oauth2client import client from", "object. analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI) return analytics def get_report(analytics, bodycontent): #", "through the native client # flow. The Storage object will ensure that if", "Google Analytics analytics = initialize_analyticsreporting() response = get_report(analytics, reportbody) # format the data", "file_geocodes = file_path + 'AdWords API Location Criteria 2018-09-04.csv' # parameters for OmniSci", "': ' + dimension) for i, values in enumerate(dateRangeValues): print ('Date range ('", "strings to timestamps format_int_col(df, intcols) format_str_col(df, strcols) format_flt_col(df, floatcols) df = parse_geo_data(df) df", "= pd.merge(df, dfcity, on=['geo_city_code'], how='left') return df def parse_geo_data(df): df = parse_source(df) df", "('https://analyticsreporting.googleapis.com/$discovery/rest') VIEW_ID = '93521025' CLIENT_SECRETS_PATH = file_path + 'client_secrets.json' # path to client_secrets.json", "2018 @author: ericgrant \"\"\" import argparse from apiclient.discovery import build import httplib2 from", "credentials is None or credentials.invalid: credentials = tools.run_flow(flow, storage, flags) http = credentials.authorize(http=httplib2.Http())", "+ value) def format_data(response): reports = response['reports'][0] columnHeader = reports['columnHeader']['dimensions'] metricHeader = reports['columnHeader']['metricHeader']['metricHeaderEntries']", "ENCODING DICT(8));', { 'reportRequests': [ { 'viewId': VIEW_ID, 'dateRanges': [ {'startDate': start_date, 'endDate':", "to wake connection = wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], mapdhost, mapddbname) return connection # MAIN def", "# connect to omnisci connection = wake_up_omnisci() if connection != \"RETRY\": # loop", "row.get('metrics', []) for header, dimension in zip(dimensionHeaders, dimensions): print (header + ': '", 
"df.drop('city_status', 1) return df def wake_up_omnisci(): # get OmniSci credentials dfcreds = pd.DataFrame()", "OmniSci wants df = format_data(response) # save the dataframe to a file output_to_csv(df,", "credentials dfcreds = pd.DataFrame() dfcreds = get_credentials(omnisci_keyfile) # connect to OmniSci, allowing time", "metricHeader = reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns = columnHeader for metric in metricHeader: columns.append(metric['name']) data =", "get_credentials(omnisci_keyfile) # connect to OmniSci, allowing time for the instance to wake connection", "wake_and_connect_to_mapd from omnisci_utils import drop_table_mapd from omnisci_utils import disconnect_mapd from parsing_utils import format_date_cols", "data_metrics], axis=1, ignore_index=True) result.columns = [\"blog_title\", \"blog_url\", \"referral_path\", \"c1_timestamp\", \"geo_city_code\", \"unique_pageviews\", \"time_on_page\"] #", "= file.Storage('analyticsreporting.dat') credentials = storage.get() if credentials is None or credentials.invalid: credentials =", "elif 'author' in url: source = 'author' elif url == 'www.mapd.com/blog': source =", "= row.get('dimensions', []) dateRangeValues = row.get('metrics', []) for header, dimension in zip(dimensionHeaders, dimensions):", "TABLE IF NOT EXISTS techsup_ga_blogvisits (blog_title TEXT ENCODING DICT(8), blog_url TEXT ENCODING DICT(8),", "zip(dimensionHeaders, dimensions): print (header + ': ' + dimension) for i, values in", "' + value) def format_data(response): reports = response['reports'][0] columnHeader = reports['columnHeader']['dimensions'] metricHeader =", "pd.DataFrame(data['dimensions'].tolist()) data_metrics = pd.DataFrame(data['metrics'].tolist()) data_metrics = data_metrics.applymap(lambda x: x['values']) data_metrics = pd.DataFrame(data_metrics[0].tolist()) result", "values to be recast from strings to timestamps format_int_col(df, intcols) format_str_col(df, strcols) format_flt_col(df,", 
"analyticsreporting service object. \"\"\" # Parse command-line arguments. parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser])", "'operator': 'PARTIAL', 'expressions': ['blog']} ]} ]} ]} ) ] # GOOGLE ANALYTICS FUNCTIONS", "'ga:referralPath'}, {'name': 'ga:date'}, {'name': 'ga:cityID'} ], 'dimensionFilterClauses': [ {'filters': [ {'dimensionName': 'ga:pageTitle', 'operator':", "credentials will get written back to a file. storage = file.Storage('analyticsreporting.dat') credentials =", "= 'mapd' mapduser = 'mapd' omnisci_keyfile = file_path + 'omnisci_keys.json' wait_interval = 25", "{'expression': 'ga:timeOnPage'} ], 'dimensions': [ {'name': 'ga:pageTitle'}, {'name': 'ga:pagePath'}, {'name': 'ga:referralPath'}, {'name': 'ga:date'},", "= pd.DataFrame(data['dimensions'].tolist()) data_metrics = pd.DataFrame(data['metrics'].tolist()) data_metrics = data_metrics.applymap(lambda x: x['values']) data_metrics = pd.DataFrame(data_metrics[0].tolist())", "elif url == 'www.mapd.com/blog/': source = 'omnisci blog landing page' elif 'community.mapd.com' in", "import tools import pandas as pd from pandas.io.json import json_normalize from omnisci_utils import", "in zip(metricHeaders, values.get('values')): print (metricHeader.get('name') + ': ' + value) def format_data(response): reports", "# set the column names return (result) def output_to_csv(df, fn): df.to_csv(fn, index=False) #", "[]) rows = report.get('data', {}).get('rows', []) for row in rows: dimensions = row.get('dimensions',", "\"c1_timestamp\", \"geo_city_code\", \"unique_pageviews\", \"time_on_page\"] # set the column names return (result) def output_to_csv(df,", "dtcols, tfrmt) #force the column containing datetime values to be recast from strings", "reports for os_table, csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format, creationstring, reportbody in", "time_format, creationstring, reportbody in tables_and_files: # connect to 
Google Analytics analytics = initialize_analyticsreporting()", "the file contents df = parse_data(csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format) print", "CLIENT_SECRETS_PATH, scope=SCOPES, message=tools.message_if_missing(CLIENT_SECRETS_PATH)) # Prepare credentials, and authorize HTTP object with them. #", "{}) dimensionHeaders = columnHeader.get('dimensions', []) metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []) rows = report.get('data',", "mapddbname) return connection # MAIN def main(): # connect to omnisci connection =", "= df.drop('geo_city_code', 1) df = df.drop('city_parent_code', 1) df = df.drop('city_target_type', 1) df =", "pd.DataFrame(data['metrics'].tolist()) data_metrics = data_metrics.applymap(lambda x: x['values']) data_metrics = pd.DataFrame(data_metrics[0].tolist()) result = pd.concat([data_dimensions, data_metrics],", "to a file output_to_csv(df, csv_file) # create the new dataframe from the file", "source TEXT ENCODING DICT(8), city_name TEXT ENCODING DICT(8), city_canonical_name TEXT ENCODING DICT(8), country_code", "for os_table, csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format, creationstring, reportbody in tables_and_files:", "\"\"\"Initializes the analyticsreporting service object. Returns: analytics an authorized analyticsreporting service object. 
\"\"\"", "DICT(8), country_code TEXT ENCODING DICT(8));', { 'reportRequests': [ { 'viewId': VIEW_ID, 'dateRanges': [", "report in response.get('reports', []): columnHeader = report.get('columnHeader', {}) dimensionHeaders = columnHeader.get('dimensions', []) metricHeaders", "{'dimensionName': 'ga:pageTitle', 'operator': 'PARTIAL', 'expressions': ['blog']} ]} ]} ]} ) ] # GOOGLE", "== 'www.mapd.com/blog/': source = 'mapd blog landing page' elif url == 'www.omnisci.com/blog': source", "for row in rows: dimensions = row.get('dimensions', []) dateRangeValues = row.get('metrics', []) for", "or credentials.invalid: credentials = tools.run_flow(flow, storage, flags) http = credentials.authorize(http=httplib2.Http()) # Build the", "# connect to Google Analytics analytics = initialize_analyticsreporting() response = get_report(analytics, reportbody) #", ") ] # GOOGLE ANALYTICS FUNCTIONS def initialize_analyticsreporting(): \"\"\"Initializes the analyticsreporting service object.", "pd.merge(df, dfcity, on=['geo_city_code'], how='left') return df def parse_geo_data(df): df = parse_source(df) df =", "def get_report(analytics, bodycontent): # Use the Analytics Service Object to query the Analytics", "to a file. 
storage = file.Storage('analyticsreporting.dat') credentials = storage.get() if credentials is None", "http=http, discoveryServiceUrl=DISCOVERY_URI) return analytics def get_report(analytics, bodycontent): # Use the Analytics Service Object", "\"RETRY\": # loop through tables and reports for os_table, csv_file, dt_cols, int_cols, float_cols,", "the columnar tables OmniSci wants df = format_data(response) # save the dataframe to", "for metric in metricHeader: columns.append(metric['name']) data = json_normalize(reports['data']['rows']) data_dimensions = pd.DataFrame(data['dimensions'].tolist()) data_metrics =", "if 'tag' in url: source = 'tag' elif 'author' in url: source =", "time_on_page FLOAT, source TEXT ENCODING DICT(8), city_name TEXT ENCODING DICT(8), city_canonical_name TEXT ENCODING", "'www.mapd.com/blog/': source = 'mapd blog landing page' elif url == 'www.omnisci.com/blog': source =", "import wake_and_connect_to_mapd from omnisci_utils import drop_table_mapd from omnisci_utils import disconnect_mapd from parsing_utils import", "dateRangeValues = row.get('metrics', []) for header, dimension in zip(dimensionHeaders, dimensions): print (header +", "from strings to timestamps format_int_col(df, intcols) format_str_col(df, strcols) format_flt_col(df, floatcols) df = parse_geo_data(df)", "columns.append(metric['name']) data = json_normalize(reports['data']['rows']) data_dimensions = pd.DataFrame(data['dimensions'].tolist()) data_metrics = pd.DataFrame(data['metrics'].tolist()) data_metrics = data_metrics.applymap(lambda", "df = parse_city(df) return df # Load CSV to dataframe def parse_data(csvfile, dtcols,", "'expressions': ['blog']} ]} ]} ]} ) ] # GOOGLE ANALYTICS FUNCTIONS def initialize_analyticsreporting():", "recast from strings to timestamps format_int_col(df, intcols) format_str_col(df, strcols) format_flt_col(df, floatcols) df =", "tools import pandas as pd from pandas.io.json import json_normalize from omnisci_utils import get_credentials", "= 
[\"blog_title\", \"blog_url\", \"referral_path\", \"c1_timestamp\", \"geo_city_code\", \"unique_pageviews\", \"time_on_page\"] # set the column names", "# Use the Analytics Service Object to query the Analytics Reporting API V4.", "names return (result) def output_to_csv(df, fn): df.to_csv(fn, index=False) # OMNISCI FUNCTIONS def source(url):", "source = 'tag' elif 'author' in url: source = 'author' elif url ==", "main(): # connect to omnisci connection = wake_up_omnisci() if connection != \"RETRY\": #", "# parameters for OmniSci Cloud mapdhost = 'use2-api.mapd.cloud' mapdport = 443 mapdprotocol =", "source = 'mapd blog landing page' elif url == 'www.mapd.com/blog/': source = 'mapd", "if connection != \"RETRY\": # loop through tables and reports for os_table, csv_file,", "reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns = columnHeader for metric in metricHeader: columns.append(metric['name']) data = json_normalize(reports['data']['rows']) data_dimensions", "= data_metrics.applymap(lambda x: x['values']) data_metrics = pd.DataFrame(data_metrics[0].tolist()) result = pd.concat([data_dimensions, data_metrics], axis=1, ignore_index=True)", "'mapd blog landing page' elif url == 'www.mapd.com/blog/': source = 'mapd blog landing", "'city_name', 'city_canonical_name', 'city_parent_code', 'country_code', 'city_target_type', 'city_status'] # set the column names format_str_col(dfcity, {'geo_city_code'})", "from oauth2client import client from oauth2client import file from oauth2client import tools import", "df.drop('city_parent_code', 1) df = df.drop('city_target_type', 1) df = df.drop('city_status', 1) return df def", "object. \"\"\" # Parse command-line arguments. 
parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags =", "file_path + 'techsup_ga_blogvisits.csv', {'c1_timestamp'}, {}, {'time_on_page', 'unique_pageviews'}, {}, {'geo_city_code'}, \"%Y%m%d\", 'CREATE TABLE IF", "= report.get('data', {}).get('rows', []) for row in rows: dimensions = row.get('dimensions', []) dateRangeValues", "ENCODING DICT(8), referral_path TEXT ENCODING DICT(8), c1_timestamp TIMESTAMP, unique_pageviews FLOAT, time_on_page FLOAT, source", "+ 'techsup_ga_blogvisits.csv', {'c1_timestamp'}, {}, {'time_on_page', 'unique_pageviews'}, {}, {'geo_city_code'}, \"%Y%m%d\", 'CREATE TABLE IF NOT", "print (metricHeader.get('name') + ': ' + value) def format_data(response): reports = response['reports'][0] columnHeader", "that if successful the good # credentials will get written back to a", "VIEW_ID = '93521025' CLIENT_SECRETS_PATH = file_path + 'client_secrets.json' # path to client_secrets.json file.", "body=bodycontent).execute() def print_response(response): \"\"\"Parses and prints the Analytics Reporting API V4 response\"\"\" for", "/ direct' return (source) def parse_source(df): df['source'] = df['blog_url'].apply(source) return df def parse_city(df):", "the dataframe to a file output_to_csv(df, csv_file) # create the new dataframe from", "= df.drop('city_parent_code', 1) df = df.drop('city_target_type', 1) df = df.drop('city_status', 1) return df", "columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []) rows = report.get('data', {}).get('rows', []) for row in rows: dimensions", "parse_geo_data(df) df = df.drop('geo_city_code', 1) df = df.drop('city_parent_code', 1) df = df.drop('city_target_type', 1)", "time for the instance to wake connection = wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], mapdhost, mapddbname) return", "parse_data(csvfile, dtcols, intcols, floatcols, strcols, renamings, tfrmt): df = pd.read_csv(csvfile) 
df.reset_index(drop=True, inplace=True) format_date_cols(df,", "values in enumerate(dateRangeValues): print ('Date range (' + str(i) + ')') for metricHeader,", "renamings, tfrmt): df = pd.read_csv(csvfile) df.reset_index(drop=True, inplace=True) format_date_cols(df, dtcols, tfrmt) #force the column", "if we need to authenticate. flow = client.flow_from_clientsecrets( CLIENT_SECRETS_PATH, scope=SCOPES, message=tools.message_if_missing(CLIENT_SECRETS_PATH)) # Prepare", "allowing time for the instance to wake connection = wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], mapdhost, mapddbname)", "= credentials.authorize(http=httplib2.Http()) # Build the service object. analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)", "elif url == 'www.mapd.com/blog/': source = 'mapd blog landing page' elif url ==", "how='left') return df def parse_geo_data(df): df = parse_source(df) df = parse_city(df) return df", "df = parse_geo_data(df) df = df.drop('geo_city_code', 1) df = df.drop('city_parent_code', 1) df =", "into the columnar tables OmniSci wants df = format_data(response) # save the dataframe", "inplace=True) format_date_cols(df, dtcols, tfrmt) #force the column containing datetime values to be recast", "from omnisci_utils import get_credentials from omnisci_utils import wake_and_connect_to_mapd from omnisci_utils import drop_table_mapd from", "{'name': 'ga:referralPath'}, {'name': 'ga:date'}, {'name': 'ga:cityID'} ], 'dimensionFilterClauses': [ {'filters': [ {'dimensionName': 'ga:pageTitle',", "dataframe to a file output_to_csv(df, csv_file) # create the new dataframe from the", "columnHeader.get('dimensions', []) metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []) rows = report.get('data', {}).get('rows', []) for", "df = pd.read_csv(csvfile) df.reset_index(drop=True, inplace=True) format_date_cols(df, dtcols, tfrmt) #force the column containing datetime", "parser.parse_args([]) # 
Set up a Flow object to be used if we need", "for i, values in enumerate(dateRangeValues): print ('Date range (' + str(i) + ')')", "data_metrics = pd.DataFrame(data['metrics'].tolist()) data_metrics = data_metrics.applymap(lambda x: x['values']) data_metrics = pd.DataFrame(data_metrics[0].tolist()) result =", "float_cols, str_cols, rename_cols, time_format, creationstring, reportbody in tables_and_files: # connect to Google Analytics", "in response.get('reports', []): columnHeader = report.get('columnHeader', {}) dimensionHeaders = columnHeader.get('dimensions', []) metricHeaders =", "build import httplib2 from oauth2client import client from oauth2client import file from oauth2client", "be used if we need to authenticate. flow = client.flow_from_clientsecrets( CLIENT_SECRETS_PATH, scope=SCOPES, message=tools.message_if_missing(CLIENT_SECRETS_PATH))", "'techsup_ga_blogvisits.csv', {'c1_timestamp'}, {}, {'time_on_page', 'unique_pageviews'}, {}, {'geo_city_code'}, \"%Y%m%d\", 'CREATE TABLE IF NOT EXISTS", "'www.omnisci.com/blog': source = 'omnisci blog landing page' elif url == 'www.mapd.com/blog/': source =", "format the data into the columnar tables OmniSci wants df = format_data(response) #", "{'name': 'ga:cityID'} ], 'dimensionFilterClauses': [ {'filters': [ {'dimensionName': 'ga:pageTitle', 'operator': 'PARTIAL', 'expressions': ['blog']}", "bodycontent): # Use the Analytics Service Object to query the Analytics Reporting API", "report.get('data', {}).get('rows', []) for row in rows: dimensions = row.get('dimensions', []) dateRangeValues =", "in tables_and_files: # connect to Google Analytics analytics = initialize_analyticsreporting() response = get_report(analytics,", "through tables and reports for os_table, csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format,", "+ 'client_secrets.json' # path to client_secrets.json file. 
start_date = '2017-04-01' tables_and_files = [", "If the credentials don't exist or are invalid run through the native client", "'tag' in url: source = 'tag' elif 'author' in url: source = 'author'", "else: source = 'other / direct' return (source) def parse_source(df): df['source'] = df['blog_url'].apply(source)", "the instance to wake connection = wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], mapdhost, mapddbname) return connection #", "mapdhost, mapddbname) return connection # MAIN def main(): # connect to omnisci connection", "[ {'name': 'ga:pageTitle'}, {'name': 'ga:pagePath'}, {'name': 'ga:referralPath'}, {'name': 'ga:date'}, {'name': 'ga:cityID'} ], 'dimensionFilterClauses':", "source = 'author' elif url == 'www.mapd.com/blog': source = 'mapd blog landing page'", "views ('techsup_ga_blogvisits', file_path + 'techsup_ga_blogvisits.csv', {'c1_timestamp'}, {}, {'time_on_page', 'unique_pageviews'}, {}, {'geo_city_code'}, \"%Y%m%d\", 'CREATE", "output_to_csv(df, csv_file) # create the new dataframe from the file contents df =", "import pandas as pd from pandas.io.json import json_normalize from omnisci_utils import get_credentials from", "tables_and_files: # connect to Google Analytics analytics = initialize_analyticsreporting() response = get_report(analytics, reportbody)", "to authenticate. 
flow = client.flow_from_clientsecrets( CLIENT_SECRETS_PATH, scope=SCOPES, message=tools.message_if_missing(CLIENT_SECRETS_PATH)) # Prepare credentials, and authorize", "= wake_up_omnisci() if connection != \"RETRY\": # loop through tables and reports for", "start_date = '2017-04-01' tables_and_files = [ #blog post views ('techsup_ga_blogvisits', file_path + 'techsup_ga_blogvisits.csv',", "tfrmt): df = pd.read_csv(csvfile) df.reset_index(drop=True, inplace=True) format_date_cols(df, dtcols, tfrmt) #force the column containing", "'unique_pageviews'}, {}, {'geo_city_code'}, \"%Y%m%d\", 'CREATE TABLE IF NOT EXISTS techsup_ga_blogvisits (blog_title TEXT ENCODING", "], 'dimensions': [ {'name': 'ga:pageTitle'}, {'name': 'ga:pagePath'}, {'name': 'ga:referralPath'}, {'name': 'ga:date'}, {'name': 'ga:cityID'}", "{}, {'geo_city_code'}, \"%Y%m%d\", 'CREATE TABLE IF NOT EXISTS techsup_ga_blogvisits (blog_title TEXT ENCODING DICT(8),", "blog landing page' elif url == 'www.mapd.com/blog/': source = 'omnisci blog landing page'", "'reportRequests': [ { 'viewId': VIEW_ID, 'dateRanges': [ {'startDate': start_date, 'endDate': 'today'} ], 'pageSize':", "dfcreds = get_credentials(omnisci_keyfile) # connect to OmniSci, allowing time for the instance to", "old table connection.execute(creationstring) connection.load_table(os_table, df) #load the new table into OmniSci # disconnect", "import file from oauth2client import tools import pandas as pd from pandas.io.json import", "if successful the good # credentials will get written back to a file.", "disconnect_mapd from parsing_utils import format_date_cols from parsing_utils import format_int_col from parsing_utils import format_str_col", "Use the Analytics Service Object to query the Analytics Reporting API V4. 
return", "blog landing page' elif url == 'www.mapd.com/blog/': source = 'mapd blog landing page'", "FUNCTIONS def source(url): if 'tag' in url: source = 'tag' elif 'author' in", "row in rows: dimensions = row.get('dimensions', []) dateRangeValues = row.get('metrics', []) for header,", "], 'pageSize': 10000, 'metrics': [ {'expression': 'ga:uniquePageviews'}, {'expression': 'ga:timeOnPage'} ], 'dimensions': [ {'name':", "page' elif 'community.mapd.com' in url: source = 'mapd community forum' elif 'community.omnisci.com' in", "blog landing page' elif 'community.mapd.com' in url: source = 'mapd community forum' elif", "{}).get('rows', []) for row in rows: dimensions = row.get('dimensions', []) dateRangeValues = row.get('metrics',", "from omnisci_utils import drop_table_mapd from omnisci_utils import disconnect_mapd from parsing_utils import format_date_cols from", "2018-09-04.csv' # parameters for OmniSci Cloud mapdhost = 'use2-api.mapd.cloud' mapdport = 443 mapdprotocol", "'endDate': 'today'} ], 'pageSize': 10000, 'metrics': [ {'expression': 'ga:uniquePageviews'}, {'expression': 'ga:timeOnPage'} ], 'dimensions':", "= argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]) flags = parser.parse_args([]) # Set up a Flow object", "url: source = 'omnisci community forum' else: source = 'other / direct' return", "'mapd community forum' elif 'community.omnisci.com' in url: source = 'omnisci community forum' else:", "analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI) return analytics def get_report(analytics, bodycontent): # Use", "return df def parse_city(df): dfcity = pd.read_csv(file_geocodes) dfcity.columns = ['geo_city_code', 'city_name', 'city_canonical_name', 'city_parent_code',", "dimensions): print (header + ': ' + dimension) for i, values in enumerate(dateRangeValues):", "df['source'] = df['blog_url'].apply(source) return df def parse_city(df): dfcity = pd.read_csv(file_geocodes) 
dfcity.columns = ['geo_city_code',", "prints the Analytics Reporting API V4 response\"\"\" for report in response.get('reports', []): columnHeader", "df = df.drop('city_parent_code', 1) df = df.drop('city_target_type', 1) df = df.drop('city_status', 1) return", "Analytics Reporting API V4. return analytics.reports().batchGet( body=bodycontent).execute() def print_response(response): \"\"\"Parses and prints the", "443 mapdprotocol = 'https' mapddbname = 'mapd' mapduser = 'mapd' omnisci_keyfile = file_path", "value) def format_data(response): reports = response['reports'][0] columnHeader = reports['columnHeader']['dimensions'] metricHeader = reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns", "wake_up_omnisci(): # get OmniSci credentials dfcreds = pd.DataFrame() dfcreds = get_credentials(omnisci_keyfile) # connect", "mapdprotocol = 'https' mapddbname = 'mapd' mapduser = 'mapd' omnisci_keyfile = file_path +", "client.flow_from_clientsecrets( CLIENT_SECRETS_PATH, scope=SCOPES, message=tools.message_if_missing(CLIENT_SECRETS_PATH)) # Prepare credentials, and authorize HTTP object with them.", "contents df = parse_data(csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format) print ('loading dataframe", "community forum' elif 'community.omnisci.com' in url: source = 'omnisci community forum' else: source", "'metrics': [ {'expression': 'ga:uniquePageviews'}, {'expression': 'ga:timeOnPage'} ], 'dimensions': [ {'name': 'ga:pageTitle'}, {'name': 'ga:pagePath'},", "return df def parse_geo_data(df): df = parse_source(df) df = parse_city(df) return df #", "return df def wake_up_omnisci(): # get OmniSci credentials dfcreds = pd.DataFrame() dfcreds =", "unique_pageviews FLOAT, time_on_page FLOAT, source TEXT ENCODING DICT(8), city_name TEXT ENCODING DICT(8), city_canonical_name", "start_date, 'endDate': 'today'} ], 'pageSize': 10000, 'metrics': [ {'expression': 'ga:uniquePageviews'}, {'expression': 'ga:timeOnPage'} ],", "None or 
credentials.invalid: credentials = tools.run_flow(flow, storage, flags) http = credentials.authorize(http=httplib2.Http()) # Build", "result = pd.concat([data_dimensions, data_metrics], axis=1, ignore_index=True) result.columns = [\"blog_title\", \"blog_url\", \"referral_path\", \"c1_timestamp\", \"geo_city_code\",", "'client_secrets.json' # path to client_secrets.json file. start_date = '2017-04-01' tables_and_files = [ #blog", "= 25 # parameters for Google API SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest')", "format_date_cols from parsing_utils import format_int_col from parsing_utils import format_str_col from parsing_utils import format_flt_col", "] # GOOGLE ANALYTICS FUNCTIONS def initialize_analyticsreporting(): \"\"\"Initializes the analyticsreporting service object. Returns:", "authenticate. flow = client.flow_from_clientsecrets( CLIENT_SECRETS_PATH, scope=SCOPES, message=tools.message_if_missing(CLIENT_SECRETS_PATH)) # Prepare credentials, and authorize HTTP", "dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format, creationstring, reportbody in tables_and_files: # connect to", "get OmniSci credentials dfcreds = pd.DataFrame() dfcreds = get_credentials(omnisci_keyfile) # connect to OmniSci,", "'city_parent_code', 'country_code', 'city_target_type', 'city_status'] # set the column names format_str_col(dfcity, {'geo_city_code'}) df =", "coding: utf-8 -*- \"\"\" Created on Mon Oct 15 15:48:38 2018 @author: ericgrant", "from parsing_utils import format_str_col from parsing_utils import format_flt_col file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/' file_geocodes =", "wait_interval = 25 # parameters for Google API SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI =", "from parsing_utils import format_flt_col file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/' file_geocodes = file_path + 'AdWords API", 
"disconnect from OmniSci disconnect_mapd(connection) else: print('could not wake OmniSci; exiting') if __name__ ==", "analytics def get_report(analytics, bodycontent): # Use the Analytics Service Object to query the", "in url: source = 'omnisci community forum' else: source = 'other / direct'", "credentials.authorize(http=httplib2.Http()) # Build the service object. analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI) return", "referral_path TEXT ENCODING DICT(8), c1_timestamp TIMESTAMP, unique_pageviews FLOAT, time_on_page FLOAT, source TEXT ENCODING", "import format_date_cols from parsing_utils import format_int_col from parsing_utils import format_str_col from parsing_utils import", "{}).get('metricHeaderEntries', []) rows = report.get('data', {}).get('rows', []) for row in rows: dimensions =", "to omnisci connection = wake_up_omnisci() if connection != \"RETRY\": # loop through tables", "= [ #blog post views ('techsup_ga_blogvisits', file_path + 'techsup_ga_blogvisits.csv', {'c1_timestamp'}, {}, {'time_on_page', 'unique_pageviews'},", "'omnisci blog landing page' elif 'community.mapd.com' in url: source = 'mapd community forum'", "-*- coding: utf-8 -*- \"\"\" Created on Mon Oct 15 15:48:38 2018 @author:", "object. Returns: analytics an authorized analyticsreporting service object. \"\"\" # Parse command-line arguments.", "# parameters for Google API SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest') VIEW_ID =", "Flow object to be used if we need to authenticate. 
flow = client.flow_from_clientsecrets(", "# set the column names format_str_col(dfcity, {'geo_city_code'}) df = pd.merge(df, dfcity, on=['geo_city_code'], how='left')", "format_data(response): reports = response['reports'][0] columnHeader = reports['columnHeader']['dimensions'] metricHeader = reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns = columnHeader", "url == 'www.mapd.com/blog': source = 'mapd blog landing page' elif url == 'www.mapd.com/blog/':", "will get written back to a file. storage = file.Storage('analyticsreporting.dat') credentials = storage.get()", "10000, 'metrics': [ {'expression': 'ga:uniquePageviews'}, {'expression': 'ga:timeOnPage'} ], 'dimensions': [ {'name': 'ga:pageTitle'}, {'name':", "15:48:38 2018 @author: ericgrant \"\"\" import argparse from apiclient.discovery import build import httplib2", "city_canonical_name TEXT ENCODING DICT(8), country_code TEXT ENCODING DICT(8));', { 'reportRequests': [ { 'viewId':", "values.get('values')): print (metricHeader.get('name') + ': ' + value) def format_data(response): reports = response['reports'][0]", "from pandas.io.json import json_normalize from omnisci_utils import get_credentials from omnisci_utils import wake_and_connect_to_mapd from", "'viewId': VIEW_ID, 'dateRanges': [ {'startDate': start_date, 'endDate': 'today'} ], 'pageSize': 10000, 'metrics': [", "parsing_utils import format_int_col from parsing_utils import format_str_col from parsing_utils import format_flt_col file_path =", "parse_geo_data(df): df = parse_source(df) df = parse_city(df) return df # Load CSV to", "omnisci_utils import wake_and_connect_to_mapd from omnisci_utils import drop_table_mapd from omnisci_utils import disconnect_mapd from parsing_utils", "API Location Criteria 2018-09-04.csv' # parameters for OmniSci Cloud mapdhost = 'use2-api.mapd.cloud' mapdport", "# flow. 
The Storage object will ensure that if successful the good #", "on Mon Oct 15 15:48:38 2018 @author: ericgrant \"\"\" import argparse from apiclient.discovery", "url == 'www.mapd.com/blog/': source = 'mapd blog landing page' elif url == 'www.omnisci.com/blog':", "country_code TEXT ENCODING DICT(8));', { 'reportRequests': [ { 'viewId': VIEW_ID, 'dateRanges': [ {'startDate':", "V4. return analytics.reports().batchGet( body=bodycontent).execute() def print_response(response): \"\"\"Parses and prints the Analytics Reporting API", "= '93521025' CLIENT_SECRETS_PATH = file_path + 'client_secrets.json' # path to client_secrets.json file. start_date", "CLIENT_SECRETS_PATH = file_path + 'client_secrets.json' # path to client_secrets.json file. start_date = '2017-04-01'", "= 'use2-api.mapd.cloud' mapdport = 443 mapdprotocol = 'https' mapddbname = 'mapd' mapduser =", "the new table into OmniSci # disconnect from OmniSci disconnect_mapd(connection) else: print('could not", "'today'} ], 'pageSize': 10000, 'metrics': [ {'expression': 'ga:uniquePageviews'}, {'expression': 'ga:timeOnPage'} ], 'dimensions': [", "df def parse_geo_data(df): df = parse_source(df) df = parse_city(df) return df # Load", "return (source) def parse_source(df): df['source'] = df['blog_url'].apply(source) return df def parse_city(df): dfcity =", "# disconnect from OmniSci disconnect_mapd(connection) else: print('could not wake OmniSci; exiting') if __name__", "ENCODING DICT(8), city_name TEXT ENCODING DICT(8), city_canonical_name TEXT ENCODING DICT(8), country_code TEXT ENCODING", "df.drop('city_target_type', 1) df = df.drop('city_status', 1) return df def wake_up_omnisci(): # get OmniSci", "import format_int_col from parsing_utils import format_str_col from parsing_utils import format_flt_col file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/'", "Location Criteria 2018-09-04.csv' # parameters for OmniSci Cloud mapdhost = 'use2-api.mapd.cloud' mapdport =", "15 15:48:38 2018 @author: ericgrant 
\"\"\" import argparse from apiclient.discovery import build import", "post views ('techsup_ga_blogvisits', file_path + 'techsup_ga_blogvisits.csv', {'c1_timestamp'}, {}, {'time_on_page', 'unique_pageviews'}, {}, {'geo_city_code'}, \"%Y%m%d\",", "[ {'dimensionName': 'ga:pageTitle', 'operator': 'PARTIAL', 'expressions': ['blog']} ]} ]} ]} ) ] #", "will ensure that if successful the good # credentials will get written back", "= reports['columnHeader']['dimensions'] metricHeader = reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns = columnHeader for metric in metricHeader: columns.append(metric['name'])", "direct' return (source) def parse_source(df): df['source'] = df['blog_url'].apply(source) return df def parse_city(df): dfcity", "\"\"\" Created on Mon Oct 15 15:48:38 2018 @author: ericgrant \"\"\" import argparse", "]} ]} ]} ) ] # GOOGLE ANALYTICS FUNCTIONS def initialize_analyticsreporting(): \"\"\"Initializes the", "written back to a file. storage = file.Storage('analyticsreporting.dat') credentials = storage.get() if credentials", "url: source = 'tag' elif 'author' in url: source = 'author' elif url", "'/Users/ericgrant/Downloads/OKR_Dashboards/xfer/' file_geocodes = file_path + 'AdWords API Location Criteria 2018-09-04.csv' # parameters for", "'use2-api.mapd.cloud' mapdport = 443 mapdprotocol = 'https' mapddbname = 'mapd' mapduser = 'mapd'", "new table into OmniSci # disconnect from OmniSci disconnect_mapd(connection) else: print('could not wake", "df = pd.merge(df, dfcity, on=['geo_city_code'], how='left') return df def parse_geo_data(df): df = parse_source(df)", "parsing_utils import format_flt_col file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/' file_geocodes = file_path + 'AdWords API Location", "= 'omnisci community forum' else: source = 'other / direct' return (source) def", "elif 'community.mapd.com' in url: source = 'mapd community forum' elif 'community.omnisci.com' in url:", "source = 'omnisci blog landing page' elif 
url == 'www.mapd.com/blog/': source = 'omnisci", "# Prepare credentials, and authorize HTTP object with them. # If the credentials", "table connection.execute(creationstring) connection.load_table(os_table, df) #load the new table into OmniSci # disconnect from", "the native client # flow. The Storage object will ensure that if successful", "= file_path + 'AdWords API Location Criteria 2018-09-04.csv' # parameters for OmniSci Cloud", "to Google Analytics analytics = initialize_analyticsreporting() response = get_report(analytics, reportbody) # format the", "ensure that if successful the good # credentials will get written back to", "= file_path + 'client_secrets.json' # path to client_secrets.json file. start_date = '2017-04-01' tables_and_files", "get_credentials from omnisci_utils import wake_and_connect_to_mapd from omnisci_utils import drop_table_mapd from omnisci_utils import disconnect_mapd", "run through the native client # flow. The Storage object will ensure that", "intcols, floatcols, strcols, renamings, tfrmt): df = pd.read_csv(csvfile) df.reset_index(drop=True, inplace=True) format_date_cols(df, dtcols, tfrmt)", "OMNISCI FUNCTIONS def source(url): if 'tag' in url: source = 'tag' elif 'author'", "= parser.parse_args([]) # Set up a Flow object to be used if we", "file contents df = parse_data(csv_file, dt_cols, int_cols, float_cols, str_cols, rename_cols, time_format) print ('loading", "(blog_title TEXT ENCODING DICT(8), blog_url TEXT ENCODING DICT(8), referral_path TEXT ENCODING DICT(8), c1_timestamp", "column containing datetime values to be recast from strings to timestamps format_int_col(df, intcols)", "os_table) #drop the old table connection.execute(creationstring) connection.load_table(os_table, df) #load the new table into", "omnisci_utils import drop_table_mapd from omnisci_utils import disconnect_mapd from parsing_utils import format_date_cols from parsing_utils", "in metricHeader: columns.append(metric['name']) data = 
json_normalize(reports['data']['rows']) data_dimensions = pd.DataFrame(data['dimensions'].tolist()) data_metrics = pd.DataFrame(data['metrics'].tolist()) data_metrics", "data_metrics = data_metrics.applymap(lambda x: x['values']) data_metrics = pd.DataFrame(data_metrics[0].tolist()) result = pd.concat([data_dimensions, data_metrics], axis=1,", "V4 response\"\"\" for report in response.get('reports', []): columnHeader = report.get('columnHeader', {}) dimensionHeaders =", "DICT(8), city_canonical_name TEXT ENCODING DICT(8), country_code TEXT ENCODING DICT(8));', { 'reportRequests': [ {", "from parsing_utils import format_int_col from parsing_utils import format_str_col from parsing_utils import format_flt_col file_path", "elif 'community.omnisci.com' in url: source = 'omnisci community forum' else: source = 'other", "{'name': 'ga:pageTitle'}, {'name': 'ga:pagePath'}, {'name': 'ga:referralPath'}, {'name': 'ga:date'}, {'name': 'ga:cityID'} ], 'dimensionFilterClauses': [", "import format_str_col from parsing_utils import format_flt_col file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/' file_geocodes = file_path +", "ignore_index=True) result.columns = [\"blog_title\", \"blog_url\", \"referral_path\", \"c1_timestamp\", \"geo_city_code\", \"unique_pageviews\", \"time_on_page\"] # set the", "df) #load the new table into OmniSci # disconnect from OmniSci disconnect_mapd(connection) else:", "DICT(8), city_name TEXT ENCODING DICT(8), city_canonical_name TEXT ENCODING DICT(8), country_code TEXT ENCODING DICT(8));',", "'ga:pagePath'}, {'name': 'ga:referralPath'}, {'name': 'ga:date'}, {'name': 'ga:cityID'} ], 'dimensionFilterClauses': [ {'filters': [ {'dimensionName':", "def print_response(response): \"\"\"Parses and prints the Analytics Reporting API V4 response\"\"\" for report", "elif url == 'www.mapd.com/blog': source = 'mapd blog landing page' elif url ==", "the good # credentials will get written back to a file. 
storage =", "page' elif url == 'www.omnisci.com/blog': source = 'omnisci blog landing page' elif url", "blog_url TEXT ENCODING DICT(8), referral_path TEXT ENCODING DICT(8), c1_timestamp TIMESTAMP, unique_pageviews FLOAT, time_on_page", "page' elif url == 'www.mapd.com/blog/': source = 'mapd blog landing page' elif url", "query the Analytics Reporting API V4. return analytics.reports().batchGet( body=bodycontent).execute() def print_response(response): \"\"\"Parses and", "into table ' + os_table) drop_table_mapd(connection, os_table) #drop the old table connection.execute(creationstring) connection.load_table(os_table,", "def parse_city(df): dfcity = pd.read_csv(file_geocodes) dfcity.columns = ['geo_city_code', 'city_name', 'city_canonical_name', 'city_parent_code', 'country_code', 'city_target_type',", "print ('Date range (' + str(i) + ')') for metricHeader, value in zip(metricHeaders,", "Cloud mapdhost = 'use2-api.mapd.cloud' mapdport = 443 mapdprotocol = 'https' mapddbname = 'mapd'", "url: source = 'mapd community forum' elif 'community.omnisci.com' in url: source = 'omnisci", "in zip(dimensionHeaders, dimensions): print (header + ': ' + dimension) for i, values", "for header, dimension in zip(dimensionHeaders, dimensions): print (header + ': ' + dimension)", "'country_code', 'city_target_type', 'city_status'] # set the column names format_str_col(dfcity, {'geo_city_code'}) df = pd.merge(df,", "parsing_utils import format_str_col from parsing_utils import format_flt_col file_path = '/Users/ericgrant/Downloads/OKR_Dashboards/xfer/' file_geocodes = file_path", "instance to wake connection = wake_and_connect_to_mapd(dfcreds['write_key_name'], dfcreds['write_key_secret'], mapdhost, mapddbname) return connection # MAIN", "'mapd' mapduser = 'mapd' omnisci_keyfile = file_path + 'omnisci_keys.json' wait_interval = 25 #", "]} ]} ) ] # GOOGLE ANALYTICS FUNCTIONS def initialize_analyticsreporting(): \"\"\"Initializes the analyticsreporting", "TEXT ENCODING DICT(8));', { 
'reportRequests': [ { 'viewId': VIEW_ID, 'dateRanges': [ {'startDate': start_date," ]
[ "Note with all attributes set \"\"\" return _example_note() @fixture def example_note_gen(request: FixtureRequest) ->", "with all attributes set \"\"\" return _example_note() @fixture def example_note_gen(request: FixtureRequest) -> Iterator[Note]:", "request (FixtureRequest): `request.param` is the length of the generator Yields: Note: Example note,", "-> Iterator[Note]: \"\"\" Args: request (FixtureRequest): `request.param` is the length of the generator", "def example_note_gen(request: FixtureRequest) -> Iterator[Note]: \"\"\" Args: request (FixtureRequest): `request.param` is the length", "import Note def _example_note() -> Note: return Note( title='Spam', content='Bacon', category='Todo', favorite=True, id=1337,", "return _example_note() @fixture def example_note_gen(request: FixtureRequest) -> Iterator[Note]: \"\"\" Args: request (FixtureRequest): `request.param`", "of the generator Yields: Note: Example note, see `example_note()` \"\"\" return (_example_note() for", "all attributes set \"\"\" return _example_note() @fixture def example_note_gen(request: FixtureRequest) -> Iterator[Note]: \"\"\"", "set \"\"\" return _example_note() @fixture def example_note_gen(request: FixtureRequest) -> Iterator[Note]: \"\"\" Args: request", "from typing import Iterator from _pytest.fixtures import FixtureRequest from pytest import fixture from", "import fixture from nextcloud_notes_api import Note def _example_note() -> Note: return Note( title='Spam',", "https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a modified=100_000, ) @fixture def example_note() -> Note: \"\"\" Returns: Note: Note with", "Note def _example_note() -> Note: return Note( title='Spam', content='Bacon', category='Todo', favorite=True, id=1337, #", "favorite=True, id=1337, # https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a modified=100_000, ) @fixture def example_note() -> 
Note: \"\"\" Returns:", "Iterator[Note]: \"\"\" Args: request (FixtureRequest): `request.param` is the length of the generator Yields:", "the length of the generator Yields: Note: Example note, see `example_note()` \"\"\" return", "return Note( title='Spam', content='Bacon', category='Todo', favorite=True, id=1337, # https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a modified=100_000, ) @fixture def", "category='Todo', favorite=True, id=1337, # https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a modified=100_000, ) @fixture def example_note() -> Note: \"\"\"", "\"\"\" Args: request (FixtureRequest): `request.param` is the length of the generator Yields: Note:", "import Iterator from _pytest.fixtures import FixtureRequest from pytest import fixture from nextcloud_notes_api import", "(FixtureRequest): `request.param` is the length of the generator Yields: Note: Example note, see", "Iterator from _pytest.fixtures import FixtureRequest from pytest import fixture from nextcloud_notes_api import Note", "Yields: Note: Example note, see `example_note()` \"\"\" return (_example_note() for _ in range(request.param))", "\"\"\" return _example_note() @fixture def example_note_gen(request: FixtureRequest) -> Iterator[Note]: \"\"\" Args: request (FixtureRequest):", "-> Note: \"\"\" Returns: Note: Note with all attributes set \"\"\" return _example_note()", "pytest import fixture from nextcloud_notes_api import Note def _example_note() -> Note: return Note(", "@fixture def example_note_gen(request: FixtureRequest) -> Iterator[Note]: \"\"\" Args: request (FixtureRequest): `request.param` is the", "import FixtureRequest from pytest import fixture from nextcloud_notes_api import Note def _example_note() ->", "the generator Yields: Note: Example note, see `example_note()` \"\"\" return (_example_note() for _", "Returns: Note: Note with all attributes set 
\"\"\" return _example_note() @fixture def example_note_gen(request:", "fixture from nextcloud_notes_api import Note def _example_note() -> Note: return Note( title='Spam', content='Bacon',", "from nextcloud_notes_api import Note def _example_note() -> Note: return Note( title='Spam', content='Bacon', category='Todo',", ") @fixture def example_note() -> Note: \"\"\" Returns: Note: Note with all attributes", "title='Spam', content='Bacon', category='Todo', favorite=True, id=1337, # https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a modified=100_000, ) @fixture def example_note() ->", "# https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a modified=100_000, ) @fixture def example_note() -> Note: \"\"\" Returns: Note: Note", "Note: Note with all attributes set \"\"\" return _example_note() @fixture def example_note_gen(request: FixtureRequest)", "FixtureRequest) -> Iterator[Note]: \"\"\" Args: request (FixtureRequest): `request.param` is the length of the", "length of the generator Yields: Note: Example note, see `example_note()` \"\"\" return (_example_note()", "def example_note() -> Note: \"\"\" Returns: Note: Note with all attributes set \"\"\"", "id=1337, # https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a modified=100_000, ) @fixture def example_note() -> Note: \"\"\" Returns: Note:", "Note( title='Spam', content='Bacon', category='Todo', favorite=True, id=1337, # https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a modified=100_000, ) @fixture def example_note()", "Note: \"\"\" Returns: Note: Note with all attributes set \"\"\" return _example_note() @fixture", "attributes set \"\"\" return _example_note() @fixture def example_note_gen(request: FixtureRequest) -> Iterator[Note]: \"\"\" Args:", "example_note() 
-> Note: \"\"\" Returns: Note: Note with all attributes set \"\"\" return", "modified=100_000, ) @fixture def example_note() -> Note: \"\"\" Returns: Note: Note with all", "FixtureRequest from pytest import fixture from nextcloud_notes_api import Note def _example_note() -> Note:", "_example_note() -> Note: return Note( title='Spam', content='Bacon', category='Todo', favorite=True, id=1337, # https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a modified=100_000,", "`request.param` is the length of the generator Yields: Note: Example note, see `example_note()`", "content='Bacon', category='Todo', favorite=True, id=1337, # https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a modified=100_000, ) @fixture def example_note() -> Note:", "Note: return Note( title='Spam', content='Bacon', category='Todo', favorite=True, id=1337, # https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a modified=100_000, ) @fixture", "example_note_gen(request: FixtureRequest) -> Iterator[Note]: \"\"\" Args: request (FixtureRequest): `request.param` is the length of", "Args: request (FixtureRequest): `request.param` is the length of the generator Yields: Note: Example", "generator Yields: Note: Example note, see `example_note()` \"\"\" return (_example_note() for _ in", "\"\"\" Returns: Note: Note with all attributes set \"\"\" return _example_note() @fixture def", "-> Note: return Note( title='Spam', content='Bacon', category='Todo', favorite=True, id=1337, # https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a modified=100_000, )", "from pytest import fixture from nextcloud_notes_api import Note def _example_note() -> Note: return", "nextcloud_notes_api import Note def _example_note() -> Note: return Note( title='Spam', content='Bacon', 
category='Todo', favorite=True,", "_pytest.fixtures import FixtureRequest from pytest import fixture from nextcloud_notes_api import Note def _example_note()", "is the length of the generator Yields: Note: Example note, see `example_note()` \"\"\"", "typing import Iterator from _pytest.fixtures import FixtureRequest from pytest import fixture from nextcloud_notes_api", "from _pytest.fixtures import FixtureRequest from pytest import fixture from nextcloud_notes_api import Note def", "def _example_note() -> Note: return Note( title='Spam', content='Bacon', category='Todo', favorite=True, id=1337, # https://stackoverflow.com/questions/59199985/why-is-datetimes-timestamp-method-returning-oserror-errno-22-invalid-a", "@fixture def example_note() -> Note: \"\"\" Returns: Note: Note with all attributes set", "_example_note() @fixture def example_note_gen(request: FixtureRequest) -> Iterator[Note]: \"\"\" Args: request (FixtureRequest): `request.param` is" ]
[ "import RenderTaskApi from .role_api import RoleApi from .scheduled_plan_api import ScheduledPlanApi from .session_api import", "from .data_action_api import DataActionApi from .datagroup_api import DatagroupApi from .folder_api import FolderApi from", "from .connection_api import ConnectionApi from .content_api import ContentApi from .dashboard_api import DashboardApi from", "import DataActionApi from .datagroup_api import DatagroupApi from .folder_api import FolderApi from .group_api import", "# import apis into api package from .api_auth_api import ApiAuthApi from .auth_api import", "api package from .api_auth_api import ApiAuthApi from .auth_api import AuthApi from .color_collection_api import", ".data_action_api import DataActionApi from .datagroup_api import DatagroupApi from .folder_api import FolderApi from .group_api", "from .look_api import LookApi from .lookml_model_api import LookmlModelApi from .project_api import ProjectApi from", ".group_api import GroupApi from .homepage_api import HomepageApi from .integration_api import IntegrationApi from .look_api", "import ApiAuthApi from .auth_api import AuthApi from .color_collection_api import ColorCollectionApi from .config_api import", ".dashboard_api import DashboardApi from .data_action_api import DataActionApi from .datagroup_api import DatagroupApi from .folder_api", "AuthApi from .color_collection_api import ColorCollectionApi from .config_api import ConfigApi from .connection_api import ConnectionApi", ".render_task_api import RenderTaskApi from .role_api import RoleApi from .scheduled_plan_api import ScheduledPlanApi from .session_api", "from .auth_api import AuthApi from .color_collection_api import ColorCollectionApi from .config_api import ConfigApi from", "ColorCollectionApi from .config_api import ConfigApi from .connection_api import ConnectionApi from .content_api import ContentApi", "FolderApi from .group_api import GroupApi from .homepage_api import HomepageApi from .integration_api 
import IntegrationApi", "from .session_api import SessionApi from .space_api import SpaceApi from .theme_api import ThemeApi from", ".role_api import RoleApi from .scheduled_plan_api import ScheduledPlanApi from .session_api import SessionApi from .space_api", "<reponame>jcarah/python_sdk from __future__ import absolute_import # import apis into api package from .api_auth_api", "ProjectApi from .query_api import QueryApi from .render_task_api import RenderTaskApi from .role_api import RoleApi", ".scheduled_plan_api import ScheduledPlanApi from .session_api import SessionApi from .space_api import SpaceApi from .theme_api", ".session_api import SessionApi from .space_api import SpaceApi from .theme_api import ThemeApi from .user_api", "from .content_api import ContentApi from .dashboard_api import DashboardApi from .data_action_api import DataActionApi from", "into api package from .api_auth_api import ApiAuthApi from .auth_api import AuthApi from .color_collection_api", "ConfigApi from .connection_api import ConnectionApi from .content_api import ContentApi from .dashboard_api import DashboardApi", ".project_api import ProjectApi from .query_api import QueryApi from .render_task_api import RenderTaskApi from .role_api", ".look_api import LookApi from .lookml_model_api import LookmlModelApi from .project_api import ProjectApi from .query_api", "from .integration_api import IntegrationApi from .look_api import LookApi from .lookml_model_api import LookmlModelApi from", "import ScheduledPlanApi from .session_api import SessionApi from .space_api import SpaceApi from .theme_api import", "SpaceApi from .theme_api import ThemeApi from .user_api import UserApi from .user_attribute_api import UserAttributeApi", "package from .api_auth_api import ApiAuthApi from .auth_api import AuthApi from .color_collection_api import ColorCollectionApi", "import DashboardApi from .data_action_api import DataActionApi from .datagroup_api import DatagroupApi from .folder_api import", 
"SessionApi from .space_api import SpaceApi from .theme_api import ThemeApi from .user_api import UserApi", ".connection_api import ConnectionApi from .content_api import ContentApi from .dashboard_api import DashboardApi from .data_action_api", "GroupApi from .homepage_api import HomepageApi from .integration_api import IntegrationApi from .look_api import LookApi", "import SessionApi from .space_api import SpaceApi from .theme_api import ThemeApi from .user_api import", "IntegrationApi from .look_api import LookApi from .lookml_model_api import LookmlModelApi from .project_api import ProjectApi", "from .render_task_api import RenderTaskApi from .role_api import RoleApi from .scheduled_plan_api import ScheduledPlanApi from", "import ConfigApi from .connection_api import ConnectionApi from .content_api import ContentApi from .dashboard_api import", ".auth_api import AuthApi from .color_collection_api import ColorCollectionApi from .config_api import ConfigApi from .connection_api", "from .color_collection_api import ColorCollectionApi from .config_api import ConfigApi from .connection_api import ConnectionApi from", "import AuthApi from .color_collection_api import ColorCollectionApi from .config_api import ConfigApi from .connection_api import", "from .theme_api import ThemeApi from .user_api import UserApi from .user_attribute_api import UserAttributeApi from", "apis into api package from .api_auth_api import ApiAuthApi from .auth_api import AuthApi from", "import ThemeApi from .user_api import UserApi from .user_attribute_api import UserAttributeApi from .workspace_api import", ".api_auth_api import ApiAuthApi from .auth_api import AuthApi from .color_collection_api import ColorCollectionApi from .config_api", "LookApi from .lookml_model_api import LookmlModelApi from .project_api import ProjectApi from .query_api import QueryApi", "from .group_api import GroupApi from .homepage_api import HomepageApi from .integration_api import IntegrationApi from", "from 
.api_auth_api import ApiAuthApi from .auth_api import AuthApi from .color_collection_api import ColorCollectionApi from", "QueryApi from .render_task_api import RenderTaskApi from .role_api import RoleApi from .scheduled_plan_api import ScheduledPlanApi", "import SpaceApi from .theme_api import ThemeApi from .user_api import UserApi from .user_attribute_api import", "import absolute_import # import apis into api package from .api_auth_api import ApiAuthApi from", "import ProjectApi from .query_api import QueryApi from .render_task_api import RenderTaskApi from .role_api import", "ConnectionApi from .content_api import ContentApi from .dashboard_api import DashboardApi from .data_action_api import DataActionApi", "RenderTaskApi from .role_api import RoleApi from .scheduled_plan_api import ScheduledPlanApi from .session_api import SessionApi", "LookmlModelApi from .project_api import ProjectApi from .query_api import QueryApi from .render_task_api import RenderTaskApi", "import HomepageApi from .integration_api import IntegrationApi from .look_api import LookApi from .lookml_model_api import", ".theme_api import ThemeApi from .user_api import UserApi from .user_attribute_api import UserAttributeApi from .workspace_api", ".homepage_api import HomepageApi from .integration_api import IntegrationApi from .look_api import LookApi from .lookml_model_api", ".folder_api import FolderApi from .group_api import GroupApi from .homepage_api import HomepageApi from .integration_api", "from .lookml_model_api import LookmlModelApi from .project_api import ProjectApi from .query_api import QueryApi from", "from .config_api import ConfigApi from .connection_api import ConnectionApi from .content_api import ContentApi from", ".datagroup_api import DatagroupApi from .folder_api import FolderApi from .group_api import GroupApi from .homepage_api", "from .homepage_api import HomepageApi from .integration_api import IntegrationApi from .look_api import LookApi from", "import 
ColorCollectionApi from .config_api import ConfigApi from .connection_api import ConnectionApi from .content_api import", "import DatagroupApi from .folder_api import FolderApi from .group_api import GroupApi from .homepage_api import", "import QueryApi from .render_task_api import RenderTaskApi from .role_api import RoleApi from .scheduled_plan_api import", "DatagroupApi from .folder_api import FolderApi from .group_api import GroupApi from .homepage_api import HomepageApi", "RoleApi from .scheduled_plan_api import ScheduledPlanApi from .session_api import SessionApi from .space_api import SpaceApi", "HomepageApi from .integration_api import IntegrationApi from .look_api import LookApi from .lookml_model_api import LookmlModelApi", ".config_api import ConfigApi from .connection_api import ConnectionApi from .content_api import ContentApi from .dashboard_api", "import ConnectionApi from .content_api import ContentApi from .dashboard_api import DashboardApi from .data_action_api import", "from .folder_api import FolderApi from .group_api import GroupApi from .homepage_api import HomepageApi from", "import IntegrationApi from .look_api import LookApi from .lookml_model_api import LookmlModelApi from .project_api import", "import RoleApi from .scheduled_plan_api import ScheduledPlanApi from .session_api import SessionApi from .space_api import", "import LookmlModelApi from .project_api import ProjectApi from .query_api import QueryApi from .render_task_api import", "from .scheduled_plan_api import ScheduledPlanApi from .session_api import SessionApi from .space_api import SpaceApi from", "from .query_api import QueryApi from .render_task_api import RenderTaskApi from .role_api import RoleApi from", "absolute_import # import apis into api package from .api_auth_api import ApiAuthApi from .auth_api", "from .space_api import SpaceApi from .theme_api import ThemeApi from .user_api import UserApi from", ".integration_api import IntegrationApi from .look_api import LookApi 
from .lookml_model_api import LookmlModelApi from .project_api", "__future__ import absolute_import # import apis into api package from .api_auth_api import ApiAuthApi", "import FolderApi from .group_api import GroupApi from .homepage_api import HomepageApi from .integration_api import", "from .role_api import RoleApi from .scheduled_plan_api import ScheduledPlanApi from .session_api import SessionApi from", ".space_api import SpaceApi from .theme_api import ThemeApi from .user_api import UserApi from .user_attribute_api", ".content_api import ContentApi from .dashboard_api import DashboardApi from .data_action_api import DataActionApi from .datagroup_api", "from .datagroup_api import DatagroupApi from .folder_api import FolderApi from .group_api import GroupApi from", "import apis into api package from .api_auth_api import ApiAuthApi from .auth_api import AuthApi", "ApiAuthApi from .auth_api import AuthApi from .color_collection_api import ColorCollectionApi from .config_api import ConfigApi", "import ContentApi from .dashboard_api import DashboardApi from .data_action_api import DataActionApi from .datagroup_api import", ".query_api import QueryApi from .render_task_api import RenderTaskApi from .role_api import RoleApi from .scheduled_plan_api", ".color_collection_api import ColorCollectionApi from .config_api import ConfigApi from .connection_api import ConnectionApi from .content_api", "from __future__ import absolute_import # import apis into api package from .api_auth_api import", "DataActionApi from .datagroup_api import DatagroupApi from .folder_api import FolderApi from .group_api import GroupApi", "import GroupApi from .homepage_api import HomepageApi from .integration_api import IntegrationApi from .look_api import", "import LookApi from .lookml_model_api import LookmlModelApi from .project_api import ProjectApi from .query_api import", "from .dashboard_api import DashboardApi from .data_action_api import DataActionApi from .datagroup_api import 
DatagroupApi from", ".lookml_model_api import LookmlModelApi from .project_api import ProjectApi from .query_api import QueryApi from .render_task_api", "ScheduledPlanApi from .session_api import SessionApi from .space_api import SpaceApi from .theme_api import ThemeApi", "ContentApi from .dashboard_api import DashboardApi from .data_action_api import DataActionApi from .datagroup_api import DatagroupApi", "from .project_api import ProjectApi from .query_api import QueryApi from .render_task_api import RenderTaskApi from", "DashboardApi from .data_action_api import DataActionApi from .datagroup_api import DatagroupApi from .folder_api import FolderApi", "ThemeApi from .user_api import UserApi from .user_attribute_api import UserAttributeApi from .workspace_api import WorkspaceApi" ]
[ "insertMeasure(conn, measure): insert_query = ''' INSERT INTO dht_data(humidity, temperature, ts) VALUES(?, ?, ?)", "conn.cursor() cur.execute(insert_query, measure) conn.commit() def work(): import Adafruit_DHT sensor = Adafruit_DHT.DHT22 sensor_pin =", "Adafruit_DHT.DHT22 sensor_pin = 18 while True: conn = create_connection(db) create_table(conn) humidity, temperature =", "c = conn.cursor() c.execute(create_table_query) except Error as e: print(e) def create_connection(db): conn =", "def insertMeasure(conn, measure): insert_query = ''' INSERT INTO dht_data(humidity, temperature, ts) VALUES(?, ?,", "Error import time import datetime db = \"/home/pi/projects/pigrow/db.sqlite3\" def create_table(conn): create_table_query = \"\"\"", "e: print(e) return conn def insertMeasure(conn, measure): insert_query = ''' INSERT INTO dht_data(humidity,", "= datetime.datetime.now().timestamp() measure = (humidity, temperature, ts) insertMeasure(conn, measure) print(\"inserted {}\".format(measure)) conn.close() time.sleep(20)", "= conn.cursor() cur.execute(insert_query, measure) conn.commit() def work(): import Adafruit_DHT sensor = Adafruit_DHT.DHT22 sensor_pin", "= (humidity, temperature, ts) insertMeasure(conn, measure) print(\"inserted {}\".format(measure)) conn.close() time.sleep(20) print(\"Database connection does", "measure) print(\"inserted {}\".format(measure)) conn.close() time.sleep(20) print(\"Database connection does not exist\") if __name__ ==", "print(\"inserted {}\".format(measure)) conn.close() time.sleep(20) print(\"Database connection does not exist\") if __name__ == '__main__':", "= sqlite3.connect(db) print(sqlite3.version) except Error as e: print(e) return conn def insertMeasure(conn, measure):", ");\"\"\" try: c = conn.cursor() c.execute(create_table_query) except Error as e: print(e) def create_connection(db):", "temperature, ts) VALUES(?, ?, ?) 
''' cur = conn.cursor() cur.execute(insert_query, measure) conn.commit() def", "( id integer PRIMARY KEY, humidity real , temperature real, ts text );\"\"\"", "work(): import Adafruit_DHT sensor = Adafruit_DHT.DHT22 sensor_pin = 18 while True: conn =", "except Error as e: print(e) return conn def insertMeasure(conn, measure): insert_query = '''", "conn = None try: conn = sqlite3.connect(db) print(sqlite3.version) except Error as e: print(e)", "insertMeasure(conn, measure) print(\"inserted {}\".format(measure)) conn.close() time.sleep(20) print(\"Database connection does not exist\") if __name__", "while True: conn = create_connection(db) create_table(conn) humidity, temperature = Adafruit_DHT.read_retry(sensor, sensor_pin) ts =", "as e: print(e) return conn def insertMeasure(conn, measure): insert_query = ''' INSERT INTO", "(humidity, temperature, ts) insertMeasure(conn, measure) print(\"inserted {}\".format(measure)) conn.close() time.sleep(20) print(\"Database connection does not", "\"\"\" CREATE TABLE IF NOT EXISTS dht_data ( id integer PRIMARY KEY, humidity", "Error as e: print(e) def create_connection(db): conn = None try: conn = sqlite3.connect(db)", "c.execute(create_table_query) except Error as e: print(e) def create_connection(db): conn = None try: conn", "real, ts text );\"\"\" try: c = conn.cursor() c.execute(create_table_query) except Error as e:", "conn = sqlite3.connect(db) print(sqlite3.version) except Error as e: print(e) return conn def insertMeasure(conn,", "True: conn = create_connection(db) create_table(conn) humidity, temperature = Adafruit_DHT.read_retry(sensor, sensor_pin) ts = datetime.datetime.now().timestamp()", "as e: print(e) def create_connection(db): conn = None try: conn = sqlite3.connect(db) print(sqlite3.version)", "= None try: conn = sqlite3.connect(db) print(sqlite3.version) except Error as e: print(e) return", "sensor_pin) ts = datetime.datetime.now().timestamp() measure = (humidity, temperature, ts) insertMeasure(conn, measure) 
print(\"inserted {}\".format(measure))", "NOT EXISTS dht_data ( id integer PRIMARY KEY, humidity real , temperature real,", "sensor = Adafruit_DHT.DHT22 sensor_pin = 18 while True: conn = create_connection(db) create_table(conn) humidity,", "return conn def insertMeasure(conn, measure): insert_query = ''' INSERT INTO dht_data(humidity, temperature, ts)", "= create_connection(db) create_table(conn) humidity, temperature = Adafruit_DHT.read_retry(sensor, sensor_pin) ts = datetime.datetime.now().timestamp() measure =", "import time import datetime db = \"/home/pi/projects/pigrow/db.sqlite3\" def create_table(conn): create_table_query = \"\"\" CREATE", "def create_connection(db): conn = None try: conn = sqlite3.connect(db) print(sqlite3.version) except Error as", "temperature, ts) insertMeasure(conn, measure) print(\"inserted {}\".format(measure)) conn.close() time.sleep(20) print(\"Database connection does not exist\")", "except Error as e: print(e) def create_connection(db): conn = None try: conn =", "= ''' INSERT INTO dht_data(humidity, temperature, ts) VALUES(?, ?, ?) ''' cur =", "= conn.cursor() c.execute(create_table_query) except Error as e: print(e) def create_connection(db): conn = None", "try: conn = sqlite3.connect(db) print(sqlite3.version) except Error as e: print(e) return conn def", "measure = (humidity, temperature, ts) insertMeasure(conn, measure) print(\"inserted {}\".format(measure)) conn.close() time.sleep(20) print(\"Database connection", "''' INSERT INTO dht_data(humidity, temperature, ts) VALUES(?, ?, ?) 
''' cur = conn.cursor()", "text );\"\"\" try: c = conn.cursor() c.execute(create_table_query) except Error as e: print(e) def", "time import datetime db = \"/home/pi/projects/pigrow/db.sqlite3\" def create_table(conn): create_table_query = \"\"\" CREATE TABLE", "Error as e: print(e) return conn def insertMeasure(conn, measure): insert_query = ''' INSERT", "dht_data ( id integer PRIMARY KEY, humidity real , temperature real, ts text", "create_table(conn): create_table_query = \"\"\" CREATE TABLE IF NOT EXISTS dht_data ( id integer", "from sqlite3 import Error import time import datetime db = \"/home/pi/projects/pigrow/db.sqlite3\" def create_table(conn):", "CREATE TABLE IF NOT EXISTS dht_data ( id integer PRIMARY KEY, humidity real", "id integer PRIMARY KEY, humidity real , temperature real, ts text );\"\"\" try:", "None try: conn = sqlite3.connect(db) print(sqlite3.version) except Error as e: print(e) return conn", "?) ''' cur = conn.cursor() cur.execute(insert_query, measure) conn.commit() def work(): import Adafruit_DHT sensor", "= 18 while True: conn = create_connection(db) create_table(conn) humidity, temperature = Adafruit_DHT.read_retry(sensor, sensor_pin)", "ts = datetime.datetime.now().timestamp() measure = (humidity, temperature, ts) insertMeasure(conn, measure) print(\"inserted {}\".format(measure)) conn.close()", "?, ?) 
''' cur = conn.cursor() cur.execute(insert_query, measure) conn.commit() def work(): import Adafruit_DHT", "import Error import time import datetime db = \"/home/pi/projects/pigrow/db.sqlite3\" def create_table(conn): create_table_query =", "try: c = conn.cursor() c.execute(create_table_query) except Error as e: print(e) def create_connection(db): conn", "{}\".format(measure)) conn.close() time.sleep(20) print(\"Database connection does not exist\") if __name__ == '__main__': work()", "conn = create_connection(db) create_table(conn) humidity, temperature = Adafruit_DHT.read_retry(sensor, sensor_pin) ts = datetime.datetime.now().timestamp() measure", "print(e) return conn def insertMeasure(conn, measure): insert_query = ''' INSERT INTO dht_data(humidity, temperature,", "sqlite3 import Error import time import datetime db = \"/home/pi/projects/pigrow/db.sqlite3\" def create_table(conn): create_table_query", "dht_data(humidity, temperature, ts) VALUES(?, ?, ?) ''' cur = conn.cursor() cur.execute(insert_query, measure) conn.commit()", "datetime db = \"/home/pi/projects/pigrow/db.sqlite3\" def create_table(conn): create_table_query = \"\"\" CREATE TABLE IF NOT", "cur = conn.cursor() cur.execute(insert_query, measure) conn.commit() def work(): import Adafruit_DHT sensor = Adafruit_DHT.DHT22", "IF NOT EXISTS dht_data ( id integer PRIMARY KEY, humidity real , temperature", "import sqlite3 from sqlite3 import Error import time import datetime db = \"/home/pi/projects/pigrow/db.sqlite3\"", "measure) conn.commit() def work(): import Adafruit_DHT sensor = Adafruit_DHT.DHT22 sensor_pin = 18 while", "KEY, humidity real , temperature real, ts text );\"\"\" try: c = conn.cursor()", "INTO dht_data(humidity, temperature, ts) VALUES(?, ?, ?) ''' cur = conn.cursor() cur.execute(insert_query, measure)", "ts) VALUES(?, ?, ?) 
''' cur = conn.cursor() cur.execute(insert_query, measure) conn.commit() def work():", "cur.execute(insert_query, measure) conn.commit() def work(): import Adafruit_DHT sensor = Adafruit_DHT.DHT22 sensor_pin = 18", "temperature = Adafruit_DHT.read_retry(sensor, sensor_pin) ts = datetime.datetime.now().timestamp() measure = (humidity, temperature, ts) insertMeasure(conn,", "VALUES(?, ?, ?) ''' cur = conn.cursor() cur.execute(insert_query, measure) conn.commit() def work(): import", "= Adafruit_DHT.read_retry(sensor, sensor_pin) ts = datetime.datetime.now().timestamp() measure = (humidity, temperature, ts) insertMeasure(conn, measure)", "create_connection(db): conn = None try: conn = sqlite3.connect(db) print(sqlite3.version) except Error as e:", "humidity, temperature = Adafruit_DHT.read_retry(sensor, sensor_pin) ts = datetime.datetime.now().timestamp() measure = (humidity, temperature, ts)", "datetime.datetime.now().timestamp() measure = (humidity, temperature, ts) insertMeasure(conn, measure) print(\"inserted {}\".format(measure)) conn.close() time.sleep(20) print(\"Database", "INSERT INTO dht_data(humidity, temperature, ts) VALUES(?, ?, ?) 
''' cur = conn.cursor() cur.execute(insert_query,", "conn def insertMeasure(conn, measure): insert_query = ''' INSERT INTO dht_data(humidity, temperature, ts) VALUES(?,", "temperature real, ts text );\"\"\" try: c = conn.cursor() c.execute(create_table_query) except Error as", "e: print(e) def create_connection(db): conn = None try: conn = sqlite3.connect(db) print(sqlite3.version) except", "def create_table(conn): create_table_query = \"\"\" CREATE TABLE IF NOT EXISTS dht_data ( id", "print(e) def create_connection(db): conn = None try: conn = sqlite3.connect(db) print(sqlite3.version) except Error", "conn.commit() def work(): import Adafruit_DHT sensor = Adafruit_DHT.DHT22 sensor_pin = 18 while True:", "create_table_query = \"\"\" CREATE TABLE IF NOT EXISTS dht_data ( id integer PRIMARY", "''' cur = conn.cursor() cur.execute(insert_query, measure) conn.commit() def work(): import Adafruit_DHT sensor =", "PRIMARY KEY, humidity real , temperature real, ts text );\"\"\" try: c =", "import datetime db = \"/home/pi/projects/pigrow/db.sqlite3\" def create_table(conn): create_table_query = \"\"\" CREATE TABLE IF", "= \"/home/pi/projects/pigrow/db.sqlite3\" def create_table(conn): create_table_query = \"\"\" CREATE TABLE IF NOT EXISTS dht_data", "create_table(conn) humidity, temperature = Adafruit_DHT.read_retry(sensor, sensor_pin) ts = datetime.datetime.now().timestamp() measure = (humidity, temperature,", "ts text );\"\"\" try: c = conn.cursor() c.execute(create_table_query) except Error as e: print(e)", "print(sqlite3.version) except Error as e: print(e) return conn def insertMeasure(conn, measure): insert_query =", "TABLE IF NOT EXISTS dht_data ( id integer PRIMARY KEY, humidity real ,", "18 while True: conn = create_connection(db) create_table(conn) humidity, temperature = Adafruit_DHT.read_retry(sensor, sensor_pin) ts", "integer PRIMARY KEY, humidity real , temperature real, ts text );\"\"\" try: c", "= \"\"\" CREATE TABLE IF NOT EXISTS dht_data ( id integer 
PRIMARY KEY,", "ts) insertMeasure(conn, measure) print(\"inserted {}\".format(measure)) conn.close() time.sleep(20) print(\"Database connection does not exist\") if", "EXISTS dht_data ( id integer PRIMARY KEY, humidity real , temperature real, ts", "import Adafruit_DHT sensor = Adafruit_DHT.DHT22 sensor_pin = 18 while True: conn = create_connection(db)", "def work(): import Adafruit_DHT sensor = Adafruit_DHT.DHT22 sensor_pin = 18 while True: conn", "sqlite3.connect(db) print(sqlite3.version) except Error as e: print(e) return conn def insertMeasure(conn, measure): insert_query", "sqlite3 from sqlite3 import Error import time import datetime db = \"/home/pi/projects/pigrow/db.sqlite3\" def", "create_connection(db) create_table(conn) humidity, temperature = Adafruit_DHT.read_retry(sensor, sensor_pin) ts = datetime.datetime.now().timestamp() measure = (humidity,", "db = \"/home/pi/projects/pigrow/db.sqlite3\" def create_table(conn): create_table_query = \"\"\" CREATE TABLE IF NOT EXISTS", ", temperature real, ts text );\"\"\" try: c = conn.cursor() c.execute(create_table_query) except Error", "sensor_pin = 18 while True: conn = create_connection(db) create_table(conn) humidity, temperature = Adafruit_DHT.read_retry(sensor,", "= Adafruit_DHT.DHT22 sensor_pin = 18 while True: conn = create_connection(db) create_table(conn) humidity, temperature", "insert_query = ''' INSERT INTO dht_data(humidity, temperature, ts) VALUES(?, ?, ?) 
''' cur", "\"/home/pi/projects/pigrow/db.sqlite3\" def create_table(conn): create_table_query = \"\"\" CREATE TABLE IF NOT EXISTS dht_data (", "humidity real , temperature real, ts text );\"\"\" try: c = conn.cursor() c.execute(create_table_query)", "real , temperature real, ts text );\"\"\" try: c = conn.cursor() c.execute(create_table_query) except", "conn.cursor() c.execute(create_table_query) except Error as e: print(e) def create_connection(db): conn = None try:", "Adafruit_DHT.read_retry(sensor, sensor_pin) ts = datetime.datetime.now().timestamp() measure = (humidity, temperature, ts) insertMeasure(conn, measure) print(\"inserted", "Adafruit_DHT sensor = Adafruit_DHT.DHT22 sensor_pin = 18 while True: conn = create_connection(db) create_table(conn)", "measure): insert_query = ''' INSERT INTO dht_data(humidity, temperature, ts) VALUES(?, ?, ?) '''" ]
[ "import is_moderator class UserSerializer(serializers.ModelSerializer): \"\"\" Сериализатор спискового представления юзера \"\"\" is_moderator = serializers.SerializerMethodField()", "allow_blank=False, min_length=6, max_length=256, write_only=True) old_password = serializers.CharField(allow_null=False, allow_blank=False, max_length=256, write_only=True) class Meta: model", "юзера \"\"\" is_moderator = serializers.SerializerMethodField() class Meta: model = User fields = [", "] def get_is_moderator(self, instance: User): return is_moderator(instance) class RegisterSerializer(serializers.ModelSerializer): \"\"\" Сериализатор для рекистрации", "\"\"\" Сериализатор формы смены пароля \"\"\" password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True)", "max_length=256, write_only=True) class Meta: model = User fields = [ 'password', 'password_<PASSWORD>', 'old_password',", "max_length=256, write_only=True) old_password = serializers.CharField(allow_null=False, allow_blank=False, max_length=256, write_only=True) class Meta: model = User", "'email', 'is_superuser', 'is_moderator', ] def get_is_moderator(self, instance: User): return is_moderator(instance) class RegisterSerializer(serializers.ModelSerializer): \"\"\"", "User from rest_framework import serializers from rest_framework.validators import UniqueValidator from Users.utils import is_moderator", "return new class ChangePasswordSerializer(serializers.ModelSerializer): \"\"\" Сериализатор формы смены пароля \"\"\" password = serializers.CharField(allow_null=False,", "рекистрации пользователя \"\"\" username = serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())]) email = serializers.EmailField(required=False, max_length=256, allow_blank=True) password", "= serializers.CharField(allow_null=False, allow_blank=False, max_length=256, write_only=True) class Meta: model = User fields = [", "'is_superuser', 'is_moderator', 
] def get_is_moderator(self, instance: User): return is_moderator(instance) class RegisterSerializer(serializers.ModelSerializer): \"\"\" Сериализатор", "= [ 'id', 'username', 'email', 'is_superuser', 'is_moderator', ] def get_is_moderator(self, instance: User): return", "fields = [ 'password', 'password_<PASSWORD>', 'old_password', ] def update(self, instance: User, validated_data): if", "= User.objects.create(username=validated_data['username'], email=validated_data.get('email', '')) new.set_password(validated_data['password']) new.save() return new class ChangePasswordSerializer(serializers.ModelSerializer): \"\"\" Сериализатор формы", "password_confirm = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) old_password = serializers.CharField(allow_null=False, allow_blank=False, max_length=256, write_only=True)", "представления юзера \"\"\" is_moderator = serializers.SerializerMethodField() class Meta: model = User fields =", "= serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) old_password = serializers.CharField(allow_null=False, allow_blank=False, max_length=256, write_only=True) class", "username = serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())]) email = serializers.EmailField(required=False, max_length=256, allow_blank=True) password = serializers.CharField(allow_null=False, allow_blank=False,", "] def update(self, instance: User, validated_data): if not instance.check_password(validated_data['old_password']): raise serializers.ValidationError('Текущий пароль введен", "UniqueValidator from Users.utils import is_moderator class UserSerializer(serializers.ModelSerializer): \"\"\" Сериализатор спискового представления юзера \"\"\"", "rest_framework.validators import UniqueValidator from Users.utils import is_moderator class UserSerializer(serializers.ModelSerializer): \"\"\" Сериализатор спискового 
представления", "old_password = serializers.CharField(allow_null=False, allow_blank=False, max_length=256, write_only=True) class Meta: model = User fields =", "class UserSerializer(serializers.ModelSerializer): \"\"\" Сериализатор спискового представления юзера \"\"\" is_moderator = serializers.SerializerMethodField() class Meta:", "] def create(self, validated_data): new = User.objects.create(username=validated_data['username'], email=validated_data.get('email', '')) new.set_password(validated_data['password']) new.save() return new", "спискового представления юзера \"\"\" is_moderator = serializers.SerializerMethodField() class Meta: model = User fields", "'id', 'username', 'email', 'is_superuser', 'is_moderator', ] def get_is_moderator(self, instance: User): return is_moderator(instance) class", "serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) class Meta: model = User fields = [", "update(self, instance: User, validated_data): if not instance.check_password(validated_data['old_password']): raise serializers.ValidationError('Текущий пароль введен неверно') if", "= User fields = [ 'username', 'email', 'password', ] def create(self, validated_data): new", "serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())]) email = serializers.EmailField(required=False, max_length=256, allow_blank=True) password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256,", "max_length=256, write_only=True) password_confirm = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) old_password = serializers.CharField(allow_null=False, allow_blank=False,", "serializers.ValidationError('Текущий пароль введен неверно') if validated_data['password'] != validated_data['password_confirm']: raise serializers.ValidationError('Пароли не сходятся') instance.set_password(validated_data['password'])", 
"is_moderator(instance) class RegisterSerializer(serializers.ModelSerializer): \"\"\" Сериализатор для рекистрации пользователя \"\"\" username = serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())])", "Meta: model = User fields = [ 'password', 'password_<PASSWORD>', 'old_password', ] def update(self,", "= User fields = [ 'id', 'username', 'email', 'is_superuser', 'is_moderator', ] def get_is_moderator(self,", "serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) old_password = serializers.CharField(allow_null=False, allow_blank=False, max_length=256, write_only=True) class Meta:", "email=validated_data.get('email', '')) new.set_password(validated_data['password']) new.save() return new class ChangePasswordSerializer(serializers.ModelSerializer): \"\"\" Сериализатор формы смены пароля", "model = User fields = [ 'username', 'email', 'password', ] def create(self, validated_data):", "User.objects.create(username=validated_data['username'], email=validated_data.get('email', '')) new.set_password(validated_data['password']) new.save() return new class ChangePasswordSerializer(serializers.ModelSerializer): \"\"\" Сериализатор формы смены", "from rest_framework import serializers from rest_framework.validators import UniqueValidator from Users.utils import is_moderator class", "User, validated_data): if not instance.check_password(validated_data['old_password']): raise serializers.ValidationError('Текущий пароль введен неверно') if validated_data['password'] !=", "new.set_password(validated_data['password']) new.save() return new class ChangePasswordSerializer(serializers.ModelSerializer): \"\"\" Сериализатор формы смены пароля \"\"\" password", "max_length=256, allow_blank=True) password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) class Meta: model =", "password = serializers.CharField(allow_null=False, 
allow_blank=False, min_length=6, max_length=256, write_only=True) class Meta: model = User fields", "User fields = [ 'id', 'username', 'email', 'is_superuser', 'is_moderator', ] def get_is_moderator(self, instance:", "RegisterSerializer(serializers.ModelSerializer): \"\"\" Сериализатор для рекистрации пользователя \"\"\" username = serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())]) email =", "fields = [ 'username', 'email', 'password', ] def create(self, validated_data): new = User.objects.create(username=validated_data['username'],", "= [ 'password', 'password_<PASSWORD>', 'old_password', ] def update(self, instance: User, validated_data): if not", "User fields = [ 'password', 'password_<PASSWORD>', 'old_password', ] def update(self, instance: User, validated_data):", "from django.contrib.auth.models import User from rest_framework import serializers from rest_framework.validators import UniqueValidator from", "serializers.EmailField(required=False, max_length=256, allow_blank=True) password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) class Meta: model", "class RegisterSerializer(serializers.ModelSerializer): \"\"\" Сериализатор для рекистрации пользователя \"\"\" username = serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())]) email", "def update(self, instance: User, validated_data): if not instance.check_password(validated_data['old_password']): raise serializers.ValidationError('Текущий пароль введен неверно')", "instance: User, validated_data): if not instance.check_password(validated_data['old_password']): raise serializers.ValidationError('Текущий пароль введен неверно') if validated_data['password']", "min_length=6, max_length=256, write_only=True) password_confirm = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) old_password = 
serializers.CharField(allow_null=False,", "Сериализатор для рекистрации пользователя \"\"\" username = serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())]) email = serializers.EmailField(required=False, max_length=256,", "для рекистрации пользователя \"\"\" username = serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())]) email = serializers.EmailField(required=False, max_length=256, allow_blank=True)", "from Users.utils import is_moderator class UserSerializer(serializers.ModelSerializer): \"\"\" Сериализатор спискового представления юзера \"\"\" is_moderator", "serializers from rest_framework.validators import UniqueValidator from Users.utils import is_moderator class UserSerializer(serializers.ModelSerializer): \"\"\" Сериализатор", "min_length=6, max_length=256, write_only=True) class Meta: model = User fields = [ 'username', 'email',", "= serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())]) email = serializers.EmailField(required=False, max_length=256, allow_blank=True) password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6,", "import UniqueValidator from Users.utils import is_moderator class UserSerializer(serializers.ModelSerializer): \"\"\" Сериализатор спискового представления юзера", "fields = [ 'id', 'username', 'email', 'is_superuser', 'is_moderator', ] def get_is_moderator(self, instance: User):", "'username', 'email', 'is_superuser', 'is_moderator', ] def get_is_moderator(self, instance: User): return is_moderator(instance) class RegisterSerializer(serializers.ModelSerializer):", "смены пароля \"\"\" password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) password_confirm = serializers.CharField(allow_null=False,", "is_moderator = serializers.SerializerMethodField() class Meta: model = User fields = [ 'id', 'username',", "class Meta: 
model = User fields = [ 'id', 'username', 'email', 'is_superuser', 'is_moderator',", "Meta: model = User fields = [ 'username', 'email', 'password', ] def create(self,", "\"\"\" Сериализатор для рекистрации пользователя \"\"\" username = serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())]) email = serializers.EmailField(required=False,", "not instance.check_password(validated_data['old_password']): raise serializers.ValidationError('Текущий пароль введен неверно') if validated_data['password'] != validated_data['password_confirm']: raise serializers.ValidationError('Пароли", "validated_data): new = User.objects.create(username=validated_data['username'], email=validated_data.get('email', '')) new.set_password(validated_data['password']) new.save() return new class ChangePasswordSerializer(serializers.ModelSerializer): \"\"\"", "формы смены пароля \"\"\" password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) password_confirm =", "'password_<PASSWORD>', 'old_password', ] def update(self, instance: User, validated_data): if not instance.check_password(validated_data['old_password']): raise serializers.ValidationError('Текущий", "\"\"\" username = serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())]) email = serializers.EmailField(required=False, max_length=256, allow_blank=True) password = serializers.CharField(allow_null=False,", "<filename>Users/serializers.py from django.contrib.auth.models import User from rest_framework import serializers from rest_framework.validators import UniqueValidator", "max_length=256, write_only=True) class Meta: model = User fields = [ 'username', 'email', 'password',", "ChangePasswordSerializer(serializers.ModelSerializer): \"\"\" Сериализатор формы смены пароля \"\"\" password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256,", "пароля \"\"\" 
password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) password_confirm = serializers.CharField(allow_null=False, allow_blank=False,", "rest_framework import serializers from rest_framework.validators import UniqueValidator from Users.utils import is_moderator class UserSerializer(serializers.ModelSerializer):", "write_only=True) password_confirm = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) old_password = serializers.CharField(allow_null=False, allow_blank=False, max_length=256,", "new.save() return new class ChangePasswordSerializer(serializers.ModelSerializer): \"\"\" Сериализатор формы смены пароля \"\"\" password =", "'is_moderator', ] def get_is_moderator(self, instance: User): return is_moderator(instance) class RegisterSerializer(serializers.ModelSerializer): \"\"\" Сериализатор для", "from rest_framework.validators import UniqueValidator from Users.utils import is_moderator class UserSerializer(serializers.ModelSerializer): \"\"\" Сериализатор спискового", "UserSerializer(serializers.ModelSerializer): \"\"\" Сериализатор спискового представления юзера \"\"\" is_moderator = serializers.SerializerMethodField() class Meta: model", "пользователя \"\"\" username = serializers.CharField(max_length=128, validators=[UniqueValidator(queryset=User.objects.all())]) email = serializers.EmailField(required=False, max_length=256, allow_blank=True) password =", "class ChangePasswordSerializer(serializers.ModelSerializer): \"\"\" Сериализатор формы смены пароля \"\"\" password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6,", "validated_data): if not instance.check_password(validated_data['old_password']): raise serializers.ValidationError('Текущий пароль введен неверно') if validated_data['password'] != validated_data['password_confirm']:", "min_length=6, max_length=256, write_only=True) old_password = 
serializers.CharField(allow_null=False, allow_blank=False, max_length=256, write_only=True) class Meta: model =", "import User from rest_framework import serializers from rest_framework.validators import UniqueValidator from Users.utils import", "allow_blank=True) password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) class Meta: model = User", "User): return is_moderator(instance) class RegisterSerializer(serializers.ModelSerializer): \"\"\" Сериализатор для рекистрации пользователя \"\"\" username =", "'username', 'email', 'password', ] def create(self, validated_data): new = User.objects.create(username=validated_data['username'], email=validated_data.get('email', '')) new.set_password(validated_data['password'])", "'password', 'password_<PASSWORD>', 'old_password', ] def update(self, instance: User, validated_data): if not instance.check_password(validated_data['old_password']): raise", "[ 'password', 'password_<PASSWORD>', 'old_password', ] def update(self, instance: User, validated_data): if not instance.check_password(validated_data['old_password']):", "model = User fields = [ 'id', 'username', 'email', 'is_superuser', 'is_moderator', ] def", "Сериализатор спискового представления юзера \"\"\" is_moderator = serializers.SerializerMethodField() class Meta: model = User", "new = User.objects.create(username=validated_data['username'], email=validated_data.get('email', '')) new.set_password(validated_data['password']) new.save() return new class ChangePasswordSerializer(serializers.ModelSerializer): \"\"\" Сериализатор", "new class ChangePasswordSerializer(serializers.ModelSerializer): \"\"\" Сериализатор формы смены пароля \"\"\" password = serializers.CharField(allow_null=False, allow_blank=False,", "введен неверно') if validated_data['password'] != validated_data['password_confirm']: raise serializers.ValidationError('Пароли не сходятся') instance.set_password(validated_data['password']) 
instance.save() return", "неверно') if validated_data['password'] != validated_data['password_confirm']: raise serializers.ValidationError('Пароли не сходятся') instance.set_password(validated_data['password']) instance.save() return instance", "create(self, validated_data): new = User.objects.create(username=validated_data['username'], email=validated_data.get('email', '')) new.set_password(validated_data['password']) new.save() return new class ChangePasswordSerializer(serializers.ModelSerializer):", "class Meta: model = User fields = [ 'password', 'password_<PASSWORD>', 'old_password', ] def", "instance: User): return is_moderator(instance) class RegisterSerializer(serializers.ModelSerializer): \"\"\" Сериализатор для рекистрации пользователя \"\"\" username", "Meta: model = User fields = [ 'id', 'username', 'email', 'is_superuser', 'is_moderator', ]", "email = serializers.EmailField(required=False, max_length=256, allow_blank=True) password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) class", "get_is_moderator(self, instance: User): return is_moderator(instance) class RegisterSerializer(serializers.ModelSerializer): \"\"\" Сериализатор для рекистрации пользователя \"\"\"", "Users.utils import is_moderator class UserSerializer(serializers.ModelSerializer): \"\"\" Сериализатор спискового представления юзера \"\"\" is_moderator =", "= serializers.EmailField(required=False, max_length=256, allow_blank=True) password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) class Meta:", "'email', 'password', ] def create(self, validated_data): new = User.objects.create(username=validated_data['username'], email=validated_data.get('email', '')) new.set_password(validated_data['password']) new.save()", "allow_blank=False, min_length=6, max_length=256, write_only=True) password_confirm = serializers.CharField(allow_null=False, allow_blank=False, 
min_length=6, max_length=256, write_only=True) old_password =", "\"\"\" Сериализатор спискового представления юзера \"\"\" is_moderator = serializers.SerializerMethodField() class Meta: model =", "serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) password_confirm = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) old_password", "'password', ] def create(self, validated_data): new = User.objects.create(username=validated_data['username'], email=validated_data.get('email', '')) new.set_password(validated_data['password']) new.save() return", "serializers.CharField(allow_null=False, allow_blank=False, max_length=256, write_only=True) class Meta: model = User fields = [ 'password',", "= serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) class Meta: model = User fields =", "allow_blank=False, max_length=256, write_only=True) class Meta: model = User fields = [ 'password', 'password_<PASSWORD>',", "[ 'id', 'username', 'email', 'is_superuser', 'is_moderator', ] def get_is_moderator(self, instance: User): return is_moderator(instance)", "django.contrib.auth.models import User from rest_framework import serializers from rest_framework.validators import UniqueValidator from Users.utils", "import serializers from rest_framework.validators import UniqueValidator from Users.utils import is_moderator class UserSerializer(serializers.ModelSerializer): \"\"\"", "is_moderator class UserSerializer(serializers.ModelSerializer): \"\"\" Сериализатор спискового представления юзера \"\"\" is_moderator = serializers.SerializerMethodField() class", "return is_moderator(instance) class RegisterSerializer(serializers.ModelSerializer): \"\"\" Сериализатор для рекистрации пользователя \"\"\" username = serializers.CharField(max_length=128,", "= [ 'username', 'email', 'password', ] def create(self, validated_data): new = 
User.objects.create(username=validated_data['username'], email=validated_data.get('email',", "write_only=True) old_password = serializers.CharField(allow_null=False, allow_blank=False, max_length=256, write_only=True) class Meta: model = User fields", "def create(self, validated_data): new = User.objects.create(username=validated_data['username'], email=validated_data.get('email', '')) new.set_password(validated_data['password']) new.save() return new class", "if not instance.check_password(validated_data['old_password']): raise serializers.ValidationError('Текущий пароль введен неверно') if validated_data['password'] != validated_data['password_confirm']: raise", "'old_password', ] def update(self, instance: User, validated_data): if not instance.check_password(validated_data['old_password']): raise serializers.ValidationError('Текущий пароль", "instance.check_password(validated_data['old_password']): raise serializers.ValidationError('Текущий пароль введен неверно') if validated_data['password'] != validated_data['password_confirm']: raise serializers.ValidationError('Пароли не", "пароль введен неверно') if validated_data['password'] != validated_data['password_confirm']: raise serializers.ValidationError('Пароли не сходятся') instance.set_password(validated_data['password']) instance.save()", "class Meta: model = User fields = [ 'username', 'email', 'password', ] def", "[ 'username', 'email', 'password', ] def create(self, validated_data): new = User.objects.create(username=validated_data['username'], email=validated_data.get('email', ''))", "password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) password_confirm = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256,", "def get_is_moderator(self, instance: User): return is_moderator(instance) class RegisterSerializer(serializers.ModelSerializer): \"\"\" Сериализатор для рекистрации пользователя", "write_only=True) 
class Meta: model = User fields = [ 'password', 'password_<PASSWORD>', 'old_password', ]", "\"\"\" password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) password_confirm = serializers.CharField(allow_null=False, allow_blank=False, min_length=6,", "serializers.SerializerMethodField() class Meta: model = User fields = [ 'id', 'username', 'email', 'is_superuser',", "write_only=True) class Meta: model = User fields = [ 'username', 'email', 'password', ]", "validators=[UniqueValidator(queryset=User.objects.all())]) email = serializers.EmailField(required=False, max_length=256, allow_blank=True) password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True)", "model = User fields = [ 'password', 'password_<PASSWORD>', 'old_password', ] def update(self, instance:", "User fields = [ 'username', 'email', 'password', ] def create(self, validated_data): new =", "'')) new.set_password(validated_data['password']) new.save() return new class ChangePasswordSerializer(serializers.ModelSerializer): \"\"\" Сериализатор формы смены пароля \"\"\"", "\"\"\" is_moderator = serializers.SerializerMethodField() class Meta: model = User fields = [ 'id',", "raise serializers.ValidationError('Текущий пароль введен неверно') if validated_data['password'] != validated_data['password_confirm']: raise serializers.ValidationError('Пароли не сходятся')", "allow_blank=False, min_length=6, max_length=256, write_only=True) class Meta: model = User fields = [ 'username',", "= serializers.SerializerMethodField() class Meta: model = User fields = [ 'id', 'username', 'email',", "= User fields = [ 'password', 'password_<PASSWORD>', 'old_password', ] def update(self, instance: User,", "Сериализатор формы смены пароля \"\"\" password = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) password_confirm", "= 
serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True) password_confirm = serializers.CharField(allow_null=False, allow_blank=False, min_length=6, max_length=256, write_only=True)" ]
[ "range(N+NumberFrames): os.remove(\"%s%i.png\"%(TEMP_STR, i+1)) np.random.seed(100) IDs = np.random.permutation(999) i = 0 Videos = [\"OrigVideos/%s\"%v", "= I.shape[0] print(I.shape) if I.shape[0] > FrameRate*5: I = I[0:FrameRate*5, :] N =", "i == 0: print(\"Resizing by %g\"%fac) frame = scipy.misc.imresize(frame, fac) mpimage.imsave(\"%s%i.png\"%(TEMP_STR, i+1), frame)", "by %g\"%fac) frame = scipy.misc.imresize(frame, fac) mpimage.imsave(\"%s%i.png\"%(TEMP_STR, i+1), frame) PS = 60 if", "#Overwrite by default if os.path.exists(filename): os.remove(filename) command = [AVCONV_BIN, '-r', \"%i\"%FrameRate, '-i', TEMP_STR", "I.shape[0] > FrameRate*5: I = I[0:FrameRate*5, :] N = I.shape[0] frame = np.array([])", "< 0] = 0 frame[frame > 1] = 1 if IDims[0] > MAXHEIGHT:", "saveVideoID(I, IDims, fileprefix, ID, FrameRate = 30, NumberFrames = 30): N = I.shape[0]", "= 0 frame[frame > 1] = 1 if IDims[0] > MAXHEIGHT: fac1 =", "> 1] = 1 if IDims[0] > MAXHEIGHT: fac1 = MAXHEIGHT/float(IDims[0]) fac2 =", "in os.listdir(\"OrigVideos\")] for V in Videos: print(\"Saving %s...\"%V) (I, IDims) = loadVideo(V) saveVideoID(I,", "subprocess.call(command) print(N + i + 1) #Convert to video using avconv for t", "IDims) frame[frame < 0] = 0 frame[frame > 1] = 1 if IDims[0]", "command = [AVCONV_BIN, '-r', \"%i\"%FrameRate, '-i', TEMP_STR + '%d.png', '-r', \"%i\"%FrameRate, '-b', '30000k',", "I.shape[0] print(I.shape) if I.shape[0] > FrameRate*5: I = I[0:FrameRate*5, :] N = I.shape[0]", "in Videos: print(\"Saving %s...\"%V) (I, IDims) = loadVideo(V) saveVideoID(I, IDims, \"NumberedVideos/%i\"%i, IDs[i]) i", "[\"OrigVideos/%s\"%v for v in os.listdir(\"OrigVideos\")] for V in Videos: print(\"Saving %s...\"%V) (I, IDims)", "1 if IDims[0] > MAXHEIGHT: fac1 = MAXHEIGHT/float(IDims[0]) fac2 = MINWIDTH/float(IDims[1]) fac =", "os import scipy.misc MAXHEIGHT = 160 MINWIDTH = 120 def saveVideoID(I, IDims, fileprefix,", "for i in range(NumberFrames): command = [\"convert\", \"%s%i.png\"%(TEMP_STR, 
N), \"-fill\", \"red\", \"-pointsize\", \"%i\"%PS,", "N+i+1)] print(command) subprocess.call(command) print(N + i + 1) #Convert to video using avconv", "v in os.listdir(\"OrigVideos\")] for V in Videos: print(\"Saving %s...\"%V) (I, IDims) = loadVideo(V)", "mpimage.imsave(\"%s%i.png\"%(TEMP_STR, i+1), frame) PS = 60 if frame.shape[1] > MINWIDTH*1.5: PS = int(30.0*frame.shape[1]/MINWIDTH)", "= \", IDims) for i in range(N): frame = np.reshape(I[i, :], IDims) frame[frame", "frame.shape[1] > MINWIDTH*1.5: PS = int(30.0*frame.shape[1]/MINWIDTH) for i in range(NumberFrames): command = [\"convert\",", "np.reshape(I[i, :], IDims) frame[frame < 0] = 0 frame[frame > 1] = 1", "in [\"avi\", \"webm\", \"ogg\"]: filename = \"%s.%s\"%(fileprefix, t) #Overwrite by default if os.path.exists(filename):", "frame[frame > 1] = 1 if IDims[0] > MAXHEIGHT: fac1 = MAXHEIGHT/float(IDims[0]) fac2", "os.remove(\"%s%i.png\"%(TEMP_STR, i+1)) np.random.seed(100) IDs = np.random.permutation(999) i = 0 Videos = [\"OrigVideos/%s\"%v for", "%s%.3i%s'%(\"'\", ID, \"'\"), \"%s%i.png\"%(TEMP_STR, N+i+1)] print(command) subprocess.call(command) print(N + i + 1) #Convert", "i in range(NumberFrames): command = [\"convert\", \"%s%i.png\"%(TEMP_STR, N), \"-fill\", \"red\", \"-pointsize\", \"%i\"%PS, \"-draw\",", "60 if frame.shape[1] > MINWIDTH*1.5: PS = int(30.0*frame.shape[1]/MINWIDTH) for i in range(NumberFrames): command", "filename = \"%s.%s\"%(fileprefix, t) #Overwrite by default if os.path.exists(filename): os.remove(filename) command = [AVCONV_BIN,", "= [AVCONV_BIN, '-r', \"%i\"%FrameRate, '-i', TEMP_STR + '%d.png', '-r', \"%i\"%FrameRate, '-b', '30000k', filename]", "fileprefix, ID, FrameRate = 30, NumberFrames = 30): N = I.shape[0] print(I.shape) if", "N = I.shape[0] frame = np.array([]) print(\"IDims = \", IDims) for i in", "frame = np.reshape(I[i, :], IDims) frame[frame < 0] = 0 frame[frame > 1]", "import os import scipy.misc MAXHEIGHT = 160 MINWIDTH = 120 def saveVideoID(I, IDims,", "if i == 0: 
print(\"Resizing by %g\"%fac) frame = scipy.misc.imresize(frame, fac) mpimage.imsave(\"%s%i.png\"%(TEMP_STR, i+1),", "= \"%s.%s\"%(fileprefix, t) #Overwrite by default if os.path.exists(filename): os.remove(filename) command = [AVCONV_BIN, '-r',", "(I, IDims) = loadVideo(V) saveVideoID(I, IDims, \"NumberedVideos/%i\"%i, IDs[i]) i = i + 1", "= I.shape[0] frame = np.array([]) print(\"IDims = \", IDims) for i in range(N):", "[\"convert\", \"%s%i.png\"%(TEMP_STR, N), \"-fill\", \"red\", \"-pointsize\", \"%i\"%PS, \"-draw\", 'text 20,60 %s%.3i%s'%(\"'\", ID, \"'\"),", "1] = 1 if IDims[0] > MAXHEIGHT: fac1 = MAXHEIGHT/float(IDims[0]) fac2 = MINWIDTH/float(IDims[1])", "> MAXHEIGHT: fac1 = MAXHEIGHT/float(IDims[0]) fac2 = MINWIDTH/float(IDims[1]) fac = max(fac1, fac2) if", "ID, FrameRate = 30, NumberFrames = 30): N = I.shape[0] print(I.shape) if I.shape[0]", "using avconv for t in [\"avi\", \"webm\", \"ogg\"]: filename = \"%s.%s\"%(fileprefix, t) #Overwrite", "subprocess.call(command) #Clean up for i in range(N+NumberFrames): os.remove(\"%s%i.png\"%(TEMP_STR, i+1)) np.random.seed(100) IDs = np.random.permutation(999)", "#Clean up for i in range(N+NumberFrames): os.remove(\"%s%i.png\"%(TEMP_STR, i+1)) np.random.seed(100) IDs = np.random.permutation(999) i", "30): N = I.shape[0] print(I.shape) if I.shape[0] > FrameRate*5: I = I[0:FrameRate*5, :]", "t) #Overwrite by default if os.path.exists(filename): os.remove(filename) command = [AVCONV_BIN, '-r', \"%i\"%FrameRate, '-i',", "NumberFrames = 30): N = I.shape[0] print(I.shape) if I.shape[0] > FrameRate*5: I =", "+ 1) #Convert to video using avconv for t in [\"avi\", \"webm\", \"ogg\"]:", "if os.path.exists(filename): os.remove(filename) command = [AVCONV_BIN, '-r', \"%i\"%FrameRate, '-i', TEMP_STR + '%d.png', '-r',", "print(command) subprocess.call(command) print(N + i + 1) #Convert to video using avconv for", "I = I[0:FrameRate*5, :] N = I.shape[0] frame = np.array([]) print(\"IDims = \",", "i in range(N): frame = 
np.reshape(I[i, :], IDims) frame[frame < 0] = 0", "i+1)) np.random.seed(100) IDs = np.random.permutation(999) i = 0 Videos = [\"OrigVideos/%s\"%v for v", "\"webm\", \"ogg\"]: filename = \"%s.%s\"%(fileprefix, t) #Overwrite by default if os.path.exists(filename): os.remove(filename) command", "0 frame[frame > 1] = 1 if IDims[0] > MAXHEIGHT: fac1 = MAXHEIGHT/float(IDims[0])", "PS = 60 if frame.shape[1] > MINWIDTH*1.5: PS = int(30.0*frame.shape[1]/MINWIDTH) for i in", "= MAXHEIGHT/float(IDims[0]) fac2 = MINWIDTH/float(IDims[1]) fac = max(fac1, fac2) if i == 0:", "120 def saveVideoID(I, IDims, fileprefix, ID, FrameRate = 30, NumberFrames = 30): N", "os.listdir(\"OrigVideos\")] for V in Videos: print(\"Saving %s...\"%V) (I, IDims) = loadVideo(V) saveVideoID(I, IDims,", "20,60 %s%.3i%s'%(\"'\", ID, \"'\"), \"%s%i.png\"%(TEMP_STR, N+i+1)] print(command) subprocess.call(command) print(N + i + 1)", "\"-pointsize\", \"%i\"%PS, \"-draw\", 'text 20,60 %s%.3i%s'%(\"'\", ID, \"'\"), \"%s%i.png\"%(TEMP_STR, N+i+1)] print(command) subprocess.call(command) print(N", "by default if os.path.exists(filename): os.remove(filename) command = [AVCONV_BIN, '-r', \"%i\"%FrameRate, '-i', TEMP_STR +", "np.random.permutation(999) i = 0 Videos = [\"OrigVideos/%s\"%v for v in os.listdir(\"OrigVideos\")] for V", "\"red\", \"-pointsize\", \"%i\"%PS, \"-draw\", 'text 20,60 %s%.3i%s'%(\"'\", ID, \"'\"), \"%s%i.png\"%(TEMP_STR, N+i+1)] print(command) subprocess.call(command)", "\"%i\"%PS, \"-draw\", 'text 20,60 %s%.3i%s'%(\"'\", ID, \"'\"), \"%s%i.png\"%(TEMP_STR, N+i+1)] print(command) subprocess.call(command) print(N +", "= np.array([]) print(\"IDims = \", IDims) for i in range(N): frame = np.reshape(I[i,", "max(fac1, fac2) if i == 0: print(\"Resizing by %g\"%fac) frame = scipy.misc.imresize(frame, fac)", "in range(N): frame = np.reshape(I[i, :], IDims) frame[frame < 0] = 0 frame[frame", "MAXHEIGHT = 160 MINWIDTH = 120 def saveVideoID(I, IDims, fileprefix, ID, FrameRate =", "= 
scipy.misc.imresize(frame, fac) mpimage.imsave(\"%s%i.png\"%(TEMP_STR, i+1), frame) PS = 60 if frame.shape[1] > MINWIDTH*1.5:", "+ i + 1) #Convert to video using avconv for t in [\"avi\",", "= int(30.0*frame.shape[1]/MINWIDTH) for i in range(NumberFrames): command = [\"convert\", \"%s%i.png\"%(TEMP_STR, N), \"-fill\", \"red\",", "range(N): frame = np.reshape(I[i, :], IDims) frame[frame < 0] = 0 frame[frame >", "os.remove(filename) command = [AVCONV_BIN, '-r', \"%i\"%FrameRate, '-i', TEMP_STR + '%d.png', '-r', \"%i\"%FrameRate, '-b',", "fac1 = MAXHEIGHT/float(IDims[0]) fac2 = MINWIDTH/float(IDims[1]) fac = max(fac1, fac2) if i ==", "import scipy.misc MAXHEIGHT = 160 MINWIDTH = 120 def saveVideoID(I, IDims, fileprefix, ID,", "= 160 MINWIDTH = 120 def saveVideoID(I, IDims, fileprefix, ID, FrameRate = 30,", "default if os.path.exists(filename): os.remove(filename) command = [AVCONV_BIN, '-r', \"%i\"%FrameRate, '-i', TEMP_STR + '%d.png',", "VideoTools import * import subprocess import os import scipy.misc MAXHEIGHT = 160 MINWIDTH", "0: print(\"Resizing by %g\"%fac) frame = scipy.misc.imresize(frame, fac) mpimage.imsave(\"%s%i.png\"%(TEMP_STR, i+1), frame) PS =", "frame[frame < 0] = 0 frame[frame > 1] = 1 if IDims[0] >", "1) #Convert to video using avconv for t in [\"avi\", \"webm\", \"ogg\"]: filename", "frame = np.array([]) print(\"IDims = \", IDims) for i in range(N): frame =", "for V in Videos: print(\"Saving %s...\"%V) (I, IDims) = loadVideo(V) saveVideoID(I, IDims, \"NumberedVideos/%i\"%i,", "FrameRate*5: I = I[0:FrameRate*5, :] N = I.shape[0] frame = np.array([]) print(\"IDims =", "= 60 if frame.shape[1] > MINWIDTH*1.5: PS = int(30.0*frame.shape[1]/MINWIDTH) for i in range(NumberFrames):", "sys sys.path.append(\"../\") from VideoTools import * import subprocess import os import scipy.misc MAXHEIGHT", "= np.reshape(I[i, :], IDims) frame[frame < 0] = 0 frame[frame > 1] =", "IDims) for i in range(N): frame = np.reshape(I[i, :], IDims) frame[frame < 0]", "fac) 
mpimage.imsave(\"%s%i.png\"%(TEMP_STR, i+1), frame) PS = 60 if frame.shape[1] > MINWIDTH*1.5: PS =", "0] = 0 frame[frame > 1] = 1 if IDims[0] > MAXHEIGHT: fac1", "\"-fill\", \"red\", \"-pointsize\", \"%i\"%PS, \"-draw\", 'text 20,60 %s%.3i%s'%(\"'\", ID, \"'\"), \"%s%i.png\"%(TEMP_STR, N+i+1)] print(command)", "command = [\"convert\", \"%s%i.png\"%(TEMP_STR, N), \"-fill\", \"red\", \"-pointsize\", \"%i\"%PS, \"-draw\", 'text 20,60 %s%.3i%s'%(\"'\",", "\"%s%i.png\"%(TEMP_STR, N+i+1)] print(command) subprocess.call(command) print(N + i + 1) #Convert to video using", "fac2 = MINWIDTH/float(IDims[1]) fac = max(fac1, fac2) if i == 0: print(\"Resizing by", "N), \"-fill\", \"red\", \"-pointsize\", \"%i\"%PS, \"-draw\", 'text 20,60 %s%.3i%s'%(\"'\", ID, \"'\"), \"%s%i.png\"%(TEMP_STR, N+i+1)]", "%s...\"%V) (I, IDims) = loadVideo(V) saveVideoID(I, IDims, \"NumberedVideos/%i\"%i, IDs[i]) i = i +", "I[0:FrameRate*5, :] N = I.shape[0] frame = np.array([]) print(\"IDims = \", IDims) for", "'%d.png', '-r', \"%i\"%FrameRate, '-b', '30000k', filename] subprocess.call(command) #Clean up for i in range(N+NumberFrames):", "int(30.0*frame.shape[1]/MINWIDTH) for i in range(NumberFrames): command = [\"convert\", \"%s%i.png\"%(TEMP_STR, N), \"-fill\", \"red\", \"-pointsize\",", "range(NumberFrames): command = [\"convert\", \"%s%i.png\"%(TEMP_STR, N), \"-fill\", \"red\", \"-pointsize\", \"%i\"%PS, \"-draw\", 'text 20,60", "np.random.seed(100) IDs = np.random.permutation(999) i = 0 Videos = [\"OrigVideos/%s\"%v for v in", "TEMP_STR + '%d.png', '-r', \"%i\"%FrameRate, '-b', '30000k', filename] subprocess.call(command) #Clean up for i", "= I[0:FrameRate*5, :] N = I.shape[0] frame = np.array([]) print(\"IDims = \", IDims)", "def saveVideoID(I, IDims, fileprefix, ID, FrameRate = 30, NumberFrames = 30): N =", "'-i', TEMP_STR + '%d.png', '-r', \"%i\"%FrameRate, '-b', '30000k', filename] subprocess.call(command) #Clean up for", "IDims[0] > MAXHEIGHT: fac1 = MAXHEIGHT/float(IDims[0]) fac2 = 
MINWIDTH/float(IDims[1]) fac = max(fac1, fac2)", "\"%s.%s\"%(fileprefix, t) #Overwrite by default if os.path.exists(filename): os.remove(filename) command = [AVCONV_BIN, '-r', \"%i\"%FrameRate,", "frame) PS = 60 if frame.shape[1] > MINWIDTH*1.5: PS = int(30.0*frame.shape[1]/MINWIDTH) for i", "for i in range(N+NumberFrames): os.remove(\"%s%i.png\"%(TEMP_STR, i+1)) np.random.seed(100) IDs = np.random.permutation(999) i = 0", "to video using avconv for t in [\"avi\", \"webm\", \"ogg\"]: filename = \"%s.%s\"%(fileprefix,", "print(\"IDims = \", IDims) for i in range(N): frame = np.reshape(I[i, :], IDims)", "'text 20,60 %s%.3i%s'%(\"'\", ID, \"'\"), \"%s%i.png\"%(TEMP_STR, N+i+1)] print(command) subprocess.call(command) print(N + i +", "import sys sys.path.append(\"../\") from VideoTools import * import subprocess import os import scipy.misc", "\"ogg\"]: filename = \"%s.%s\"%(fileprefix, t) #Overwrite by default if os.path.exists(filename): os.remove(filename) command =", "= np.random.permutation(999) i = 0 Videos = [\"OrigVideos/%s\"%v for v in os.listdir(\"OrigVideos\")] for", "Videos: print(\"Saving %s...\"%V) (I, IDims) = loadVideo(V) saveVideoID(I, IDims, \"NumberedVideos/%i\"%i, IDs[i]) i =", "> FrameRate*5: I = I[0:FrameRate*5, :] N = I.shape[0] frame = np.array([]) print(\"IDims", "np.array([]) print(\"IDims = \", IDims) for i in range(N): frame = np.reshape(I[i, :],", "FrameRate = 30, NumberFrames = 30): N = I.shape[0] print(I.shape) if I.shape[0] >", "scipy.misc MAXHEIGHT = 160 MINWIDTH = 120 def saveVideoID(I, IDims, fileprefix, ID, FrameRate", "\"%i\"%FrameRate, '-b', '30000k', filename] subprocess.call(command) #Clean up for i in range(N+NumberFrames): os.remove(\"%s%i.png\"%(TEMP_STR, i+1))", "ID, \"'\"), \"%s%i.png\"%(TEMP_STR, N+i+1)] print(command) subprocess.call(command) print(N + i + 1) #Convert to", "'30000k', filename] subprocess.call(command) #Clean up for i in range(N+NumberFrames): os.remove(\"%s%i.png\"%(TEMP_STR, i+1)) np.random.seed(100) IDs", 
"MINWIDTH*1.5: PS = int(30.0*frame.shape[1]/MINWIDTH) for i in range(NumberFrames): command = [\"convert\", \"%s%i.png\"%(TEMP_STR, N),", "\", IDims) for i in range(N): frame = np.reshape(I[i, :], IDims) frame[frame <", "I.shape[0] frame = np.array([]) print(\"IDims = \", IDims) for i in range(N): frame", "\"%i\"%FrameRate, '-i', TEMP_STR + '%d.png', '-r', \"%i\"%FrameRate, '-b', '30000k', filename] subprocess.call(command) #Clean up", "i+1), frame) PS = 60 if frame.shape[1] > MINWIDTH*1.5: PS = int(30.0*frame.shape[1]/MINWIDTH) for", "MAXHEIGHT: fac1 = MAXHEIGHT/float(IDims[0]) fac2 = MINWIDTH/float(IDims[1]) fac = max(fac1, fac2) if i", "up for i in range(N+NumberFrames): os.remove(\"%s%i.png\"%(TEMP_STR, i+1)) np.random.seed(100) IDs = np.random.permutation(999) i =", "filename] subprocess.call(command) #Clean up for i in range(N+NumberFrames): os.remove(\"%s%i.png\"%(TEMP_STR, i+1)) np.random.seed(100) IDs =", "os.path.exists(filename): os.remove(filename) command = [AVCONV_BIN, '-r', \"%i\"%FrameRate, '-i', TEMP_STR + '%d.png', '-r', \"%i\"%FrameRate,", "in range(N+NumberFrames): os.remove(\"%s%i.png\"%(TEMP_STR, i+1)) np.random.seed(100) IDs = np.random.permutation(999) i = 0 Videos =", "import * import subprocess import os import scipy.misc MAXHEIGHT = 160 MINWIDTH =", "Videos = [\"OrigVideos/%s\"%v for v in os.listdir(\"OrigVideos\")] for V in Videos: print(\"Saving %s...\"%V)", "IDims, fileprefix, ID, FrameRate = 30, NumberFrames = 30): N = I.shape[0] print(I.shape)", "MINWIDTH/float(IDims[1]) fac = max(fac1, fac2) if i == 0: print(\"Resizing by %g\"%fac) frame", "= 0 Videos = [\"OrigVideos/%s\"%v for v in os.listdir(\"OrigVideos\")] for V in Videos:", "t in [\"avi\", \"webm\", \"ogg\"]: filename = \"%s.%s\"%(fileprefix, t) #Overwrite by default if", "MINWIDTH = 120 def saveVideoID(I, IDims, fileprefix, ID, FrameRate = 30, NumberFrames =", "print(\"Saving %s...\"%V) (I, IDims) = loadVideo(V) saveVideoID(I, IDims, \"NumberedVideos/%i\"%i, IDs[i]) i = i", "= 
[\"OrigVideos/%s\"%v for v in os.listdir(\"OrigVideos\")] for V in Videos: print(\"Saving %s...\"%V) (I,", "subprocess import os import scipy.misc MAXHEIGHT = 160 MINWIDTH = 120 def saveVideoID(I,", "frame = scipy.misc.imresize(frame, fac) mpimage.imsave(\"%s%i.png\"%(TEMP_STR, i+1), frame) PS = 60 if frame.shape[1] >", "in range(NumberFrames): command = [\"convert\", \"%s%i.png\"%(TEMP_STR, N), \"-fill\", \"red\", \"-pointsize\", \"%i\"%PS, \"-draw\", 'text", "for v in os.listdir(\"OrigVideos\")] for V in Videos: print(\"Saving %s...\"%V) (I, IDims) =", "\"'\"), \"%s%i.png\"%(TEMP_STR, N+i+1)] print(command) subprocess.call(command) print(N + i + 1) #Convert to video", "V in Videos: print(\"Saving %s...\"%V) (I, IDims) = loadVideo(V) saveVideoID(I, IDims, \"NumberedVideos/%i\"%i, IDs[i])", "fac2) if i == 0: print(\"Resizing by %g\"%fac) frame = scipy.misc.imresize(frame, fac) mpimage.imsave(\"%s%i.png\"%(TEMP_STR,", "print(\"Resizing by %g\"%fac) frame = scipy.misc.imresize(frame, fac) mpimage.imsave(\"%s%i.png\"%(TEMP_STR, i+1), frame) PS = 60", "IDs = np.random.permutation(999) i = 0 Videos = [\"OrigVideos/%s\"%v for v in os.listdir(\"OrigVideos\")]", "== 0: print(\"Resizing by %g\"%fac) frame = scipy.misc.imresize(frame, fac) mpimage.imsave(\"%s%i.png\"%(TEMP_STR, i+1), frame) PS", "for t in [\"avi\", \"webm\", \"ogg\"]: filename = \"%s.%s\"%(fileprefix, t) #Overwrite by default", "fac = max(fac1, fac2) if i == 0: print(\"Resizing by %g\"%fac) frame =", "> MINWIDTH*1.5: PS = int(30.0*frame.shape[1]/MINWIDTH) for i in range(NumberFrames): command = [\"convert\", \"%s%i.png\"%(TEMP_STR,", "= 30, NumberFrames = 30): N = I.shape[0] print(I.shape) if I.shape[0] > FrameRate*5:", "video using avconv for t in [\"avi\", \"webm\", \"ogg\"]: filename = \"%s.%s\"%(fileprefix, t)", ":], IDims) frame[frame < 0] = 0 frame[frame > 1] = 1 if", "\"-draw\", 'text 20,60 %s%.3i%s'%(\"'\", ID, \"'\"), \"%s%i.png\"%(TEMP_STR, N+i+1)] print(command) subprocess.call(command) print(N + 
i", "MAXHEIGHT/float(IDims[0]) fac2 = MINWIDTH/float(IDims[1]) fac = max(fac1, fac2) if i == 0: print(\"Resizing", "'-b', '30000k', filename] subprocess.call(command) #Clean up for i in range(N+NumberFrames): os.remove(\"%s%i.png\"%(TEMP_STR, i+1)) np.random.seed(100)", "PS = int(30.0*frame.shape[1]/MINWIDTH) for i in range(NumberFrames): command = [\"convert\", \"%s%i.png\"%(TEMP_STR, N), \"-fill\",", "160 MINWIDTH = 120 def saveVideoID(I, IDims, fileprefix, ID, FrameRate = 30, NumberFrames", "'-r', \"%i\"%FrameRate, '-i', TEMP_STR + '%d.png', '-r', \"%i\"%FrameRate, '-b', '30000k', filename] subprocess.call(command) #Clean", "= 120 def saveVideoID(I, IDims, fileprefix, ID, FrameRate = 30, NumberFrames = 30):", ":] N = I.shape[0] frame = np.array([]) print(\"IDims = \", IDims) for i", "if frame.shape[1] > MINWIDTH*1.5: PS = int(30.0*frame.shape[1]/MINWIDTH) for i in range(NumberFrames): command =", "'-r', \"%i\"%FrameRate, '-b', '30000k', filename] subprocess.call(command) #Clean up for i in range(N+NumberFrames): os.remove(\"%s%i.png\"%(TEMP_STR,", "\"%s%i.png\"%(TEMP_STR, N), \"-fill\", \"red\", \"-pointsize\", \"%i\"%PS, \"-draw\", 'text 20,60 %s%.3i%s'%(\"'\", ID, \"'\"), \"%s%i.png\"%(TEMP_STR,", "if IDims[0] > MAXHEIGHT: fac1 = MAXHEIGHT/float(IDims[0]) fac2 = MINWIDTH/float(IDims[1]) fac = max(fac1,", "print(I.shape) if I.shape[0] > FrameRate*5: I = I[0:FrameRate*5, :] N = I.shape[0] frame", "= MINWIDTH/float(IDims[1]) fac = max(fac1, fac2) if i == 0: print(\"Resizing by %g\"%fac)", "+ '%d.png', '-r', \"%i\"%FrameRate, '-b', '30000k', filename] subprocess.call(command) #Clean up for i in", "i in range(N+NumberFrames): os.remove(\"%s%i.png\"%(TEMP_STR, i+1)) np.random.seed(100) IDs = np.random.permutation(999) i = 0 Videos", "= [\"convert\", \"%s%i.png\"%(TEMP_STR, N), \"-fill\", \"red\", \"-pointsize\", \"%i\"%PS, \"-draw\", 'text 20,60 %s%.3i%s'%(\"'\", ID,", "if I.shape[0] > FrameRate*5: I = I[0:FrameRate*5, :] N = I.shape[0] frame =", "0 Videos = 
[\"OrigVideos/%s\"%v for v in os.listdir(\"OrigVideos\")] for V in Videos: print(\"Saving", "from VideoTools import * import subprocess import os import scipy.misc MAXHEIGHT = 160", "import subprocess import os import scipy.misc MAXHEIGHT = 160 MINWIDTH = 120 def", "%g\"%fac) frame = scipy.misc.imresize(frame, fac) mpimage.imsave(\"%s%i.png\"%(TEMP_STR, i+1), frame) PS = 60 if frame.shape[1]", "= max(fac1, fac2) if i == 0: print(\"Resizing by %g\"%fac) frame = scipy.misc.imresize(frame,", "print(N + i + 1) #Convert to video using avconv for t in", "= 30): N = I.shape[0] print(I.shape) if I.shape[0] > FrameRate*5: I = I[0:FrameRate*5,", "= 1 if IDims[0] > MAXHEIGHT: fac1 = MAXHEIGHT/float(IDims[0]) fac2 = MINWIDTH/float(IDims[1]) fac", "for i in range(N): frame = np.reshape(I[i, :], IDims) frame[frame < 0] =", "i = 0 Videos = [\"OrigVideos/%s\"%v for v in os.listdir(\"OrigVideos\")] for V in", "* import subprocess import os import scipy.misc MAXHEIGHT = 160 MINWIDTH = 120", "scipy.misc.imresize(frame, fac) mpimage.imsave(\"%s%i.png\"%(TEMP_STR, i+1), frame) PS = 60 if frame.shape[1] > MINWIDTH*1.5: PS", "i + 1) #Convert to video using avconv for t in [\"avi\", \"webm\",", "N = I.shape[0] print(I.shape) if I.shape[0] > FrameRate*5: I = I[0:FrameRate*5, :] N", "[AVCONV_BIN, '-r', \"%i\"%FrameRate, '-i', TEMP_STR + '%d.png', '-r', \"%i\"%FrameRate, '-b', '30000k', filename] subprocess.call(command)", "sys.path.append(\"../\") from VideoTools import * import subprocess import os import scipy.misc MAXHEIGHT =", "#Convert to video using avconv for t in [\"avi\", \"webm\", \"ogg\"]: filename =", "30, NumberFrames = 30): N = I.shape[0] print(I.shape) if I.shape[0] > FrameRate*5: I", "avconv for t in [\"avi\", \"webm\", \"ogg\"]: filename = \"%s.%s\"%(fileprefix, t) #Overwrite by", "[\"avi\", \"webm\", \"ogg\"]: filename = \"%s.%s\"%(fileprefix, t) #Overwrite by default if os.path.exists(filename): os.remove(filename)" ]
[ "render_template site_admin = Blueprint(\"admin_site\", __name__, url_prefix=\"/admin\",template_folder=\"template\", static_folder=\"static\", static_url_path=\"/admin/static\") @site_admin.route(\"/\") def admin(): return render_template(\"index.html\")", "Blueprint, render_template site_admin = Blueprint(\"admin_site\", __name__, url_prefix=\"/admin\",template_folder=\"template\", static_folder=\"static\", static_url_path=\"/admin/static\") @site_admin.route(\"/\") def admin(): return", "flask import Blueprint, render_template site_admin = Blueprint(\"admin_site\", __name__, url_prefix=\"/admin\",template_folder=\"template\", static_folder=\"static\", static_url_path=\"/admin/static\") @site_admin.route(\"/\") def", "import Blueprint, render_template site_admin = Blueprint(\"admin_site\", __name__, url_prefix=\"/admin\",template_folder=\"template\", static_folder=\"static\", static_url_path=\"/admin/static\") @site_admin.route(\"/\") def admin():", "from flask import Blueprint, render_template site_admin = Blueprint(\"admin_site\", __name__, url_prefix=\"/admin\",template_folder=\"template\", static_folder=\"static\", static_url_path=\"/admin/static\") @site_admin.route(\"/\")" ]
[ "code printing tests.\"\"\" import pytest from diofant import (QQ, Catalan, Derivative, Dummy, E,", "mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> 1]]' e = Limit(sin(x)/x, x,", "diofant.abc import x, y, z __all__ = () f = Function('f') def test_Integer():", "MyFunc1(Function): @classmethod def eval(cls, x): pass class MyFunc2(Function): @classmethod def eval(cls, x, y):", "(mathematica_code(hyper((1, 2, 3), (3, 4), x)) == 'HypergeometricPFQ[{1, 2, 3}, {3, 4}, x]')", "== 'x^(2/3)' def test_Mul(): A, B, C, D = symbols('A B C D',", "oo))) == \\ 'Hold[Sum[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \\ '{y, -Infinity,", "x -> 0, Direction -> 1]]' e = Limit(sin(x)/x, x, 0, 'real') assert", "(x, 0, 10))) == 'Hold[Sum[Sin[x], {x, 0, 10}]]' assert mathematica_code(Sum(exp(-x**2 - y**2), (x,", "0, 10))) == 'Hold[Sum[Sin[x], {x, 0, 10}]]' assert mathematica_code(Sum(exp(-x**2 - y**2), (x, -oo,", "'ArcSinh[x]' assert mathematica_code(acosh(x)) == 'ArcCosh[x]' assert mathematica_code(atanh(x)) == 'ArcTanh[x]' assert mathematica_code(acoth(x)) == 'ArcCoth[x]'", "C, D = symbols('A B C D', commutative=False) assert mathematica_code(x*y*z) == 'x*y*z' assert", "'{1, 2, 3, {4, 5, {6, 7}}, 8, {9, 10}, 11}' assert mathematica_code((1,", "oo), (y, -oo, oo))) == \\ 'Hold[Integrate[E^(-x^2 - y^2), {x, -Infinity, Infinity}, '", "== '67' assert mathematica_code(Integer(-1)) == '-1' def test_Rational(): assert mathematica_code(Rational(3, 7)) == '3/7'", "(1 + x, True)) assert (mathematica_code(g) == 'Piecewise[{{0, x >= 1 || x", "3, evaluate=False)) == 'Pi > 3' def test_Booleans(): assert mathematica_code(true) == 'True' assert", "8, [9, 10], 11]) == \\ '{1, 2, 3, {4, 5, {6, 7}},", "- 1 &, 4],' ' {1, 0, 3, 2, 1}]') def test_Limit(): e", "g = Piecewise((0, Or(x <= -1, x >= 1)), (1 - x, x", "> 3' def test_Booleans(): assert mathematica_code(true) == 'True' assert mathematica_code(false) == 'False' def", "== '2' assert mathematica_code(Rational(3, -7)) == '-3/7' assert 
mathematica_code(Rational(-3, -7)) == '3/7' assert", "Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def test_Sum(): assert mathematica_code(Sum(sin(x), (x, 0, 10)))", "y]' assert mathematica_code(log(x)) == 'Log[x]' assert mathematica_code(tan(x)) == 'Tan[x]' assert mathematica_code(cot(x)) == 'Cot[x]'", "'x^(-1.0)' assert mathematica_code(x**Rational(2, 3)) == 'x^(2/3)' def test_Mul(): A, B, C, D =", "{6, 7}}, 8, {9, 10}, 11}' assert mathematica_code((1, 2, (3, 4))) == '{1,", "loggamma, mathematica_code, meijerg, oo, pi, polygamma, polylog, re, rf, sech, sign, sin, sinh,", "> 0), (1 + x, True)) assert (mathematica_code(g) == 'Piecewise[{{0, x >= 1", "E, Eq, EulerGamma, Function, Gt, Heaviside, Integer, Integral, Lambda, Le, Limit, Matrix, Max,", "== 'ArcTanh[x]' assert (mathematica_code(meijerg(((1, 1), (3, 4)), ((1,), ()), x)) == 'MeijerG[{{1, 1},", "y**2), (x, -oo, oo), (y, -oo, oo))) == \\ 'Hold[Sum[E^(-x^2 - y^2), {x,", "assert (mathematica_code(hyper((1, 2, 3), (3, 4), x)) == 'HypergeometricPFQ[{1, 2, 3}, {3, 4},", "mathematica_code(gamma(x)) == 'Gamma[x]' assert mathematica_code(zeta(x)) == 'Zeta[x]' assert mathematica_code(Heaviside(x)) == 'UnitStep[x]' assert mathematica_code(fibonacci(x))", "= Function('f') def test_Integer(): assert mathematica_code(Integer(67)) == '67' assert mathematica_code(Integer(-1)) == '-1' def", "'x == y' assert mathematica_code(Ne(x, y/(1 + y**2))) == 'x != (y/(y^2 +", "= Lambda(x, x**2) assert mathematica_code(f1) == 'Function[{x}, x^2]' f2 = Lambda((x, y), x", "== ('AlgebraicNumber[Root[#^7 + 3*# - 1 &, 4],' ' {1, 0, 3, 2,", "mathematica_code(Sum(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))) == \\ 'Hold[Sum[E^(-x^2 -", "def test_Rational(): assert mathematica_code(Rational(3, 7)) == '3/7' assert mathematica_code(Rational(18, 9)) == '2' assert", "assert mathematica_code(Derivative(x, x)) == 'Hold[D[x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, (x, 2))) == 'Hold[D[y^4*Sin[x], x,", "y^x)/(x^2 + y)' assert 
mathematica_code(x**-1.0) == 'x^(-1.0)' assert mathematica_code(x**Rational(2, 3)) == 'x^(2/3)' def", "-Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def test_Matrix(): assert mathematica_code(Matrix()) == '{}'", "(x, 2))) == 'Hold[D[y^4*Sin[x], x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, y, x)) == 'Hold[D[y^4*Sin[x],", "assert mathematica_code(acosh(x)) == 'ArcCosh[x]' assert mathematica_code(atanh(x)) == 'ArcTanh[x]' assert mathematica_code(acoth(x)) == 'ArcCoth[x]' assert", "1, True}}]') def test_RootOf(): p = Poly(x**3 + y*x + 1, x) assert", "y, z)) == 'f[x, y, z]' assert mathematica_code(sin(x) ** cos(x)) == 'Sin[x]^Cos[x]' assert", "test_Limit(): e = Limit(sin(x)/x, x, 0) assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0,", "assert mathematica_code(rf(x, y)) == 'Pochhammer[x, y]' assert mathematica_code(gamma(x)) == 'Gamma[x]' assert mathematica_code(zeta(x)) ==", "Le, Limit, Matrix, Max, Min, Ne, Or, Piecewise, Poly, Rational, RootOf, RootSum, SparseMatrix,", "-> 1]]' e = Limit(sin(x)/x, x, 0, 'real') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x", "Ne, Or, Piecewise, Poly, Rational, RootOf, RootSum, SparseMatrix, Sum, Tuple, acos, acosh, acot,", "== 'Sin[x]^Cos[x]' assert mathematica_code(sign(x)) == 'Sign[x]' assert mathematica_code(atanh(x), user_functions={'atanh': 'ArcTanh'}) == 'ArcTanh[x]' assert", "x)) == 'Hold[D[x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, (x, 2))) == 'Hold[D[y^4*Sin[x], x, x]]' assert", "a = K([1, 0, 3, 2, 1]) assert mathematica_code(a) == ('AlgebraicNumber[Root[#^7 + 3*#", "x^2' assert mathematica_code(Gt(pi, 3, evaluate=False)) == 'Pi > 3' def test_Booleans(): assert mathematica_code(true)", "assert mathematica_code(Derivative(sin(x), x)) == 'Hold[D[Sin[x], x]]' assert mathematica_code(Derivative(x, x)) == 'Hold[D[x, x]]' assert", "Direction -> 1]]' e = Limit(sin(x)/x, x, 0, 'real') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x,", "mathematica_code(tan(x)) == 'Tan[x]' 
assert mathematica_code(cot(x)) == 'Cot[x]' assert mathematica_code(asin(x)) == 'ArcSin[x]' assert mathematica_code(acos(x))", "def eval(cls, x, y): pass pytest.raises(ValueError, lambda: mathematica_code(MyFunc1(x), user_functions={'MyFunc1': ['Myfunc1']})) assert mathematica_code(MyFunc1(x), user_functions={'MyFunc1':", "lambda: mathematica_code(MyFunc1(x), user_functions={'MyFunc1': ['Myfunc1']})) assert mathematica_code(MyFunc1(x), user_functions={'MyFunc1': 'Myfunc1'}) == 'Myfunc1[x]' assert mathematica_code(MyFunc2(x, y),", "mathematica_code(Derivative(sin(x)*y**4, x, (y, 3), x)) == 'Hold[D[y^4*Sin[x], x, y, y, y, x]]' def", "'ArcSin[x]' assert mathematica_code(acos(x)) == 'ArcCos[x]' assert mathematica_code(atan(x)) == 'ArcTan[x]' assert mathematica_code(acot(x)) == 'ArcCot[x]'", "def test_Pow(): assert mathematica_code(x**3) == 'x^3' assert mathematica_code(x**(y**3)) == 'x^(y^3)' assert mathematica_code(1/(f(x)*3.5)**(x -", "x)) == 'Hold[Integrate[Sin[Sin[x]], x]]' assert mathematica_code(Integral(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo,", "-1}, ' '{-x + 1, x > 0}, {x + 1, True}}]') def", "Eq, EulerGamma, Function, Gt, Heaviside, Integer, Integral, Lambda, Le, Limit, Matrix, Max, Min,", "mathematica_code(f2) == 'Function[{x, y}, x + 2*y]' def test_Derivative(): assert mathematica_code(Derivative(f(x), x, x))", "(y, 3), x)) == 'Hold[D[y^4*Sin[x], x, y, y, y, x]]' def test_Pow(): assert", "mathematica_code(Integer(-1)) == '-1' def test_Rational(): assert mathematica_code(Rational(3, 7)) == '3/7' assert mathematica_code(Rational(18, 9))", "== 'UnitStep[x]' assert mathematica_code(fibonacci(x)) == 'Fibonacci[x]' assert mathematica_code(polylog(x, y)) == 'PolyLog[x, y]' assert", "0), (1 + x, True)) assert (mathematica_code(g) == 'Piecewise[{{0, x >= 1 ||", "0) assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> -1]]' e =", "assert mathematica_code(f2) == 'Function[{x, y}, x + 2*y]' def test_Derivative(): assert 
mathematica_code(Derivative(f(x), x,", "'Pochhammer[x, y]' assert mathematica_code(gamma(x)) == 'Gamma[x]' assert mathematica_code(zeta(x)) == 'Zeta[x]' assert mathematica_code(Heaviside(x)) ==", "'Function[{x, y}, x + 2*y]' def test_Derivative(): assert mathematica_code(Derivative(f(x), x, x)) == 'Hold[D[f[x],", "'Hold[D[y^4*Sin[x], x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, y, x)) == 'Hold[D[y^4*Sin[x], x, y, x]]'", "2, 3])) == '{1, 2, 3}' def test_Integral(): assert mathematica_code(Integral(sin(sin(x)), x)) == 'Hold[Integrate[Sin[Sin[x]],", "assert mathematica_code(acoth(x)) == 'ArcCoth[x]' assert mathematica_code(sech(x)) == 'Sech[x]' assert mathematica_code(csch(x)) == 'Csch[x]' assert", "x + 2*y) assert mathematica_code(f2) == 'Function[{x, y}, x + 2*y]' def test_Derivative():", "== 'Catalan' assert mathematica_code(E) == 'E' def test_containers(): assert mathematica_code([1, 2, 3, [4,", "assert mathematica_code(atanh(x), user_functions={'atanh': 'ArcTanh'}) == 'ArcTanh[x]' assert (mathematica_code(meijerg(((1, 1), (3, 4)), ((1,), ()),", "4444}}' m = SparseMatrix(m) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}'", "<filename>diofant/tests/printing/test_mathematica.py \"\"\"Mathematica code printing tests.\"\"\" import pytest from diofant import (QQ, Catalan, Derivative,", "' \\ '{y, -Infinity, Infinity}]]' def test_Matrix(): assert mathematica_code(Matrix()) == '{}' m =", "Integer, Integral, Lambda, Le, Limit, Matrix, Max, Min, Ne, Or, Piecewise, Poly, Rational,", "-> 0, Direction -> 1]]' e = Limit(sin(x)/x, x, 0, 'real') assert mathematica_code(e)", "mathematica_code(cosh(x)) == 'Cosh[x]' assert mathematica_code(tanh(x)) == 'Tanh[x]' assert mathematica_code(coth(x)) == 'Coth[x]' assert mathematica_code(asinh(x))", "(3, 4)), ((1,), ()), x)) == 'MeijerG[{{1, 1}, {3, 4}}, {{1}, {}}, x]')", "@classmethod def eval(cls, x): pass class MyFunc2(Function): @classmethod def eval(cls, x, y): pass", "== 'x*y*A' 
assert mathematica_code(x*y*A*B) == 'x*y*A**B' assert mathematica_code(x*y*A*B*C) == 'x*y*A**B**C' assert mathematica_code(x*A*B*(C +", "== 'Hold[Sum[Sin[x], {x, 0, 10}]]' assert mathematica_code(Sum(exp(-x**2 - y**2), (x, -oo, oo), (y,", "mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> -1]]' e = Limit(sin(x)/x, x,", "rf, sech, sign, sin, sinh, symbols, tan, tanh, true, zeta) from diofant.abc import", "== 'Fibonacci[x]' assert mathematica_code(polylog(x, y)) == 'PolyLog[x, y]' assert mathematica_code(loggamma(x)) == 'LogGamma[x]' class", "assert mathematica_code(Eq(x, y)) == 'x == y' assert mathematica_code(Ne(x, y/(1 + y**2))) ==", "mathematica_code(acosh(x)) == 'ArcCosh[x]' assert mathematica_code(atanh(x)) == 'ArcTanh[x]' assert mathematica_code(acoth(x)) == 'ArcCoth[x]' assert mathematica_code(sech(x))", "commutative=False) assert mathematica_code(x*y*z) == 'x*y*z' assert mathematica_code(x*y*A) == 'x*y*A' assert mathematica_code(x*y*A*B) == 'x*y*A**B'", "mathematica_code(MyFunc2(x, y), user_functions={'MyFunc2': [(lambda *x: False, 'Myfunc2')]}) == 'MyFunc2[x, y]' def test_Lambda(): f1", "meijerg, oo, pi, polygamma, polylog, re, rf, sech, sign, sin, sinh, symbols, tan,", "mathematica_code(csch(x)) == 'Csch[x]' assert mathematica_code(erfc(x)) == 'Erfc[x]' assert mathematica_code(conjugate(x)) == 'Conjugate[x]' assert mathematica_code(re(x))", "(QQ, Catalan, Derivative, Dummy, E, Eq, EulerGamma, Function, Gt, Heaviside, Integer, Integral, Lambda,", "assert (mathematica_code(g) == 'Piecewise[{{0, x >= 1 || x <= -1}, ' '{-x", "+ 1, x) assert mathematica_code(RootOf(p, 0)) == 'Root[#^3 + #*y + 1 &,", "def test_Derivative(): assert mathematica_code(Derivative(f(x), x, x)) == 'Hold[D[f[x], x, x]]' assert mathematica_code(Derivative(sin(x), x))", "== 'x^(y^3)' assert mathematica_code(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) == \\ '(3.5*f[x])^(-x + y^x)/(x^2", "x, x)) == 'Hold[D[f[x], x, x]]' assert mathematica_code(Derivative(sin(x), x)) == 
'Hold[D[Sin[x], x]]' assert", "-Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def test_Sum(): assert mathematica_code(Sum(sin(x), (x, 0,", "mathematica_code(E) == 'E' def test_containers(): assert mathematica_code([1, 2, 3, [4, 5, [6, 7]],", "mathematica_code(coth(x)) == 'Coth[x]' assert mathematica_code(asinh(x)) == 'ArcSinh[x]' assert mathematica_code(acosh(x)) == 'ArcCosh[x]' assert mathematica_code(atanh(x))", "assert mathematica_code(x**-1.0) == 'x^(-1.0)' assert mathematica_code(x**Rational(2, 3)) == 'x^(2/3)' def test_Mul(): A, B,", "+ 2*y]' def test_Derivative(): assert mathematica_code(Derivative(f(x), x, x)) == 'Hold[D[f[x], x, x]]' assert", "y)) == 'PolyGamma[x, y]' assert mathematica_code(factorial(x)) == 'Factorial[x]' assert mathematica_code(factorial2(x)) == 'Factorial2[x]' assert", "issue sympy/sympy#15344 assert mathematica_code(binomial(x, y)) == 'Binomial[x, y]' assert mathematica_code(log(x)) == 'Log[x]' assert", "'ArcTanh[x]' assert (mathematica_code(meijerg(((1, 1), (3, 4)), ((1,), ()), x)) == 'MeijerG[{{1, 1}, {3,", "mathematica_code(Ne(x, y/(1 + y**2))) == 'x != (y/(y^2 + 1))' assert mathematica_code(Le(0, x**2))", "(y/(y^2 + 1))' assert mathematica_code(Le(0, x**2)) == '0 <= x^2' assert mathematica_code(Gt(pi, 3,", "def test_Mul(): A, B, C, D = symbols('A B C D', commutative=False) assert", "\"\"\"Mathematica code printing tests.\"\"\" import pytest from diofant import (QQ, Catalan, Derivative, Dummy,", "'x^3' assert mathematica_code(x**(y**3)) == 'x^(y^3)' assert mathematica_code(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) == \\", "D', commutative=False) assert mathematica_code(x*y*z) == 'x*y*z' assert mathematica_code(x*y*A) == 'x*y*A' assert mathematica_code(x*y*A*B) ==", "== 'Min[x, y]' assert mathematica_code(Max(x, y)) == 'Max[x, y]' assert mathematica_code(Max(x, 2)) ==", "factorial2, false, fibonacci, gamma, hyper, im, log, loggamma, mathematica_code, meijerg, oo, pi, polygamma,", "Lambda((x, y), x + 2*y) assert 
mathematica_code(f2) == 'Function[{x, y}, x + 2*y]'", "** cos(x)) == 'Sin[x]^Cos[x]' assert mathematica_code(sign(x)) == 'Sign[x]' assert mathematica_code(atanh(x), user_functions={'atanh': 'ArcTanh'}) ==", "mathematica_code(-oo) == '-Infinity' assert mathematica_code(EulerGamma) == 'EulerGamma' assert mathematica_code(Catalan) == 'Catalan' assert mathematica_code(E)", "test_Sum(): assert mathematica_code(Sum(sin(x), (x, 0, 10))) == 'Hold[Sum[Sin[x], {x, 0, 10}]]' assert mathematica_code(Sum(exp(-x**2", "'Catalan' assert mathematica_code(E) == 'E' def test_containers(): assert mathematica_code([1, 2, 3, [4, 5,", "4}}, {{1}, {}}, x]') assert (mathematica_code(hyper((1, 2, 3), (3, 4), x)) == 'HypergeometricPFQ[{1,", "x**2) assert mathematica_code(f1) == 'Function[{x}, x^2]' f2 = Lambda((x, y), x + 2*y)", "mathematica_code(false) == 'False' def test_Piecewise(): g = Piecewise((0, Or(x <= -1, x >=", "(mathematica_code(g) == 'Piecewise[{{0, x >= 1 || x <= -1}, ' '{-x +", "= Poly(x**3 + y*x + 1, x) assert mathematica_code(RootOf(p, 0)) == 'Root[#^3 +", "x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, y, x)) == 'Hold[D[y^4*Sin[x], x, y, x]]' assert mathematica_code(Derivative(sin(x)*y**4,", "Sum, Tuple, acos, acosh, acot, acoth, asin, asinh, atan, atanh, binomial, conjugate, cos,", "x > 0}, {x + 1, True}}]') def test_RootOf(): p = Poly(x**3 +", "== 'LogGamma[x]' class MyFunc1(Function): @classmethod def eval(cls, x): pass class MyFunc2(Function): @classmethod def", "exp, factorial, factorial2, false, fibonacci, gamma, hyper, im, log, loggamma, mathematica_code, meijerg, oo,", "mathematica_code(asin(x)) == 'ArcSin[x]' assert mathematica_code(acos(x)) == 'ArcCos[x]' assert mathematica_code(atan(x)) == 'ArcTan[x]' assert mathematica_code(acot(x))", "'Function[{y}, Log[y*z]]]') def test_AlgebraicElement(): r = RootOf(x**7 + 3*x - 1, 3) K", "'Sign[x]' assert mathematica_code(atanh(x), user_functions={'atanh': 'ArcTanh'}) == 'ArcTanh[x]' assert 
(mathematica_code(meijerg(((1, 1), (3, 4)), ((1,),", "4}}' assert mathematica_code([1]) == '{1}' assert mathematica_code((1,)) == '{1}' assert mathematica_code(Tuple(*[1, 2, 3]))", "import (QQ, Catalan, Derivative, Dummy, E, Eq, EulerGamma, Function, Gt, Heaviside, Integer, Integral,", "+ x, True)) assert (mathematica_code(g) == 'Piecewise[{{0, x >= 1 || x <=", "y**2), (x, -oo, oo), (y, -oo, oo))) == \\ 'Hold[Integrate[E^(-x^2 - y^2), {x,", "== 'PolyLog[x, y]' assert mathematica_code(loggamma(x)) == 'LogGamma[x]' class MyFunc1(Function): @classmethod def eval(cls, x):", "pi, polygamma, polylog, re, rf, sech, sign, sin, sinh, symbols, tan, tanh, true,", "e = Limit(sin(x)/x, x, 0) assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction", "'Fibonacci[x]' assert mathematica_code(polylog(x, y)) == 'PolyLog[x, y]' assert mathematica_code(loggamma(x)) == 'LogGamma[x]' class MyFunc1(Function):", "'x != (y/(y^2 + 1))' assert mathematica_code(Le(0, x**2)) == '0 <= x^2' assert", "cos, cosh, cot, coth, csch, erfc, exp, factorial, factorial2, false, fibonacci, gamma, hyper,", "RootOf, RootSum, SparseMatrix, Sum, Tuple, acos, acosh, acot, acoth, asin, asinh, atan, atanh,", "mathematica_code(x*y*A*B) == 'x*y*A**B' assert mathematica_code(x*y*A*B*C) == 'x*y*A**B**C' assert mathematica_code(x*A*B*(C + D)*A*y) == 'x*y*A**B**(C", "assert mathematica_code(x*y*A*B*C) == 'x*y*A**B**C' assert mathematica_code(x*A*B*(C + D)*A*y) == 'x*y*A**B**(C + D)**A' def", "Dummy('d') assert mathematica_code(d) == f'd{d.dummy_index}' def test_Function(): assert mathematica_code(f(x, y, z)) == 'f[x,", "y]' assert mathematica_code(factorial(x)) == 'Factorial[x]' assert mathematica_code(factorial2(x)) == 'Factorial2[x]' assert mathematica_code(rf(x, y)) ==", "1]]' e = Limit(sin(x)/x, x, 0, 'real') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x ->", "assert mathematica_code(Rational(3, 7)*x) == '(3/7)*x' def test_symbols(): assert mathematica_code(x) == 'x' d =", "y), x + 2*y) assert 
mathematica_code(f2) == 'Function[{x, y}, x + 2*y]' def", "== '-1' def test_Rational(): assert mathematica_code(Rational(3, 7)) == '3/7' assert mathematica_code(Rational(18, 9)) ==", "assert mathematica_code(EulerGamma) == 'EulerGamma' assert mathematica_code(Catalan) == 'Catalan' assert mathematica_code(E) == 'E' def", "y)) == 'PolyLog[x, y]' assert mathematica_code(loggamma(x)) == 'LogGamma[x]' class MyFunc1(Function): @classmethod def eval(cls,", "user_functions={'MyFunc2': [(lambda *x: False, 'Myfunc2')]}) == 'MyFunc2[x, y]' def test_Lambda(): f1 = Lambda(x,", "Limit(sin(x)/x, x, 0, '-') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction ->", "'Re[x]' assert mathematica_code(im(x)) == 'Im[x]' assert mathematica_code(polygamma(x, y)) == 'PolyGamma[x, y]' assert mathematica_code(factorial(x))", "true, zeta) from diofant.abc import x, y, z __all__ = () f =", "'Hold[D[y^4*Sin[x], x, y, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, (y, 3), x)) == 'Hold[D[y^4*Sin[x], x,", "4), x)) == 'HypergeometricPFQ[{1, 2, 3}, {3, 4}, x]') assert mathematica_code(Min(x, y)) ==", "mathematica_code(MyFunc1(x), user_functions={'MyFunc1': 'Myfunc1'}) == 'Myfunc1[x]' assert mathematica_code(MyFunc2(x, y), user_functions={'MyFunc2': [(lambda *x: False, 'Myfunc2')]})", "== 'x^3' assert mathematica_code(x**(y**3)) == 'x^(y^3)' assert mathematica_code(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) ==", "10))) == 'Hold[Sum[Sin[x], {x, 0, 10}]]' assert mathematica_code(Sum(exp(-x**2 - y**2), (x, -oo, oo),", "x >= 1 || x <= -1}, ' '{-x + 1, x >", "'Root[#^3 + #*y + 1 &, 1]' def test_RootSum(): r = RootSum(x**3 +", "'-1' def test_Rational(): assert mathematica_code(Rational(3, 7)) == '3/7' assert mathematica_code(Rational(18, 9)) == '2'", "3*x - 1, 3) K = QQ.algebraic_field(r) a = K([1, 0, 3, 2,", "&, 1]' def test_RootSum(): r = RootSum(x**3 + x + 3, Lambda(y, log(y*z)))", "conjugate, cos, cosh, cot, coth, csch, erfc, exp, factorial, factorial2, false, fibonacci, gamma,", 
"assert mathematica_code(x) == 'x' d = Dummy('d') assert mathematica_code(d) == f'd{d.dummy_index}' def test_Function():", "== '3/7' assert mathematica_code(x + Rational(3, 7)) == 'x + 3/7' assert mathematica_code(Rational(3,", "log(y*z))) assert mathematica_code(r) == ('RootSum[Function[{x}, x^3 + x + 3], ' 'Function[{y}, Log[y*z]]]')", "== 'x != (y/(y^2 + 1))' assert mathematica_code(Le(0, x**2)) == '0 <= x^2'", "0, '-') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> 1]]' e", "7}}, 8, {9, 10}, 11}' assert mathematica_code((1, 2, (3, 4))) == '{1, 2,", "test_Mul(): A, B, C, D = symbols('A B C D', commutative=False) assert mathematica_code(x*y*z)", "x, 0, 'real') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> Reals]]'", "1}]') def test_Limit(): e = Limit(sin(x)/x, x, 0) assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x", "'Coth[x]' assert mathematica_code(asinh(x)) == 'ArcSinh[x]' assert mathematica_code(acosh(x)) == 'ArcCosh[x]' assert mathematica_code(atanh(x)) == 'ArcTanh[x]'", "def test_Limit(): e = Limit(sin(x)/x, x, 0) assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x ->", "\\ '{y, -Infinity, Infinity}]]' def test_Matrix(): assert mathematica_code(Matrix()) == '{}' m = Matrix([[1,", "== \\ 'Hold[Sum[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]'", "10}]]' assert mathematica_code(Sum(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))) == \\", "3' def test_Booleans(): assert mathematica_code(true) == 'True' assert mathematica_code(false) == 'False' def test_Piecewise():", "== 'Tan[x]' assert mathematica_code(cot(x)) == 'Cot[x]' assert mathematica_code(asin(x)) == 'ArcSin[x]' assert mathematica_code(acos(x)) ==", "'Myfunc1[x]' assert mathematica_code(MyFunc2(x, y), user_functions={'MyFunc2': [(lambda *x: False, 'Myfunc2')]}) == 'MyFunc2[x, y]' def", "11}' assert mathematica_code((1, 2, (3, 4))) == '{1, 2, {3, 4}}' assert mathematica_code([1])", "y)) == 'Min[x, y]' assert 
mathematica_code(Max(x, y)) == 'Max[x, y]' assert mathematica_code(Max(x, 2))", "assert mathematica_code(a) == ('AlgebraicNumber[Root[#^7 + 3*# - 1 &, 4],' ' {1, 0,", "assert mathematica_code(f(x, y, z)) == 'f[x, y, z]' assert mathematica_code(sin(x) ** cos(x)) ==", "test_Matrix(): assert mathematica_code(Matrix()) == '{}' m = Matrix([[1, 2], [3, 4444]]) assert mathematica_code(m)", "y/(1 + y**2))) == 'x != (y/(y^2 + 1))' assert mathematica_code(Le(0, x**2)) ==", "cot, coth, csch, erfc, exp, factorial, factorial2, false, fibonacci, gamma, hyper, im, log,", "x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, y, x)) == 'Hold[D[y^4*Sin[x], x, y, x]]' assert", "assert mathematica_code(x*A*B*(C + D)*A*y) == 'x*y*A**B**(C + D)**A' def test_constants(): assert mathematica_code(pi) ==", "'Binomial[x, y]' assert mathematica_code(log(x)) == 'Log[x]' assert mathematica_code(tan(x)) == 'Tan[x]' assert mathematica_code(cot(x)) ==", "mathematica_code(Integral(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))) == \\ 'Hold[Integrate[E^(-x^2 -", "assert mathematica_code(Rational(3, -7)) == '-3/7' assert mathematica_code(Rational(-3, -7)) == '3/7' assert mathematica_code(x +", "test_Rational(): assert mathematica_code(Rational(3, 7)) == '3/7' assert mathematica_code(Rational(18, 9)) == '2' assert mathematica_code(Rational(3,", "== 'Cot[x]' assert mathematica_code(asin(x)) == 'ArcSin[x]' assert mathematica_code(acos(x)) == 'ArcCos[x]' assert mathematica_code(atan(x)) ==", "assert mathematica_code(atan(x)) == 'ArcTan[x]' assert mathematica_code(acot(x)) == 'ArcCot[x]' assert mathematica_code(sinh(x)) == 'Sinh[x]' assert", "class MyFunc1(Function): @classmethod def eval(cls, x): pass class MyFunc2(Function): @classmethod def eval(cls, x,", "x >= 1)), (1 - x, x > 0), (1 + x, True))", "acoth, asin, asinh, atan, atanh, binomial, conjugate, cos, cosh, cot, coth, csch, erfc,", "mathematica_code(Le(0, x**2)) == '0 <= x^2' assert mathematica_code(Gt(pi, 3, evaluate=False)) == 'Pi >", "-> 
0, Direction -> -1]]' e = Limit(sin(x)/x, x, 0, '-') assert mathematica_code(e)", "'Pi' assert mathematica_code(oo) == 'Infinity' assert mathematica_code(-oo) == '-Infinity' assert mathematica_code(EulerGamma) == 'EulerGamma'", "y), user_functions={'MyFunc2': [(lambda *x: False, 'Myfunc2')]}) == 'MyFunc2[x, y]' def test_Lambda(): f1 =", "== 'Infinity' assert mathematica_code(-oo) == '-Infinity' assert mathematica_code(EulerGamma) == 'EulerGamma' assert mathematica_code(Catalan) ==", "K([1, 0, 3, 2, 1]) assert mathematica_code(a) == ('AlgebraicNumber[Root[#^7 + 3*# - 1", "Limit(sin(x)/x, x, 0) assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> -1]]'", "'x*y*A**B**(C + D)**A' def test_constants(): assert mathematica_code(pi) == 'Pi' assert mathematica_code(oo) == 'Infinity'", "+ x + 3, Lambda(y, log(y*z))) assert mathematica_code(r) == ('RootSum[Function[{x}, x^3 + x", "{x + 1, True}}]') def test_RootOf(): p = Poly(x**3 + y*x + 1,", "== 'EulerGamma' assert mathematica_code(Catalan) == 'Catalan' assert mathematica_code(E) == 'E' def test_containers(): assert", "y)' assert mathematica_code(x**-1.0) == 'x^(-1.0)' assert mathematica_code(x**Rational(2, 3)) == 'x^(2/3)' def test_Mul(): A,", "test_Booleans(): assert mathematica_code(true) == 'True' assert mathematica_code(false) == 'False' def test_Piecewise(): g =", "Direction -> -1]]' e = Limit(sin(x)/x, x, 0, '-') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x,", "'ArcTan[x]' assert mathematica_code(acot(x)) == 'ArcCot[x]' assert mathematica_code(sinh(x)) == 'Sinh[x]' assert mathematica_code(cosh(x)) == 'Cosh[x]'", "assert mathematica_code(Derivative(sin(x)*y**4, (x, 2))) == 'Hold[D[y^4*Sin[x], x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, y, x))", "'ArcCoth[x]' assert mathematica_code(sech(x)) == 'Sech[x]' assert mathematica_code(csch(x)) == 'Csch[x]' assert mathematica_code(erfc(x)) == 'Erfc[x]'", "r = RootSum(x**3 + x + 3, Lambda(y, log(y*z))) assert mathematica_code(r) == 
('RootSum[Function[{x},", "+ D)**A' def test_constants(): assert mathematica_code(pi) == 'Pi' assert mathematica_code(oo) == 'Infinity' assert", "== 'False' def test_Piecewise(): g = Piecewise((0, Or(x <= -1, x >= 1)),", "3])) == '{1, 2, 3}' def test_Integral(): assert mathematica_code(Integral(sin(sin(x)), x)) == 'Hold[Integrate[Sin[Sin[x]], x]]'", "acosh, acot, acoth, asin, asinh, atan, atanh, binomial, conjugate, cos, cosh, cot, coth,", "10], 11]) == \\ '{1, 2, 3, {4, 5, {6, 7}}, 8, {9,", "1}, {3, 4}}, {{1}, {}}, x]') assert (mathematica_code(hyper((1, 2, 3), (3, 4), x))", "== 'ArcCos[x]' assert mathematica_code(atan(x)) == 'ArcTan[x]' assert mathematica_code(acot(x)) == 'ArcCot[x]' assert mathematica_code(sinh(x)) ==", "y]' assert mathematica_code(loggamma(x)) == 'LogGamma[x]' class MyFunc1(Function): @classmethod def eval(cls, x): pass class", "\\ '{y, -Infinity, Infinity}]]' def test_Sum(): assert mathematica_code(Sum(sin(x), (x, 0, 10))) == 'Hold[Sum[Sin[x],", "7)*x) == '(3/7)*x' def test_symbols(): assert mathematica_code(x) == 'x' d = Dummy('d') assert", "x]' # issue sympy/sympy#15344 assert mathematica_code(binomial(x, y)) == 'Binomial[x, y]' assert mathematica_code(log(x)) ==", "eval(cls, x, y): pass pytest.raises(ValueError, lambda: mathematica_code(MyFunc1(x), user_functions={'MyFunc1': ['Myfunc1']})) assert mathematica_code(MyFunc1(x), user_functions={'MyFunc1': 'Myfunc1'})", "x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, (y, 3), x)) == 'Hold[D[y^4*Sin[x], x, y, y, y,", "'Log[x]' assert mathematica_code(tan(x)) == 'Tan[x]' assert mathematica_code(cot(x)) == 'Cot[x]' assert mathematica_code(asin(x)) == 'ArcSin[x]'", "asinh, atan, atanh, binomial, conjugate, cos, cosh, cot, coth, csch, erfc, exp, factorial,", "y)) == 'Max[x, y]' assert mathematica_code(Max(x, 2)) == 'Max[2, x]' # issue sympy/sympy#15344", "{x, -Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def test_Matrix(): assert mathematica_code(Matrix()) ==", "'Cot[x]' assert 
mathematica_code(asin(x)) == 'ArcSin[x]' assert mathematica_code(acos(x)) == 'ArcCos[x]' assert mathematica_code(atan(x)) == 'ArcTan[x]'", "def test_Piecewise(): g = Piecewise((0, Or(x <= -1, x >= 1)), (1 -", "assert mathematica_code(re(x)) == 'Re[x]' assert mathematica_code(im(x)) == 'Im[x]' assert mathematica_code(polygamma(x, y)) == 'PolyGamma[x,", "mathematica_code(polygamma(x, y)) == 'PolyGamma[x, y]' assert mathematica_code(factorial(x)) == 'Factorial[x]' assert mathematica_code(factorial2(x)) == 'Factorial2[x]'", "x, x]]' assert mathematica_code(Derivative(sin(x), x)) == 'Hold[D[Sin[x], x]]' assert mathematica_code(Derivative(x, x)) == 'Hold[D[x,", "3}' def test_Integral(): assert mathematica_code(Integral(sin(sin(x)), x)) == 'Hold[Integrate[Sin[Sin[x]], x]]' assert mathematica_code(Integral(exp(-x**2 - y**2),", "*x: False, 'Myfunc2')]}) == 'MyFunc2[x, y]' def test_Lambda(): f1 = Lambda(x, x**2) assert", "- y**2), (x, -oo, oo), (y, -oo, oo))) == \\ 'Hold[Sum[E^(-x^2 - y^2),", "re, rf, sech, sign, sin, sinh, symbols, tan, tanh, true, zeta) from diofant.abc", "assert mathematica_code(Min(x, y)) == 'Min[x, y]' assert mathematica_code(Max(x, y)) == 'Max[x, y]' assert", "assert mathematica_code(tan(x)) == 'Tan[x]' assert mathematica_code(cot(x)) == 'Cot[x]' assert mathematica_code(asin(x)) == 'ArcSin[x]' assert", "+ 1 &, 1]' def test_RootSum(): r = RootSum(x**3 + x + 3,", "mathematica_code([1, 2, 3, [4, 5, [6, 7]], 8, [9, 10], 11]) == \\", "mathematica_code(x**3) == 'x^3' assert mathematica_code(x**(y**3)) == 'x^(y^3)' assert mathematica_code(1/(f(x)*3.5)**(x - y**x)/(x**2 + y))", "== mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' m = SparseMatrix(m) assert mathematica_code(m) ==", "user_functions={'MyFunc1': 'Myfunc1'}) == 'Myfunc1[x]' assert mathematica_code(MyFunc2(x, y), user_functions={'MyFunc2': [(lambda *x: False, 'Myfunc2')]}) ==", "mathematica_code(im(x)) == 'Im[x]' assert mathematica_code(polygamma(x, y)) == 'PolyGamma[x, y]' assert 
mathematica_code(factorial(x)) == 'Factorial[x]'", "+ 3*x - 1, 3) K = QQ.algebraic_field(r) a = K([1, 0, 3,", "def test_Booleans(): assert mathematica_code(true) == 'True' assert mathematica_code(false) == 'False' def test_Piecewise(): g", "= Limit(sin(x)/x, x, 0, 'real') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction", "cos(x)) == 'Sin[x]^Cos[x]' assert mathematica_code(sign(x)) == 'Sign[x]' assert mathematica_code(atanh(x), user_functions={'atanh': 'ArcTanh'}) == 'ArcTanh[x]'", "mathematica_code(Max(x, 2)) == 'Max[2, x]' # issue sympy/sympy#15344 assert mathematica_code(binomial(x, y)) == 'Binomial[x,", "y, y, x]]' def test_Pow(): assert mathematica_code(x**3) == 'x^3' assert mathematica_code(x**(y**3)) == 'x^(y^3)'", "Max, Min, Ne, Or, Piecewise, Poly, Rational, RootOf, RootSum, SparseMatrix, Sum, Tuple, acos,", "mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' def test_Relational(): assert mathematica_code(Eq(x, y)) == 'x", "mathematica_code(x*A*B*(C + D)*A*y) == 'x*y*A**B**(C + D)**A' def test_constants(): assert mathematica_code(pi) == 'Pi'", "== 'Coth[x]' assert mathematica_code(asinh(x)) == 'ArcSinh[x]' assert mathematica_code(acosh(x)) == 'ArcCosh[x]' assert mathematica_code(atanh(x)) ==", "== 'Hold[D[Sin[x], x]]' assert mathematica_code(Derivative(x, x)) == 'Hold[D[x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, (x, 2)))", "{9, 10}, 11}' assert mathematica_code((1, 2, (3, 4))) == '{1, 2, {3, 4}}'", "from diofant import (QQ, Catalan, Derivative, Dummy, E, Eq, EulerGamma, Function, Gt, Heaviside,", "z]' assert mathematica_code(sin(x) ** cos(x)) == 'Sin[x]^Cos[x]' assert mathematica_code(sign(x)) == 'Sign[x]' assert mathematica_code(atanh(x),", "((1,), ()), x)) == 'MeijerG[{{1, 1}, {3, 4}}, {{1}, {}}, x]') assert (mathematica_code(hyper((1,", "mathematica_code(Rational(18, 9)) == '2' assert mathematica_code(Rational(3, -7)) == '-3/7' assert mathematica_code(Rational(-3, -7)) ==", "3*# - 1 &, 4],' ' {1, 0, 3, 2, 1}]') 
def test_Limit():", "z)) == 'f[x, y, z]' assert mathematica_code(sin(x) ** cos(x)) == 'Sin[x]^Cos[x]' assert mathematica_code(sign(x))", "sin, sinh, symbols, tan, tanh, true, zeta) from diofant.abc import x, y, z", "1), (3, 4)), ((1,), ()), x)) == 'MeijerG[{{1, 1}, {3, 4}}, {{1}, {}},", "assert mathematica_code(x**(y**3)) == 'x^(y^3)' assert mathematica_code(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) == \\ '(3.5*f[x])^(-x", "'-Infinity' assert mathematica_code(EulerGamma) == 'EulerGamma' assert mathematica_code(Catalan) == 'Catalan' assert mathematica_code(E) == 'E'", "== 'Im[x]' assert mathematica_code(polygamma(x, y)) == 'PolyGamma[x, y]' assert mathematica_code(factorial(x)) == 'Factorial[x]' assert", "{x, -Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def test_Sum(): assert mathematica_code(Sum(sin(x), (x,", "assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> 1]]' e = Limit(sin(x)/x,", "mathematica_code(d) == f'd{d.dummy_index}' def test_Function(): assert mathematica_code(f(x, y, z)) == 'f[x, y, z]'", "C D', commutative=False) assert mathematica_code(x*y*z) == 'x*y*z' assert mathematica_code(x*y*A) == 'x*y*A' assert mathematica_code(x*y*A*B)", "== 'Binomial[x, y]' assert mathematica_code(log(x)) == 'Log[x]' assert mathematica_code(tan(x)) == 'Tan[x]' assert mathematica_code(cot(x))", "'3/7' assert mathematica_code(x + Rational(3, 7)) == 'x + 3/7' assert mathematica_code(Rational(3, 7)*x)", "[4, 5, [6, 7]], 8, [9, 10], 11]) == \\ '{1, 2, 3,", "D = symbols('A B C D', commutative=False) assert mathematica_code(x*y*z) == 'x*y*z' assert mathematica_code(x*y*A)", "== 'Hold[D[f[x], x, x]]' assert mathematica_code(Derivative(sin(x), x)) == 'Hold[D[Sin[x], x]]' assert mathematica_code(Derivative(x, x))", "'Hold[Sum[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def test_Matrix():", "assert mathematica_code(x*y*z) == 'x*y*z' assert mathematica_code(x*y*A) == 'x*y*A' assert mathematica_code(x*y*A*B) == 'x*y*A**B' 
assert", "mathematica_code(Gt(pi, 3, evaluate=False)) == 'Pi > 3' def test_Booleans(): assert mathematica_code(true) == 'True'", "2, 3, {4, 5, {6, 7}}, 8, {9, 10}, 11}' assert mathematica_code((1, 2,", "evaluate=False)) == 'Pi > 3' def test_Booleans(): assert mathematica_code(true) == 'True' assert mathematica_code(false)", "- x, x > 0), (1 + x, True)) assert (mathematica_code(g) == 'Piecewise[{{0,", "== 'HypergeometricPFQ[{1, 2, 3}, {3, 4}, x]') assert mathematica_code(Min(x, y)) == 'Min[x, y]'", "mathematica_code(sinh(x)) == 'Sinh[x]' assert mathematica_code(cosh(x)) == 'Cosh[x]' assert mathematica_code(tanh(x)) == 'Tanh[x]' assert mathematica_code(coth(x))", "mathematica_code(Min(x, y)) == 'Min[x, y]' assert mathematica_code(Max(x, y)) == 'Max[x, y]' assert mathematica_code(Max(x,", "== 'ArcCoth[x]' assert mathematica_code(sech(x)) == 'Sech[x]' assert mathematica_code(csch(x)) == 'Csch[x]' assert mathematica_code(erfc(x)) ==", "assert mathematica_code(Integer(-1)) == '-1' def test_Rational(): assert mathematica_code(Rational(3, 7)) == '3/7' assert mathematica_code(Rational(18,", "assert mathematica_code(d) == f'd{d.dummy_index}' def test_Function(): assert mathematica_code(f(x, y, z)) == 'f[x, y,", "y^2), {x, -Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def test_Matrix(): assert mathematica_code(Matrix())", "assert mathematica_code(Le(0, x**2)) == '0 <= x^2' assert mathematica_code(Gt(pi, 3, evaluate=False)) == 'Pi", "mathematica_code(RootOf(p, 0)) == 'Root[#^3 + #*y + 1 &, 1]' def test_RootSum(): r", "[(lambda *x: False, 'Myfunc2')]}) == 'MyFunc2[x, y]' def test_Lambda(): f1 = Lambda(x, x**2)", "RootOf(x**7 + 3*x - 1, 3) K = QQ.algebraic_field(r) a = K([1, 0,", "assert mathematica_code(zeta(x)) == 'Zeta[x]' assert mathematica_code(Heaviside(x)) == 'UnitStep[x]' assert mathematica_code(fibonacci(x)) == 'Fibonacci[x]' assert", "mathematica_code(x*y*A) == 'x*y*A' assert mathematica_code(x*y*A*B) == 'x*y*A**B' assert mathematica_code(x*y*A*B*C) == 
'x*y*A**B**C' assert mathematica_code(x*A*B*(C", "y*x + 1, x) assert mathematica_code(RootOf(p, 0)) == 'Root[#^3 + #*y + 1", "== 'f[x, y, z]' assert mathematica_code(sin(x) ** cos(x)) == 'Sin[x]^Cos[x]' assert mathematica_code(sign(x)) ==", "== 'PolyGamma[x, y]' assert mathematica_code(factorial(x)) == 'Factorial[x]' assert mathematica_code(factorial2(x)) == 'Factorial2[x]' assert mathematica_code(rf(x,", "3}, {3, 4}, x]') assert mathematica_code(Min(x, y)) == 'Min[x, y]' assert mathematica_code(Max(x, y))", "x) assert mathematica_code(RootOf(p, 0)) == 'Root[#^3 + #*y + 1 &, 1]' def", "y, z __all__ = () f = Function('f') def test_Integer(): assert mathematica_code(Integer(67)) ==", "== '{1}' assert mathematica_code((1,)) == '{1}' assert mathematica_code(Tuple(*[1, 2, 3])) == '{1, 2,", "'Hold[D[y^4*Sin[x], x, y, y, y, x]]' def test_Pow(): assert mathematica_code(x**3) == 'x^3' assert", "!= (y/(y^2 + 1))' assert mathematica_code(Le(0, x**2)) == '0 <= x^2' assert mathematica_code(Gt(pi,", "'False' def test_Piecewise(): g = Piecewise((0, Or(x <= -1, x >= 1)), (1", "x]') assert (mathematica_code(hyper((1, 2, 3), (3, 4), x)) == 'HypergeometricPFQ[{1, 2, 3}, {3,", "mathematica_code(pi) == 'Pi' assert mathematica_code(oo) == 'Infinity' assert mathematica_code(-oo) == '-Infinity' assert mathematica_code(EulerGamma)", "{4, 5, {6, 7}}, 8, {9, 10}, 11}' assert mathematica_code((1, 2, (3, 4)))", "assert mathematica_code(Integer(67)) == '67' assert mathematica_code(Integer(-1)) == '-1' def test_Rational(): assert mathematica_code(Rational(3, 7))", "[3, 4444]]) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' m =", "test_RootSum(): r = RootSum(x**3 + x + 3, Lambda(y, log(y*z))) assert mathematica_code(r) ==", "mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' m = SparseMatrix(m) assert mathematica_code(m)", "== 'Re[x]' assert mathematica_code(im(x)) == 'Im[x]' assert mathematica_code(polygamma(x, y)) == 
'PolyGamma[x, y]' assert", "Tuple, acos, acosh, acot, acoth, asin, asinh, atan, atanh, binomial, conjugate, cos, cosh,", "+ 1))' assert mathematica_code(Le(0, x**2)) == '0 <= x^2' assert mathematica_code(Gt(pi, 3, evaluate=False))", "assert mathematica_code(Tuple(*[1, 2, 3])) == '{1, 2, 3}' def test_Integral(): assert mathematica_code(Integral(sin(sin(x)), x))", "assert mathematica_code(acot(x)) == 'ArcCot[x]' assert mathematica_code(sinh(x)) == 'Sinh[x]' assert mathematica_code(cosh(x)) == 'Cosh[x]' assert", "D)*A*y) == 'x*y*A**B**(C + D)**A' def test_constants(): assert mathematica_code(pi) == 'Pi' assert mathematica_code(oo)", "mathematica_code(x*y*z) == 'x*y*z' assert mathematica_code(x*y*A) == 'x*y*A' assert mathematica_code(x*y*A*B) == 'x*y*A**B' assert mathematica_code(x*y*A*B*C)", "oo))) == \\ 'Hold[Integrate[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \\ '{y, -Infinity,", "test_Integral(): assert mathematica_code(Integral(sin(sin(x)), x)) == 'Hold[Integrate[Sin[Sin[x]], x]]' assert mathematica_code(Integral(exp(-x**2 - y**2), (x, -oo,", "== '-3/7' assert mathematica_code(Rational(-3, -7)) == '3/7' assert mathematica_code(x + Rational(3, 7)) ==", "x)) == 'HypergeometricPFQ[{1, 2, 3}, {3, 4}, x]') assert mathematica_code(Min(x, y)) == 'Min[x,", "y]' def test_Lambda(): f1 = Lambda(x, x**2) assert mathematica_code(f1) == 'Function[{x}, x^2]' f2", "\\ '(3.5*f[x])^(-x + y^x)/(x^2 + y)' assert mathematica_code(x**-1.0) == 'x^(-1.0)' assert mathematica_code(x**Rational(2, 3))", "== 'Sinh[x]' assert mathematica_code(cosh(x)) == 'Cosh[x]' assert mathematica_code(tanh(x)) == 'Tanh[x]' assert mathematica_code(coth(x)) ==", "mathematica_code, meijerg, oo, pi, polygamma, polylog, re, rf, sech, sign, sin, sinh, symbols,", "assert mathematica_code(gamma(x)) == 'Gamma[x]' assert mathematica_code(zeta(x)) == 'Zeta[x]' assert mathematica_code(Heaviside(x)) == 'UnitStep[x]' assert", "'ArcCos[x]' assert mathematica_code(atan(x)) == 'ArcTan[x]' assert mathematica_code(acot(x)) 
== 'ArcCot[x]' assert mathematica_code(sinh(x)) == 'Sinh[x]'", "x]]' def test_Pow(): assert mathematica_code(x**3) == 'x^3' assert mathematica_code(x**(y**3)) == 'x^(y^3)' assert mathematica_code(1/(f(x)*3.5)**(x", "erfc, exp, factorial, factorial2, false, fibonacci, gamma, hyper, im, log, loggamma, mathematica_code, meijerg,", "x <= -1}, ' '{-x + 1, x > 0}, {x + 1,", "class MyFunc2(Function): @classmethod def eval(cls, x, y): pass pytest.raises(ValueError, lambda: mathematica_code(MyFunc1(x), user_functions={'MyFunc1': ['Myfunc1']}))", "'Hold[Integrate[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def test_Sum():", "2))) == 'Hold[D[y^4*Sin[x], x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, y, x)) == 'Hold[D[y^4*Sin[x], x,", "'{{1, 2}, {3, 4444}}' m = SparseMatrix(m) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1,", "mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' m = SparseMatrix(m) assert mathematica_code(m) == mathematica_code(m.as_immutable())", "()), x)) == 'MeijerG[{{1, 1}, {3, 4}}, {{1}, {}}, x]') assert (mathematica_code(hyper((1, 2,", "mathematica_code(fibonacci(x)) == 'Fibonacci[x]' assert mathematica_code(polylog(x, y)) == 'PolyLog[x, y]' assert mathematica_code(loggamma(x)) == 'LogGamma[x]'", "{x, 0, 10}]]' assert mathematica_code(Sum(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo)))", "== 'Piecewise[{{0, x >= 1 || x <= -1}, ' '{-x + 1,", "-oo, oo))) == \\ 'Hold[Integrate[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \\ '{y,", "== 'Csch[x]' assert mathematica_code(erfc(x)) == 'Erfc[x]' assert mathematica_code(conjugate(x)) == 'Conjugate[x]' assert mathematica_code(re(x)) ==", "mathematica_code(asinh(x)) == 'ArcSinh[x]' assert mathematica_code(acosh(x)) == 'ArcCosh[x]' assert mathematica_code(atanh(x)) == 'ArcTanh[x]' assert mathematica_code(acoth(x))", "mathematica_code(acos(x)) == 'ArcCos[x]' assert mathematica_code(atan(x)) == 'ArcTan[x]' assert mathematica_code(acot(x)) == 
'ArcCot[x]' assert mathematica_code(sinh(x))", "f1 = Lambda(x, x**2) assert mathematica_code(f1) == 'Function[{x}, x^2]' f2 = Lambda((x, y),", "x]]' assert mathematica_code(Derivative(sin(x)*y**4, (x, 2))) == 'Hold[D[y^4*Sin[x], x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, y,", "sinh, symbols, tan, tanh, true, zeta) from diofant.abc import x, y, z __all__", "y, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, (y, 3), x)) == 'Hold[D[y^4*Sin[x], x, y, y,", "'Piecewise[{{0, x >= 1 || x <= -1}, ' '{-x + 1, x", "test_Function(): assert mathematica_code(f(x, y, z)) == 'f[x, y, z]' assert mathematica_code(sin(x) ** cos(x))", "+ #*y + 1 &, 1]' def test_RootSum(): r = RootSum(x**3 + x", "#*y + 1 &, 1]' def test_RootSum(): r = RootSum(x**3 + x +", "mathematica_code(factorial2(x)) == 'Factorial2[x]' assert mathematica_code(rf(x, y)) == 'Pochhammer[x, y]' assert mathematica_code(gamma(x)) == 'Gamma[x]'", "SparseMatrix, Sum, Tuple, acos, acosh, acot, acoth, asin, asinh, atan, atanh, binomial, conjugate,", "x^2]' f2 = Lambda((x, y), x + 2*y) assert mathematica_code(f2) == 'Function[{x, y},", "Rational(3, 7)) == 'x + 3/7' assert mathematica_code(Rational(3, 7)*x) == '(3/7)*x' def test_symbols():", "+ y^x)/(x^2 + y)' assert mathematica_code(x**-1.0) == 'x^(-1.0)' assert mathematica_code(x**Rational(2, 3)) == 'x^(2/3)'", "assert mathematica_code(Gt(pi, 3, evaluate=False)) == 'Pi > 3' def test_Booleans(): assert mathematica_code(true) ==", "1]) assert mathematica_code(a) == ('AlgebraicNumber[Root[#^7 + 3*# - 1 &, 4],' ' {1,", "3, 2, 1]) assert mathematica_code(a) == ('AlgebraicNumber[Root[#^7 + 3*# - 1 &, 4],'", "test_Lambda(): f1 = Lambda(x, x**2) assert mathematica_code(f1) == 'Function[{x}, x^2]' f2 = Lambda((x,", "mathematica_code(f(x, y, z)) == 'f[x, y, z]' assert mathematica_code(sin(x) ** cos(x)) == 'Sin[x]^Cos[x]'", "D)**A' def test_constants(): assert mathematica_code(pi) == 'Pi' assert mathematica_code(oo) == 'Infinity' assert mathematica_code(-oo)", 
"= Limit(sin(x)/x, x, 0) assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction ->", "mathematica_code(x*y*A*B*C) == 'x*y*A**B**C' assert mathematica_code(x*A*B*(C + D)*A*y) == 'x*y*A**B**(C + D)**A' def test_constants():", "assert mathematica_code(sech(x)) == 'Sech[x]' assert mathematica_code(csch(x)) == 'Csch[x]' assert mathematica_code(erfc(x)) == 'Erfc[x]' assert", "Function('f') def test_Integer(): assert mathematica_code(Integer(67)) == '67' assert mathematica_code(Integer(-1)) == '-1' def test_Rational():", "assert mathematica_code([1, 2, 3, [4, 5, [6, 7]], 8, [9, 10], 11]) ==", "+ 1, x > 0}, {x + 1, True}}]') def test_RootOf(): p =", "'Function[{x}, x^2]' f2 = Lambda((x, y), x + 2*y) assert mathematica_code(f2) == 'Function[{x,", "1, x) assert mathematica_code(RootOf(p, 0)) == 'Root[#^3 + #*y + 1 &, 1]'", "= Limit(sin(x)/x, x, 0, '-') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction", "2, {3, 4}}' assert mathematica_code([1]) == '{1}' assert mathematica_code((1,)) == '{1}' assert mathematica_code(Tuple(*[1,", "<= -1, x >= 1)), (1 - x, x > 0), (1 +", "assert mathematica_code(RootOf(p, 0)) == 'Root[#^3 + #*y + 1 &, 1]' def test_RootSum():", "assert mathematica_code(Heaviside(x)) == 'UnitStep[x]' assert mathematica_code(fibonacci(x)) == 'Fibonacci[x]' assert mathematica_code(polylog(x, y)) == 'PolyLog[x,", "assert mathematica_code(Integral(sin(sin(x)), x)) == 'Hold[Integrate[Sin[Sin[x]], x]]' assert mathematica_code(Integral(exp(-x**2 - y**2), (x, -oo, oo),", "== 'Function[{x}, x^2]' f2 = Lambda((x, y), x + 2*y) assert mathematica_code(f2) ==", "def test_AlgebraicElement(): r = RootOf(x**7 + 3*x - 1, 3) K = QQ.algebraic_field(r)", "2, 3), (3, 4), x)) == 'HypergeometricPFQ[{1, 2, 3}, {3, 4}, x]') assert", "test_RootOf(): p = Poly(x**3 + y*x + 1, x) assert mathematica_code(RootOf(p, 0)) ==", "2, 1]) assert mathematica_code(a) == ('AlgebraicNumber[Root[#^7 + 3*# - 1 &, 4],' '", "assert mathematica_code(x*y*A*B) == 'x*y*A**B' 
assert mathematica_code(x*y*A*B*C) == 'x*y*A**B**C' assert mathematica_code(x*A*B*(C + D)*A*y) ==", "SparseMatrix(m) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' def test_Relational(): assert", "{3, 4}}, {{1}, {}}, x]') assert (mathematica_code(hyper((1, 2, 3), (3, 4), x)) ==", "assert mathematica_code(x**Rational(2, 3)) == 'x^(2/3)' def test_Mul(): A, B, C, D = symbols('A", "1 &, 4],' ' {1, 0, 3, 2, 1}]') def test_Limit(): e =", "'Hold[D[x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, (x, 2))) == 'Hold[D[y^4*Sin[x], x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x,", "1]' def test_RootSum(): r = RootSum(x**3 + x + 3, Lambda(y, log(y*z))) assert", "= QQ.algebraic_field(r) a = K([1, 0, 3, 2, 1]) assert mathematica_code(a) == ('AlgebraicNumber[Root[#^7", "== \\ '(3.5*f[x])^(-x + y^x)/(x^2 + y)' assert mathematica_code(x**-1.0) == 'x^(-1.0)' assert mathematica_code(x**Rational(2,", "assert mathematica_code((1,)) == '{1}' assert mathematica_code(Tuple(*[1, 2, 3])) == '{1, 2, 3}' def", "mathematica_code(Tuple(*[1, 2, 3])) == '{1, 2, 3}' def test_Integral(): assert mathematica_code(Integral(sin(sin(x)), x)) ==", "assert mathematica_code(sign(x)) == 'Sign[x]' assert mathematica_code(atanh(x), user_functions={'atanh': 'ArcTanh'}) == 'ArcTanh[x]' assert (mathematica_code(meijerg(((1, 1),", "'{-x + 1, x > 0}, {x + 1, True}}]') def test_RootOf(): p", "+ y)) == \\ '(3.5*f[x])^(-x + y^x)/(x^2 + y)' assert mathematica_code(x**-1.0) == 'x^(-1.0)'", "mathematica_code(Integer(67)) == '67' assert mathematica_code(Integer(-1)) == '-1' def test_Rational(): assert mathematica_code(Rational(3, 7)) ==", "'HypergeometricPFQ[{1, 2, 3}, {3, 4}, x]') assert mathematica_code(Min(x, y)) == 'Min[x, y]' assert", "'ArcCosh[x]' assert mathematica_code(atanh(x)) == 'ArcTanh[x]' assert mathematica_code(acoth(x)) == 'ArcCoth[x]' assert mathematica_code(sech(x)) == 'Sech[x]'", "# issue sympy/sympy#15344 assert mathematica_code(binomial(x, 
y)) == 'Binomial[x, y]' assert mathematica_code(log(x)) == 'Log[x]'", "'Hold[D[f[x], x, x]]' assert mathematica_code(Derivative(sin(x), x)) == 'Hold[D[Sin[x], x]]' assert mathematica_code(Derivative(x, x)) ==", "mathematica_code(Matrix()) == '{}' m = Matrix([[1, 2], [3, 4444]]) assert mathematica_code(m) == mathematica_code(m.as_immutable())", "mathematica_code(acot(x)) == 'ArcCot[x]' assert mathematica_code(sinh(x)) == 'Sinh[x]' assert mathematica_code(cosh(x)) == 'Cosh[x]' assert mathematica_code(tanh(x))", "== 'MeijerG[{{1, 1}, {3, 4}}, {{1}, {}}, x]') assert (mathematica_code(hyper((1, 2, 3), (3,", "'Myfunc1'}) == 'Myfunc1[x]' assert mathematica_code(MyFunc2(x, y), user_functions={'MyFunc2': [(lambda *x: False, 'Myfunc2')]}) == 'MyFunc2[x,", "'Gamma[x]' assert mathematica_code(zeta(x)) == 'Zeta[x]' assert mathematica_code(Heaviside(x)) == 'UnitStep[x]' assert mathematica_code(fibonacci(x)) == 'Fibonacci[x]'", "acos, acosh, acot, acoth, asin, asinh, atan, atanh, binomial, conjugate, cos, cosh, cot,", "pytest from diofant import (QQ, Catalan, Derivative, Dummy, E, Eq, EulerGamma, Function, Gt,", "assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> -1]]' e = Limit(sin(x)/x,", "def test_constants(): assert mathematica_code(pi) == 'Pi' assert mathematica_code(oo) == 'Infinity' assert mathematica_code(-oo) ==", "acot, acoth, asin, asinh, atan, atanh, binomial, conjugate, cos, cosh, cot, coth, csch,", "mathematica_code(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) == \\ '(3.5*f[x])^(-x + y^x)/(x^2 + y)' assert", "assert mathematica_code(factorial(x)) == 'Factorial[x]' assert mathematica_code(factorial2(x)) == 'Factorial2[x]' assert mathematica_code(rf(x, y)) == 'Pochhammer[x,", "'x*y*A**B' assert mathematica_code(x*y*A*B*C) == 'x*y*A**B**C' assert mathematica_code(x*A*B*(C + D)*A*y) == 'x*y*A**B**(C + D)**A'", "x > 0), (1 + x, True)) assert (mathematica_code(g) == 'Piecewise[{{0, x >=", "4444]]) assert mathematica_code(m) == 
mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' m = SparseMatrix(m)", "3, {4, 5, {6, 7}}, 8, {9, 10}, 11}' assert mathematica_code((1, 2, (3,", "mathematica_code(oo) == 'Infinity' assert mathematica_code(-oo) == '-Infinity' assert mathematica_code(EulerGamma) == 'EulerGamma' assert mathematica_code(Catalan)", "== '{1, 2, 3}' def test_Integral(): assert mathematica_code(Integral(sin(sin(x)), x)) == 'Hold[Integrate[Sin[Sin[x]], x]]' assert", "assert mathematica_code(Integral(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))) == \\ 'Hold[Integrate[E^(-x^2", "== '{1}' assert mathematica_code(Tuple(*[1, 2, 3])) == '{1, 2, 3}' def test_Integral(): assert", "'Max[x, y]' assert mathematica_code(Max(x, 2)) == 'Max[2, x]' # issue sympy/sympy#15344 assert mathematica_code(binomial(x,", ">= 1 || x <= -1}, ' '{-x + 1, x > 0},", "' '{-x + 1, x > 0}, {x + 1, True}}]') def test_RootOf():", "assert mathematica_code(Derivative(sin(x)*y**4, x, (y, 3), x)) == 'Hold[D[y^4*Sin[x], x, y, y, y, x]]'", "x, 0) assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> -1]]' e", "'x*y*A**B**C' assert mathematica_code(x*A*B*(C + D)*A*y) == 'x*y*A**B**(C + D)**A' def test_constants(): assert mathematica_code(pi)", "(x, -oo, oo), (y, -oo, oo))) == \\ 'Hold[Sum[E^(-x^2 - y^2), {x, -Infinity,", "'MeijerG[{{1, 1}, {3, 4}}, {{1}, {}}, x]') assert (mathematica_code(hyper((1, 2, 3), (3, 4),", "'Im[x]' assert mathematica_code(polygamma(x, y)) == 'PolyGamma[x, y]' assert mathematica_code(factorial(x)) == 'Factorial[x]' assert mathematica_code(factorial2(x))", "True}}]') def test_RootOf(): p = Poly(x**3 + y*x + 1, x) assert mathematica_code(RootOf(p,", "|| x <= -1}, ' '{-x + 1, x > 0}, {x +", "<= -1}, ' '{-x + 1, x > 0}, {x + 1, True}}]')", "assert mathematica_code(conjugate(x)) == 'Conjugate[x]' assert mathematica_code(re(x)) == 'Re[x]' assert mathematica_code(im(x)) == 'Im[x]' assert", "assert mathematica_code((1, 2, (3, 4))) == '{1, 2, {3, 4}}' assert mathematica_code([1]) ==", 
"assert (mathematica_code(meijerg(((1, 1), (3, 4)), ((1,), ()), x)) == 'MeijerG[{{1, 1}, {3, 4}},", "K = QQ.algebraic_field(r) a = K([1, 0, 3, 2, 1]) assert mathematica_code(a) ==", "mathematica_code(polylog(x, y)) == 'PolyLog[x, y]' assert mathematica_code(loggamma(x)) == 'LogGamma[x]' class MyFunc1(Function): @classmethod def", "= symbols('A B C D', commutative=False) assert mathematica_code(x*y*z) == 'x*y*z' assert mathematica_code(x*y*A) ==", "assert mathematica_code(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) == \\ '(3.5*f[x])^(-x + y^x)/(x^2 + y)'", "= Lambda((x, y), x + 2*y) assert mathematica_code(f2) == 'Function[{x, y}, x +", "-1]]' e = Limit(sin(x)/x, x, 0, '-') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x ->", "2], [3, 4444]]) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' m", "+ y*x + 1, x) assert mathematica_code(RootOf(p, 0)) == 'Root[#^3 + #*y +", "== 'x*y*A**B**C' assert mathematica_code(x*A*B*(C + D)*A*y) == 'x*y*A**B**(C + D)**A' def test_constants(): assert", "+ 3], ' 'Function[{y}, Log[y*z]]]') def test_AlgebraicElement(): r = RootOf(x**7 + 3*x -", "'2' assert mathematica_code(Rational(3, -7)) == '-3/7' assert mathematica_code(Rational(-3, -7)) == '3/7' assert mathematica_code(x", "== 'Myfunc1[x]' assert mathematica_code(MyFunc2(x, y), user_functions={'MyFunc2': [(lambda *x: False, 'Myfunc2')]}) == 'MyFunc2[x, y]'", "== 'Function[{x, y}, x + 2*y]' def test_Derivative(): assert mathematica_code(Derivative(f(x), x, x)) ==", "== 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> -1]]' e = Limit(sin(x)/x, x, 0,", "== 'Sech[x]' assert mathematica_code(csch(x)) == 'Csch[x]' assert mathematica_code(erfc(x)) == 'Erfc[x]' assert mathematica_code(conjugate(x)) ==", "== 'Erfc[x]' assert mathematica_code(conjugate(x)) == 'Conjugate[x]' assert mathematica_code(re(x)) == 'Re[x]' assert mathematica_code(im(x)) ==", "y^2), {x, -Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def test_Sum(): assert 
mathematica_code(Sum(sin(x),", "sech, sign, sin, sinh, symbols, tan, tanh, true, zeta) from diofant.abc import x,", "mathematica_code(rf(x, y)) == 'Pochhammer[x, y]' assert mathematica_code(gamma(x)) == 'Gamma[x]' assert mathematica_code(zeta(x)) == 'Zeta[x]'", "mathematica_code(sign(x)) == 'Sign[x]' assert mathematica_code(atanh(x), user_functions={'atanh': 'ArcTanh'}) == 'ArcTanh[x]' assert (mathematica_code(meijerg(((1, 1), (3,", "'ArcTanh[x]' assert mathematica_code(acoth(x)) == 'ArcCoth[x]' assert mathematica_code(sech(x)) == 'Sech[x]' assert mathematica_code(csch(x)) == 'Csch[x]'", "false, fibonacci, gamma, hyper, im, log, loggamma, mathematica_code, meijerg, oo, pi, polygamma, polylog,", "user_functions={'MyFunc1': ['Myfunc1']})) assert mathematica_code(MyFunc1(x), user_functions={'MyFunc1': 'Myfunc1'}) == 'Myfunc1[x]' assert mathematica_code(MyFunc2(x, y), user_functions={'MyFunc2': [(lambda", "B, C, D = symbols('A B C D', commutative=False) assert mathematica_code(x*y*z) == 'x*y*z'", "2, (3, 4))) == '{1, 2, {3, 4}}' assert mathematica_code([1]) == '{1}' assert", "mathematica_code(cot(x)) == 'Cot[x]' assert mathematica_code(asin(x)) == 'ArcSin[x]' assert mathematica_code(acos(x)) == 'ArcCos[x]' assert mathematica_code(atan(x))", "-> -1]]' e = Limit(sin(x)/x, x, 0, '-') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x", "r = RootOf(x**7 + 3*x - 1, 3) K = QQ.algebraic_field(r) a =", "def test_symbols(): assert mathematica_code(x) == 'x' d = Dummy('d') assert mathematica_code(d) == f'd{d.dummy_index}'", "'Csch[x]' assert mathematica_code(erfc(x)) == 'Erfc[x]' assert mathematica_code(conjugate(x)) == 'Conjugate[x]' assert mathematica_code(re(x)) == 'Re[x]'", "== 'Hold[D[y^4*Sin[x], x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, y, x)) == 'Hold[D[y^4*Sin[x], x, y,", "eval(cls, x): pass class MyFunc2(Function): @classmethod def eval(cls, x, y): pass pytest.raises(ValueError, lambda:", "{3, 4}, x]') assert mathematica_code(Min(x, y)) == 'Min[x, 
y]' assert mathematica_code(Max(x, y)) ==", "'Sinh[x]' assert mathematica_code(cosh(x)) == 'Cosh[x]' assert mathematica_code(tanh(x)) == 'Tanh[x]' assert mathematica_code(coth(x)) == 'Coth[x]'", "== 'ArcSinh[x]' assert mathematica_code(acosh(x)) == 'ArcCosh[x]' assert mathematica_code(atanh(x)) == 'ArcTanh[x]' assert mathematica_code(acoth(x)) ==", "Infinity}]]' def test_Matrix(): assert mathematica_code(Matrix()) == '{}' m = Matrix([[1, 2], [3, 4444]])", "tests.\"\"\" import pytest from diofant import (QQ, Catalan, Derivative, Dummy, E, Eq, EulerGamma,", "2, 3}' def test_Integral(): assert mathematica_code(Integral(sin(sin(x)), x)) == 'Hold[Integrate[Sin[Sin[x]], x]]' assert mathematica_code(Integral(exp(-x**2 -", "'Factorial[x]' assert mathematica_code(factorial2(x)) == 'Factorial2[x]' assert mathematica_code(rf(x, y)) == 'Pochhammer[x, y]' assert mathematica_code(gamma(x))", "mathematica_code(Rational(3, 7)*x) == '(3/7)*x' def test_symbols(): assert mathematica_code(x) == 'x' d = Dummy('d')", "assert mathematica_code(Ne(x, y/(1 + y**2))) == 'x != (y/(y^2 + 1))' assert mathematica_code(Le(0,", "assert mathematica_code(cot(x)) == 'Cot[x]' assert mathematica_code(asin(x)) == 'ArcSin[x]' assert mathematica_code(acos(x)) == 'ArcCos[x]' assert", "== 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> 1]]' e = Limit(sin(x)/x, x, 0,", "pass pytest.raises(ValueError, lambda: mathematica_code(MyFunc1(x), user_functions={'MyFunc1': ['Myfunc1']})) assert mathematica_code(MyFunc1(x), user_functions={'MyFunc1': 'Myfunc1'}) == 'Myfunc1[x]' assert", "test_Pow(): assert mathematica_code(x**3) == 'x^3' assert mathematica_code(x**(y**3)) == 'x^(y^3)' assert mathematica_code(1/(f(x)*3.5)**(x - y**x)/(x**2", "'(3/7)*x' def test_symbols(): assert mathematica_code(x) == 'x' d = Dummy('d') assert mathematica_code(d) ==", "0, 10}]]' assert mathematica_code(Sum(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))) ==", "== '3/7' assert mathematica_code(Rational(18, 9)) == '2' assert 
mathematica_code(Rational(3, -7)) == '-3/7' assert", "fibonacci, gamma, hyper, im, log, loggamma, mathematica_code, meijerg, oo, pi, polygamma, polylog, re,", "test_containers(): assert mathematica_code([1, 2, 3, [4, 5, [6, 7]], 8, [9, 10], 11])", "y, z]' assert mathematica_code(sin(x) ** cos(x)) == 'Sin[x]^Cos[x]' assert mathematica_code(sign(x)) == 'Sign[x]' assert", "2, 3, [4, 5, [6, 7]], 8, [9, 10], 11]) == \\ '{1,", "9)) == '2' assert mathematica_code(Rational(3, -7)) == '-3/7' assert mathematica_code(Rational(-3, -7)) == '3/7'", "sympy/sympy#15344 assert mathematica_code(binomial(x, y)) == 'Binomial[x, y]' assert mathematica_code(log(x)) == 'Log[x]' assert mathematica_code(tan(x))", "(3, 4), x)) == 'HypergeometricPFQ[{1, 2, 3}, {3, 4}, x]') assert mathematica_code(Min(x, y))", "assert mathematica_code(sinh(x)) == 'Sinh[x]' assert mathematica_code(cosh(x)) == 'Cosh[x]' assert mathematica_code(tanh(x)) == 'Tanh[x]' assert", "== 'Tanh[x]' assert mathematica_code(coth(x)) == 'Coth[x]' assert mathematica_code(asinh(x)) == 'ArcSinh[x]' assert mathematica_code(acosh(x)) ==", "5, {6, 7}}, 8, {9, 10}, 11}' assert mathematica_code((1, 2, (3, 4))) ==", "def test_Sum(): assert mathematica_code(Sum(sin(x), (x, 0, 10))) == 'Hold[Sum[Sin[x], {x, 0, 10}]]' assert", "mathematica_code(r) == ('RootSum[Function[{x}, x^3 + x + 3], ' 'Function[{y}, Log[y*z]]]') def test_AlgebraicElement():", "test_Integer(): assert mathematica_code(Integer(67)) == '67' assert mathematica_code(Integer(-1)) == '-1' def test_Rational(): assert mathematica_code(Rational(3,", "('AlgebraicNumber[Root[#^7 + 3*# - 1 &, 4],' ' {1, 0, 3, 2, 1}]')", "x]]' assert mathematica_code(Derivative(sin(x), x)) == 'Hold[D[Sin[x], x]]' assert mathematica_code(Derivative(x, x)) == 'Hold[D[x, x]]'", "mathematica_code((1, 2, (3, 4))) == '{1, 2, {3, 4}}' assert mathematica_code([1]) == '{1}'", "' 'Function[{y}, Log[y*z]]]') def test_AlgebraicElement(): r = RootOf(x**7 + 3*x - 1, 3)", "mathematica_code(acoth(x)) == 
'ArcCoth[x]' assert mathematica_code(sech(x)) == 'Sech[x]' assert mathematica_code(csch(x)) == 'Csch[x]' assert mathematica_code(erfc(x))", "== 'Hold[D[x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, (x, 2))) == 'Hold[D[y^4*Sin[x], x, x]]' assert mathematica_code(Derivative(sin(x)*y**4,", "2*y]' def test_Derivative(): assert mathematica_code(Derivative(f(x), x, x)) == 'Hold[D[f[x], x, x]]' assert mathematica_code(Derivative(sin(x),", "7)) == 'x + 3/7' assert mathematica_code(Rational(3, 7)*x) == '(3/7)*x' def test_symbols(): assert", "== 'x*y*z' assert mathematica_code(x*y*A) == 'x*y*A' assert mathematica_code(x*y*A*B) == 'x*y*A**B' assert mathematica_code(x*y*A*B*C) ==", "('RootSum[Function[{x}, x^3 + x + 3], ' 'Function[{y}, Log[y*z]]]') def test_AlgebraicElement(): r =", "B C D', commutative=False) assert mathematica_code(x*y*z) == 'x*y*z' assert mathematica_code(x*y*A) == 'x*y*A' assert", "def test_Lambda(): f1 = Lambda(x, x**2) assert mathematica_code(f1) == 'Function[{x}, x^2]' f2 =", "mathematica_code(sech(x)) == 'Sech[x]' assert mathematica_code(csch(x)) == 'Csch[x]' assert mathematica_code(erfc(x)) == 'Erfc[x]' assert mathematica_code(conjugate(x))", "== 'Hold[D[y^4*Sin[x], x, y, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, (y, 3), x)) == 'Hold[D[y^4*Sin[x],", "'-3/7' assert mathematica_code(Rational(-3, -7)) == '3/7' assert mathematica_code(x + Rational(3, 7)) == 'x", "Lambda(x, x**2) assert mathematica_code(f1) == 'Function[{x}, x^2]' f2 = Lambda((x, y), x +", "y' assert mathematica_code(Ne(x, y/(1 + y**2))) == 'x != (y/(y^2 + 1))' assert", "pytest.raises(ValueError, lambda: mathematica_code(MyFunc1(x), user_functions={'MyFunc1': ['Myfunc1']})) assert mathematica_code(MyFunc1(x), user_functions={'MyFunc1': 'Myfunc1'}) == 'Myfunc1[x]' assert mathematica_code(MyFunc2(x,", "assert mathematica_code(Derivative(f(x), x, x)) == 'Hold[D[f[x], x, x]]' assert mathematica_code(Derivative(sin(x), x)) == 'Hold[D[Sin[x],", "assert 
mathematica_code(r) == ('RootSum[Function[{x}, x^3 + x + 3], ' 'Function[{y}, Log[y*z]]]') def", "== 'x + 3/7' assert mathematica_code(Rational(3, 7)*x) == '(3/7)*x' def test_symbols(): assert mathematica_code(x)", "- y**2), (x, -oo, oo), (y, -oo, oo))) == \\ 'Hold[Integrate[E^(-x^2 - y^2),", "y)) == 'x == y' assert mathematica_code(Ne(x, y/(1 + y**2))) == 'x !=", "assert mathematica_code(x + Rational(3, 7)) == 'x + 3/7' assert mathematica_code(Rational(3, 7)*x) ==", "f = Function('f') def test_Integer(): assert mathematica_code(Integer(67)) == '67' assert mathematica_code(Integer(-1)) == '-1'", "(y, -oo, oo))) == \\ 'Hold[Sum[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \\", "mathematica_code(f1) == 'Function[{x}, x^2]' f2 = Lambda((x, y), x + 2*y) assert mathematica_code(f2)", "def test_Matrix(): assert mathematica_code(Matrix()) == '{}' m = Matrix([[1, 2], [3, 4444]]) assert", "&, 4],' ' {1, 0, 3, 2, 1}]') def test_Limit(): e = Limit(sin(x)/x,", "'{1, 2, {3, 4}}' assert mathematica_code([1]) == '{1}' assert mathematica_code((1,)) == '{1}' assert", "mathematica_code(atan(x)) == 'ArcTan[x]' assert mathematica_code(acot(x)) == 'ArcCot[x]' assert mathematica_code(sinh(x)) == 'Sinh[x]' assert mathematica_code(cosh(x))", "() f = Function('f') def test_Integer(): assert mathematica_code(Integer(67)) == '67' assert mathematica_code(Integer(-1)) ==", "mathematica_code(tanh(x)) == 'Tanh[x]' assert mathematica_code(coth(x)) == 'Coth[x]' assert mathematica_code(asinh(x)) == 'ArcSinh[x]' assert mathematica_code(acosh(x))", "y, x]]' def test_Pow(): assert mathematica_code(x**3) == 'x^3' assert mathematica_code(x**(y**3)) == 'x^(y^3)' assert", "- y^2), {x, -Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def test_Sum(): assert", "+ x + 3], ' 'Function[{y}, Log[y*z]]]') def test_AlgebraicElement(): r = RootOf(x**7 +", "Catalan, Derivative, Dummy, E, Eq, EulerGamma, Function, Gt, Heaviside, Integer, Integral, Lambda, Le,", "0, Direction -> -1]]' e = Limit(sin(x)/x, x, 0, 
'-') assert mathematica_code(e) ==", "asin, asinh, atan, atanh, binomial, conjugate, cos, cosh, cot, coth, csch, erfc, exp,", "assert mathematica_code(Rational(18, 9)) == '2' assert mathematica_code(Rational(3, -7)) == '-3/7' assert mathematica_code(Rational(-3, -7))", "-oo, oo), (y, -oo, oo))) == \\ 'Hold[Sum[E^(-x^2 - y^2), {x, -Infinity, Infinity},", "im, log, loggamma, mathematica_code, meijerg, oo, pi, polygamma, polylog, re, rf, sech, sign,", "assert mathematica_code(Matrix()) == '{}' m = Matrix([[1, 2], [3, 4444]]) assert mathematica_code(m) ==", "z __all__ = () f = Function('f') def test_Integer(): assert mathematica_code(Integer(67)) == '67'", "mathematica_code(Rational(3, 7)) == '3/7' assert mathematica_code(Rational(18, 9)) == '2' assert mathematica_code(Rational(3, -7)) ==", "{}}, x]') assert (mathematica_code(hyper((1, 2, 3), (3, 4), x)) == 'HypergeometricPFQ[{1, 2, 3},", "== 'Factorial2[x]' assert mathematica_code(rf(x, y)) == 'Pochhammer[x, y]' assert mathematica_code(gamma(x)) == 'Gamma[x]' assert", "(3, 4))) == '{1, 2, {3, 4}}' assert mathematica_code([1]) == '{1}' assert mathematica_code((1,))", "mathematica_code(log(x)) == 'Log[x]' assert mathematica_code(tan(x)) == 'Tan[x]' assert mathematica_code(cot(x)) == 'Cot[x]' assert mathematica_code(asin(x))", "mathematica_code(zeta(x)) == 'Zeta[x]' assert mathematica_code(Heaviside(x)) == 'UnitStep[x]' assert mathematica_code(fibonacci(x)) == 'Fibonacci[x]' assert mathematica_code(polylog(x,", "x, y, x)) == 'Hold[D[y^4*Sin[x], x, y, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, (y, 3),", "+ y)' assert mathematica_code(x**-1.0) == 'x^(-1.0)' assert mathematica_code(x**Rational(2, 3)) == 'x^(2/3)' def test_Mul():", "== 'Conjugate[x]' assert mathematica_code(re(x)) == 'Re[x]' assert mathematica_code(im(x)) == 'Im[x]' assert mathematica_code(polygamma(x, y))", "zeta) from diofant.abc import x, y, z __all__ = () f = Function('f')", "Piecewise((0, Or(x <= -1, x >= 1)), (1 - x, x > 0),", "assert 
mathematica_code(MyFunc2(x, y), user_functions={'MyFunc2': [(lambda *x: False, 'Myfunc2')]}) == 'MyFunc2[x, y]' def test_Lambda():", "4)), ((1,), ()), x)) == 'MeijerG[{{1, 1}, {3, 4}}, {{1}, {}}, x]') assert", "y)) == 'Pochhammer[x, y]' assert mathematica_code(gamma(x)) == 'Gamma[x]' assert mathematica_code(zeta(x)) == 'Zeta[x]' assert", "{3, 4}}' assert mathematica_code([1]) == '{1}' assert mathematica_code((1,)) == '{1}' assert mathematica_code(Tuple(*[1, 2,", "test_constants(): assert mathematica_code(pi) == 'Pi' assert mathematica_code(oo) == 'Infinity' assert mathematica_code(-oo) == '-Infinity'", "Log[y*z]]]') def test_AlgebraicElement(): r = RootOf(x**7 + 3*x - 1, 3) K =", "csch, erfc, exp, factorial, factorial2, false, fibonacci, gamma, hyper, im, log, loggamma, mathematica_code,", "mathematica_code(Heaviside(x)) == 'UnitStep[x]' assert mathematica_code(fibonacci(x)) == 'Fibonacci[x]' assert mathematica_code(polylog(x, y)) == 'PolyLog[x, y]'", "mathematica_code(Derivative(x, x)) == 'Hold[D[x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, (x, 2))) == 'Hold[D[y^4*Sin[x], x, x]]'", "+ 3*# - 1 &, 4],' ' {1, 0, 3, 2, 1}]') def", "oo, pi, polygamma, polylog, re, rf, sech, sign, sin, sinh, symbols, tan, tanh,", "0, 3, 2, 1}]') def test_Limit(): e = Limit(sin(x)/x, x, 0) assert mathematica_code(e)", "mathematica_code(atanh(x), user_functions={'atanh': 'ArcTanh'}) == 'ArcTanh[x]' assert (mathematica_code(meijerg(((1, 1), (3, 4)), ((1,), ()), x))", "mathematica_code(Rational(-3, -7)) == '3/7' assert mathematica_code(x + Rational(3, 7)) == 'x + 3/7'", "x, y): pass pytest.raises(ValueError, lambda: mathematica_code(MyFunc1(x), user_functions={'MyFunc1': ['Myfunc1']})) assert mathematica_code(MyFunc1(x), user_functions={'MyFunc1': 'Myfunc1'}) ==", "x]]' assert mathematica_code(Derivative(x, x)) == 'Hold[D[x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, (x, 2))) == 'Hold[D[y^4*Sin[x],", "mathematica_code(erfc(x)) == 'Erfc[x]' assert 
mathematica_code(conjugate(x)) == 'Conjugate[x]' assert mathematica_code(re(x)) == 'Re[x]' assert mathematica_code(im(x))", "mathematica_code(Derivative(sin(x)*y**4, (x, 2))) == 'Hold[D[y^4*Sin[x], x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, y, x)) ==", "'Hold[Sum[Sin[x], {x, 0, 10}]]' assert mathematica_code(Sum(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo,", "'MyFunc2[x, y]' def test_Lambda(): f1 = Lambda(x, x**2) assert mathematica_code(f1) == 'Function[{x}, x^2]'", "\\ 'Hold[Integrate[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def", "assert mathematica_code(Max(x, 2)) == 'Max[2, x]' # issue sympy/sympy#15344 assert mathematica_code(binomial(x, y)) ==", "\\ 'Hold[Sum[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def", "'{y, -Infinity, Infinity}]]' def test_Matrix(): assert mathematica_code(Matrix()) == '{}' m = Matrix([[1, 2],", "mathematica_code(Derivative(f(x), x, x)) == 'Hold[D[f[x], x, x]]' assert mathematica_code(Derivative(sin(x), x)) == 'Hold[D[Sin[x], x]]'", "m = Matrix([[1, 2], [3, 4444]]) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2},", "== 'Root[#^3 + #*y + 1 &, 1]' def test_RootSum(): r = RootSum(x**3", "== 'ArcTanh[x]' assert mathematica_code(acoth(x)) == 'ArcCoth[x]' assert mathematica_code(sech(x)) == 'Sech[x]' assert mathematica_code(csch(x)) ==", "2)) == 'Max[2, x]' # issue sympy/sympy#15344 assert mathematica_code(binomial(x, y)) == 'Binomial[x, y]'", "'Cosh[x]' assert mathematica_code(tanh(x)) == 'Tanh[x]' assert mathematica_code(coth(x)) == 'Coth[x]' assert mathematica_code(asinh(x)) == 'ArcSinh[x]'", "'PolyGamma[x, y]' assert mathematica_code(factorial(x)) == 'Factorial[x]' assert mathematica_code(factorial2(x)) == 'Factorial2[x]' assert mathematica_code(rf(x, y))", "import x, y, z __all__ = () f = Function('f') def test_Integer(): assert", "Limit, Matrix, Max, Min, Ne, Or, Piecewise, Poly, Rational, RootOf, RootSum, SparseMatrix, Sum,", 
"def eval(cls, x): pass class MyFunc2(Function): @classmethod def eval(cls, x, y): pass pytest.raises(ValueError,", "'x^(y^3)' assert mathematica_code(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) == \\ '(3.5*f[x])^(-x + y^x)/(x^2 +", "y, y, y, x]]' def test_Pow(): assert mathematica_code(x**3) == 'x^3' assert mathematica_code(x**(y**3)) ==", "- 1, 3) K = QQ.algebraic_field(r) a = K([1, 0, 3, 2, 1])", "[9, 10], 11]) == \\ '{1, 2, 3, {4, 5, {6, 7}}, 8,", "(mathematica_code(meijerg(((1, 1), (3, 4)), ((1,), ()), x)) == 'MeijerG[{{1, 1}, {3, 4}}, {{1},", "\\ '{1, 2, 3, {4, 5, {6, 7}}, 8, {9, 10}, 11}' assert", "'Infinity' assert mathematica_code(-oo) == '-Infinity' assert mathematica_code(EulerGamma) == 'EulerGamma' assert mathematica_code(Catalan) == 'Catalan'", "Poly, Rational, RootOf, RootSum, SparseMatrix, Sum, Tuple, acos, acosh, acot, acoth, asin, asinh,", "x)) == 'Hold[D[Sin[x], x]]' assert mathematica_code(Derivative(x, x)) == 'Hold[D[x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, (x,", "== 'x^(-1.0)' assert mathematica_code(x**Rational(2, 3)) == 'x^(2/3)' def test_Mul(): A, B, C, D", "'67' assert mathematica_code(Integer(-1)) == '-1' def test_Rational(): assert mathematica_code(Rational(3, 7)) == '3/7' assert", "0, 3, 2, 1]) assert mathematica_code(a) == ('AlgebraicNumber[Root[#^7 + 3*# - 1 &,", "3, [4, 5, [6, 7]], 8, [9, 10], 11]) == \\ '{1, 2,", "== '{}' m = Matrix([[1, 2], [3, 4444]]) assert mathematica_code(m) == mathematica_code(m.as_immutable()) ==", "-oo, oo))) == \\ 'Hold[Sum[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \\ '{y,", "'Tanh[x]' assert mathematica_code(coth(x)) == 'Coth[x]' assert mathematica_code(asinh(x)) == 'ArcSinh[x]' assert mathematica_code(acosh(x)) == 'ArcCosh[x]'", "5, [6, 7]], 8, [9, 10], 11]) == \\ '{1, 2, 3, {4,", "== f'd{d.dummy_index}' def test_Function(): assert mathematica_code(f(x, y, z)) == 'f[x, y, z]' assert", "diofant import (QQ, Catalan, Derivative, Dummy, E, Eq, EulerGamma, Function, Gt, Heaviside, Integer,", "oo), (y, 
-oo, oo))) == \\ 'Hold[Sum[E^(-x^2 - y^2), {x, -Infinity, Infinity}, '", "x, x > 0), (1 + x, True)) assert (mathematica_code(g) == 'Piecewise[{{0, x", "assert mathematica_code(acos(x)) == 'ArcCos[x]' assert mathematica_code(atan(x)) == 'ArcTan[x]' assert mathematica_code(acot(x)) == 'ArcCot[x]' assert", "0, Direction -> 1]]' e = Limit(sin(x)/x, x, 0, 'real') assert mathematica_code(e) ==", "== '{{1, 2}, {3, 4444}}' def test_Relational(): assert mathematica_code(Eq(x, y)) == 'x ==", "== y' assert mathematica_code(Ne(x, y/(1 + y**2))) == 'x != (y/(y^2 + 1))'", "mathematica_code(EulerGamma) == 'EulerGamma' assert mathematica_code(Catalan) == 'Catalan' assert mathematica_code(E) == 'E' def test_containers():", "assert mathematica_code(coth(x)) == 'Coth[x]' assert mathematica_code(asinh(x)) == 'ArcSinh[x]' assert mathematica_code(acosh(x)) == 'ArcCosh[x]' assert", "[6, 7]], 8, [9, 10], 11]) == \\ '{1, 2, 3, {4, 5,", "== 'Sign[x]' assert mathematica_code(atanh(x), user_functions={'atanh': 'ArcTanh'}) == 'ArcTanh[x]' assert (mathematica_code(meijerg(((1, 1), (3, 4)),", "binomial, conjugate, cos, cosh, cot, coth, csch, erfc, exp, factorial, factorial2, false, fibonacci,", "1 || x <= -1}, ' '{-x + 1, x > 0}, {x", "assert mathematica_code(binomial(x, y)) == 'Binomial[x, y]' assert mathematica_code(log(x)) == 'Log[x]' assert mathematica_code(tan(x)) ==", "assert mathematica_code(oo) == 'Infinity' assert mathematica_code(-oo) == '-Infinity' assert mathematica_code(EulerGamma) == 'EulerGamma' assert", "7]], 8, [9, 10], 11]) == \\ '{1, 2, 3, {4, 5, {6,", "f'd{d.dummy_index}' def test_Function(): assert mathematica_code(f(x, y, z)) == 'f[x, y, z]' assert mathematica_code(sin(x)", "'0 <= x^2' assert mathematica_code(Gt(pi, 3, evaluate=False)) == 'Pi > 3' def test_Booleans():", "def test_containers(): assert mathematica_code([1, 2, 3, [4, 5, [6, 7]], 8, [9, 10],", "3], ' 'Function[{y}, Log[y*z]]]') def test_AlgebraicElement(): r = RootOf(x**7 + 3*x - 1,", "x)) == 'MeijerG[{{1, 1}, 
{3, 4}}, {{1}, {}}, x]') assert (mathematica_code(hyper((1, 2, 3),", "2}, {3, 4444}}' def test_Relational(): assert mathematica_code(Eq(x, y)) == 'x == y' assert", "== 'Max[2, x]' # issue sympy/sympy#15344 assert mathematica_code(binomial(x, y)) == 'Binomial[x, y]' assert", "== 'Gamma[x]' assert mathematica_code(zeta(x)) == 'Zeta[x]' assert mathematica_code(Heaviside(x)) == 'UnitStep[x]' assert mathematica_code(fibonacci(x)) ==", "test_Derivative(): assert mathematica_code(Derivative(f(x), x, x)) == 'Hold[D[f[x], x, x]]' assert mathematica_code(Derivative(sin(x), x)) ==", "cosh, cot, coth, csch, erfc, exp, factorial, factorial2, false, fibonacci, gamma, hyper, im,", "'Hold[D[Sin[x], x]]' assert mathematica_code(Derivative(x, x)) == 'Hold[D[x, x]]' assert mathematica_code(Derivative(sin(x)*y**4, (x, 2))) ==", "d = Dummy('d') assert mathematica_code(d) == f'd{d.dummy_index}' def test_Function(): assert mathematica_code(f(x, y, z))", "x^3 + x + 3], ' 'Function[{y}, Log[y*z]]]') def test_AlgebraicElement(): r = RootOf(x**7", "4],' ' {1, 0, 3, 2, 1}]') def test_Limit(): e = Limit(sin(x)/x, x,", "Infinity}]]' def test_Sum(): assert mathematica_code(Sum(sin(x), (x, 0, 10))) == 'Hold[Sum[Sin[x], {x, 0, 10}]]'", "Function, Gt, Heaviside, Integer, Integral, Lambda, Le, Limit, Matrix, Max, Min, Ne, Or,", "y]' assert mathematica_code(gamma(x)) == 'Gamma[x]' assert mathematica_code(zeta(x)) == 'Zeta[x]' assert mathematica_code(Heaviside(x)) == 'UnitStep[x]'", "Poly(x**3 + y*x + 1, x) assert mathematica_code(RootOf(p, 0)) == 'Root[#^3 + #*y", "2}, {3, 4444}}' m = SparseMatrix(m) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2},", "Piecewise, Poly, Rational, RootOf, RootSum, SparseMatrix, Sum, Tuple, acos, acosh, acot, acoth, asin,", "sign, sin, sinh, symbols, tan, tanh, true, zeta) from diofant.abc import x, y,", "mathematica_code(x**(y**3)) == 'x^(y^3)' assert mathematica_code(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) == \\ '(3.5*f[x])^(-x +", "Rational, 
RootOf, RootSum, SparseMatrix, Sum, Tuple, acos, acosh, acot, acoth, asin, asinh, atan,", "<= x^2' assert mathematica_code(Gt(pi, 3, evaluate=False)) == 'Pi > 3' def test_Booleans(): assert", "= Matrix([[1, 2], [3, 4444]]) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3,", "mathematica_code(Max(x, y)) == 'Max[x, y]' assert mathematica_code(Max(x, 2)) == 'Max[2, x]' # issue", "assert mathematica_code(Sum(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))) == \\ 'Hold[Sum[E^(-x^2", "mathematica_code(x**Rational(2, 3)) == 'x^(2/3)' def test_Mul(): A, B, C, D = symbols('A B", "log, loggamma, mathematica_code, meijerg, oo, pi, polygamma, polylog, re, rf, sech, sign, sin,", "tan, tanh, true, zeta) from diofant.abc import x, y, z __all__ = ()", "== \\ '{1, 2, 3, {4, 5, {6, 7}}, 8, {9, 10}, 11}'", "'Zeta[x]' assert mathematica_code(Heaviside(x)) == 'UnitStep[x]' assert mathematica_code(fibonacci(x)) == 'Fibonacci[x]' assert mathematica_code(polylog(x, y)) ==", "== '0 <= x^2' assert mathematica_code(Gt(pi, 3, evaluate=False)) == 'Pi > 3' def", "== 'ArcTan[x]' assert mathematica_code(acot(x)) == 'ArcCot[x]' assert mathematica_code(sinh(x)) == 'Sinh[x]' assert mathematica_code(cosh(x)) ==", "'{}' m = Matrix([[1, 2], [3, 4444]]) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1,", "x -> 0, Direction -> -1]]' e = Limit(sin(x)/x, x, 0, '-') assert", "assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' def test_Relational(): assert mathematica_code(Eq(x,", "mathematica_code(x + Rational(3, 7)) == 'x + 3/7' assert mathematica_code(Rational(3, 7)*x) == '(3/7)*x'", "Heaviside, Integer, Integral, Lambda, Le, Limit, Matrix, Max, Min, Ne, Or, Piecewise, Poly,", "4))) == '{1, 2, {3, 4}}' assert mathematica_code([1]) == '{1}' assert mathematica_code((1,)) ==", "- y^2), {x, -Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def test_Matrix(): assert", "1))' assert mathematica_code(Le(0, x**2)) 
== '0 <= x^2' assert mathematica_code(Gt(pi, 3, evaluate=False)) ==", "'Myfunc2')]}) == 'MyFunc2[x, y]' def test_Lambda(): f1 = Lambda(x, x**2) assert mathematica_code(f1) ==", "-Infinity, Infinity}]]' def test_Sum(): assert mathematica_code(Sum(sin(x), (x, 0, 10))) == 'Hold[Sum[Sin[x], {x, 0,", "+ D)*A*y) == 'x*y*A**B**(C + D)**A' def test_constants(): assert mathematica_code(pi) == 'Pi' assert", "p = Poly(x**3 + y*x + 1, x) assert mathematica_code(RootOf(p, 0)) == 'Root[#^3", "e = Limit(sin(x)/x, x, 0, 'real') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0,", "test_symbols(): assert mathematica_code(x) == 'x' d = Dummy('d') assert mathematica_code(d) == f'd{d.dummy_index}' def", "'{y, -Infinity, Infinity}]]' def test_Sum(): assert mathematica_code(Sum(sin(x), (x, 0, 10))) == 'Hold[Sum[Sin[x], {x,", "pass class MyFunc2(Function): @classmethod def eval(cls, x, y): pass pytest.raises(ValueError, lambda: mathematica_code(MyFunc1(x), user_functions={'MyFunc1':", "10}, 11}' assert mathematica_code((1, 2, (3, 4))) == '{1, 2, {3, 4}}' assert", "y)) == \\ '(3.5*f[x])^(-x + y^x)/(x^2 + y)' assert mathematica_code(x**-1.0) == 'x^(-1.0)' assert", "4444}}' def test_Relational(): assert mathematica_code(Eq(x, y)) == 'x == y' assert mathematica_code(Ne(x, y/(1", "4}, x]') assert mathematica_code(Min(x, y)) == 'Min[x, y]' assert mathematica_code(Max(x, y)) == 'Max[x,", "'-') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> 1]]' e =", "atanh, binomial, conjugate, cos, cosh, cot, coth, csch, erfc, exp, factorial, factorial2, false,", "== 'Pi > 3' def test_Booleans(): assert mathematica_code(true) == 'True' assert mathematica_code(false) ==", "mathematica_code(conjugate(x)) == 'Conjugate[x]' assert mathematica_code(re(x)) == 'Re[x]' assert mathematica_code(im(x)) == 'Im[x]' assert mathematica_code(polygamma(x,", "'E' def test_containers(): assert mathematica_code([1, 2, 3, [4, 5, [6, 7]], 8, [9,", "'Hold[Limit[Sin[x]/x, x -> 0, Direction -> 1]]' 
e = Limit(sin(x)/x, x, 0, 'real')", "user_functions={'atanh': 'ArcTanh'}) == 'ArcTanh[x]' assert (mathematica_code(meijerg(((1, 1), (3, 4)), ((1,), ()), x)) ==", "+ 2*y) assert mathematica_code(f2) == 'Function[{x, y}, x + 2*y]' def test_Derivative(): assert", "assert mathematica_code(asin(x)) == 'ArcSin[x]' assert mathematica_code(acos(x)) == 'ArcCos[x]' assert mathematica_code(atan(x)) == 'ArcTan[x]' assert", "2*y) assert mathematica_code(f2) == 'Function[{x, y}, x + 2*y]' def test_Derivative(): assert mathematica_code(Derivative(f(x),", "assert mathematica_code(im(x)) == 'Im[x]' assert mathematica_code(polygamma(x, y)) == 'PolyGamma[x, y]' assert mathematica_code(factorial(x)) ==", "mathematica_code(loggamma(x)) == 'LogGamma[x]' class MyFunc1(Function): @classmethod def eval(cls, x): pass class MyFunc2(Function): @classmethod", "assert mathematica_code(Max(x, y)) == 'Max[x, y]' assert mathematica_code(Max(x, 2)) == 'Max[2, x]' #", "== 'Pochhammer[x, y]' assert mathematica_code(gamma(x)) == 'Gamma[x]' assert mathematica_code(zeta(x)) == 'Zeta[x]' assert mathematica_code(Heaviside(x))", "3, 2, 1}]') def test_Limit(): e = Limit(sin(x)/x, x, 0) assert mathematica_code(e) ==", "x)) == 'Hold[D[y^4*Sin[x], x, y, y, y, x]]' def test_Pow(): assert mathematica_code(x**3) ==", "'x + 3/7' assert mathematica_code(Rational(3, 7)*x) == '(3/7)*x' def test_symbols(): assert mathematica_code(x) ==", "mathematica_code([1]) == '{1}' assert mathematica_code((1,)) == '{1}' assert mathematica_code(Tuple(*[1, 2, 3])) == '{1,", "'EulerGamma' assert mathematica_code(Catalan) == 'Catalan' assert mathematica_code(E) == 'E' def test_containers(): assert mathematica_code([1,", "= () f = Function('f') def test_Integer(): assert mathematica_code(Integer(67)) == '67' assert mathematica_code(Integer(-1))", "+ Rational(3, 7)) == 'x + 3/7' assert mathematica_code(Rational(3, 7)*x) == '(3/7)*x' def", "3/7' assert mathematica_code(Rational(3, 7)*x) == '(3/7)*x' def test_symbols(): assert 
mathematica_code(x) == 'x' d", "== mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' def test_Relational(): assert mathematica_code(Eq(x, y)) ==", "'(3.5*f[x])^(-x + y^x)/(x^2 + y)' assert mathematica_code(x**-1.0) == 'x^(-1.0)' assert mathematica_code(x**Rational(2, 3)) ==", "3) K = QQ.algebraic_field(r) a = K([1, 0, 3, 2, 1]) assert mathematica_code(a)", "= K([1, 0, 3, 2, 1]) assert mathematica_code(a) == ('AlgebraicNumber[Root[#^7 + 3*# -", "assert mathematica_code(MyFunc1(x), user_functions={'MyFunc1': 'Myfunc1'}) == 'Myfunc1[x]' assert mathematica_code(MyFunc2(x, y), user_functions={'MyFunc2': [(lambda *x: False,", "printing tests.\"\"\" import pytest from diofant import (QQ, Catalan, Derivative, Dummy, E, Eq,", "y]' assert mathematica_code(Max(x, 2)) == 'Max[2, x]' # issue sympy/sympy#15344 assert mathematica_code(binomial(x, y))", "'Sech[x]' assert mathematica_code(csch(x)) == 'Csch[x]' assert mathematica_code(erfc(x)) == 'Erfc[x]' assert mathematica_code(conjugate(x)) == 'Conjugate[x]'", "3, Lambda(y, log(y*z))) assert mathematica_code(r) == ('RootSum[Function[{x}, x^3 + x + 3], '", "assert mathematica_code(x*y*A) == 'x*y*A' assert mathematica_code(x*y*A*B) == 'x*y*A**B' assert mathematica_code(x*y*A*B*C) == 'x*y*A**B**C' assert", "assert mathematica_code(loggamma(x)) == 'LogGamma[x]' class MyFunc1(Function): @classmethod def eval(cls, x): pass class MyFunc2(Function):", "== '(3/7)*x' def test_symbols(): assert mathematica_code(x) == 'x' d = Dummy('d') assert mathematica_code(d)", "'PolyLog[x, y]' assert mathematica_code(loggamma(x)) == 'LogGamma[x]' class MyFunc1(Function): @classmethod def eval(cls, x): pass", "x, y, y, y, x]]' def test_Pow(): assert mathematica_code(x**3) == 'x^3' assert mathematica_code(x**(y**3))", "3)) == 'x^(2/3)' def test_Mul(): A, B, C, D = symbols('A B C", "'x' d = Dummy('d') assert mathematica_code(d) == f'd{d.dummy_index}' def test_Function(): assert mathematica_code(f(x, y,", "x): pass class MyFunc2(Function): 
@classmethod def eval(cls, x, y): pass pytest.raises(ValueError, lambda: mathematica_code(MyFunc1(x),", "def test_Integral(): assert mathematica_code(Integral(sin(sin(x)), x)) == 'Hold[Integrate[Sin[Sin[x]], x]]' assert mathematica_code(Integral(exp(-x**2 - y**2), (x,", "hyper, im, log, loggamma, mathematica_code, meijerg, oo, pi, polygamma, polylog, re, rf, sech,", "2, 1}]') def test_Limit(): e = Limit(sin(x)/x, x, 0) assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x,", "== 'Max[x, y]' assert mathematica_code(Max(x, 2)) == 'Max[2, x]' # issue sympy/sympy#15344 assert", "8, {9, 10}, 11}' assert mathematica_code((1, 2, (3, 4))) == '{1, 2, {3,", "'LogGamma[x]' class MyFunc1(Function): @classmethod def eval(cls, x): pass class MyFunc2(Function): @classmethod def eval(cls,", "assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' m = SparseMatrix(m) assert", "mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' def test_Relational(): assert mathematica_code(Eq(x, y))", "+ y**2))) == 'x != (y/(y^2 + 1))' assert mathematica_code(Le(0, x**2)) == '0", "'Max[2, x]' # issue sympy/sympy#15344 assert mathematica_code(binomial(x, y)) == 'Binomial[x, y]' assert mathematica_code(log(x))", "x, y, z __all__ = () f = Function('f') def test_Integer(): assert mathematica_code(Integer(67))", "mathematica_code(binomial(x, y)) == 'Binomial[x, y]' assert mathematica_code(log(x)) == 'Log[x]' assert mathematica_code(tan(x)) == 'Tan[x]'", "Min, Ne, Or, Piecewise, Poly, Rational, RootOf, RootSum, SparseMatrix, Sum, Tuple, acos, acosh,", "{3, 4444}}' def test_Relational(): assert mathematica_code(Eq(x, y)) == 'x == y' assert mathematica_code(Ne(x,", "mathematica_code(Derivative(sin(x)*y**4, x, y, x)) == 'Hold[D[y^4*Sin[x], x, y, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, (y,", "assert mathematica_code(factorial2(x)) == 'Factorial2[x]' assert mathematica_code(rf(x, y)) == 'Pochhammer[x, y]' assert 
mathematica_code(gamma(x)) ==", "y}, x + 2*y]' def test_Derivative(): assert mathematica_code(Derivative(f(x), x, x)) == 'Hold[D[f[x], x,", "QQ.algebraic_field(r) a = K([1, 0, 3, 2, 1]) assert mathematica_code(a) == ('AlgebraicNumber[Root[#^7 +", "Matrix, Max, Min, Ne, Or, Piecewise, Poly, Rational, RootOf, RootSum, SparseMatrix, Sum, Tuple,", "assert mathematica_code(polylog(x, y)) == 'PolyLog[x, y]' assert mathematica_code(loggamma(x)) == 'LogGamma[x]' class MyFunc1(Function): @classmethod", "x + 2*y]' def test_Derivative(): assert mathematica_code(Derivative(f(x), x, x)) == 'Hold[D[f[x], x, x]]'", "assert mathematica_code(Derivative(sin(x)*y**4, x, y, x)) == 'Hold[D[y^4*Sin[x], x, y, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x,", "assert mathematica_code(true) == 'True' assert mathematica_code(false) == 'False' def test_Piecewise(): g = Piecewise((0,", "x)) == 'Hold[D[y^4*Sin[x], x, y, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, (y, 3), x)) ==", "def test_Relational(): assert mathematica_code(Eq(x, y)) == 'x == y' assert mathematica_code(Ne(x, y/(1 +", "'Conjugate[x]' assert mathematica_code(re(x)) == 'Re[x]' assert mathematica_code(im(x)) == 'Im[x]' assert mathematica_code(polygamma(x, y)) ==", "(y, -oo, oo))) == \\ 'Hold[Integrate[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \\", "polylog, re, rf, sech, sign, sin, sinh, symbols, tan, tanh, true, zeta) from", "EulerGamma, Function, Gt, Heaviside, Integer, Integral, Lambda, Le, Limit, Matrix, Max, Min, Ne,", "y]' assert mathematica_code(Max(x, y)) == 'Max[x, y]' assert mathematica_code(Max(x, 2)) == 'Max[2, x]'", "'x*y*z' assert mathematica_code(x*y*A) == 'x*y*A' assert mathematica_code(x*y*A*B) == 'x*y*A**B' assert mathematica_code(x*y*A*B*C) == 'x*y*A**B**C'", "x**2)) == '0 <= x^2' assert mathematica_code(Gt(pi, 3, evaluate=False)) == 'Pi > 3'", "assert mathematica_code(log(x)) == 'Log[x]' assert mathematica_code(tan(x)) == 'Tan[x]' assert mathematica_code(cot(x)) == 'Cot[x]' assert", "== 
'MyFunc2[x, y]' def test_Lambda(): f1 = Lambda(x, x**2) assert mathematica_code(f1) == 'Function[{x},", "== '-Infinity' assert mathematica_code(EulerGamma) == 'EulerGamma' assert mathematica_code(Catalan) == 'Catalan' assert mathematica_code(E) ==", "y)) == 'Binomial[x, y]' assert mathematica_code(log(x)) == 'Log[x]' assert mathematica_code(tan(x)) == 'Tan[x]' assert", "mathematica_code(Derivative(sin(x), x)) == 'Hold[D[Sin[x], x]]' assert mathematica_code(Derivative(x, x)) == 'Hold[D[x, x]]' assert mathematica_code(Derivative(sin(x)*y**4,", "e = Limit(sin(x)/x, x, 0, '-') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0,", "coth, csch, erfc, exp, factorial, factorial2, false, fibonacci, gamma, hyper, im, log, loggamma,", "y, x)) == 'Hold[D[y^4*Sin[x], x, y, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, (y, 3), x))", "mathematica_code(x**-1.0) == 'x^(-1.0)' assert mathematica_code(x**Rational(2, 3)) == 'x^(2/3)' def test_Mul(): A, B, C,", "assert mathematica_code(polygamma(x, y)) == 'PolyGamma[x, y]' assert mathematica_code(factorial(x)) == 'Factorial[x]' assert mathematica_code(factorial2(x)) ==", "== 'x*y*A**B' assert mathematica_code(x*y*A*B*C) == 'x*y*A**B**C' assert mathematica_code(x*A*B*(C + D)*A*y) == 'x*y*A**B**(C +", "test_AlgebraicElement(): r = RootOf(x**7 + 3*x - 1, 3) K = QQ.algebraic_field(r) a", "= RootOf(x**7 + 3*x - 1, 3) K = QQ.algebraic_field(r) a = K([1,", "assert mathematica_code(atanh(x)) == 'ArcTanh[x]' assert mathematica_code(acoth(x)) == 'ArcCoth[x]' assert mathematica_code(sech(x)) == 'Sech[x]' assert", "'{1}' assert mathematica_code((1,)) == '{1}' assert mathematica_code(Tuple(*[1, 2, 3])) == '{1, 2, 3}'", "3), (3, 4), x)) == 'HypergeometricPFQ[{1, 2, 3}, {3, 4}, x]') assert mathematica_code(Min(x,", "== 'Log[x]' assert mathematica_code(tan(x)) == 'Tan[x]' assert mathematica_code(cot(x)) == 'Cot[x]' assert mathematica_code(asin(x)) ==", "mathematica_code(re(x)) == 'Re[x]' assert mathematica_code(im(x)) == 'Im[x]' 
assert mathematica_code(polygamma(x, y)) == 'PolyGamma[x, y]'", "Lambda, Le, Limit, Matrix, Max, Min, Ne, Or, Piecewise, Poly, Rational, RootOf, RootSum,", "assert mathematica_code(x**3) == 'x^3' assert mathematica_code(x**(y**3)) == 'x^(y^3)' assert mathematica_code(1/(f(x)*3.5)**(x - y**x)/(x**2 +", "Dummy, E, Eq, EulerGamma, Function, Gt, Heaviside, Integer, Integral, Lambda, Le, Limit, Matrix,", "Limit(sin(x)/x, x, 0, 'real') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction ->", ">= 1)), (1 - x, x > 0), (1 + x, True)) assert", "mathematica_code(atanh(x)) == 'ArcTanh[x]' assert mathematica_code(acoth(x)) == 'ArcCoth[x]' assert mathematica_code(sech(x)) == 'Sech[x]' assert mathematica_code(csch(x))", "Integral, Lambda, Le, Limit, Matrix, Max, Min, Ne, Or, Piecewise, Poly, Rational, RootOf,", "{{1}, {}}, x]') assert (mathematica_code(hyper((1, 2, 3), (3, 4), x)) == 'HypergeometricPFQ[{1, 2,", "== 'Cosh[x]' assert mathematica_code(tanh(x)) == 'Tanh[x]' assert mathematica_code(coth(x)) == 'Coth[x]' assert mathematica_code(asinh(x)) ==", "== '{1, 2, {3, 4}}' assert mathematica_code([1]) == '{1}' assert mathematica_code((1,)) == '{1}'", "Lambda(y, log(y*z))) assert mathematica_code(r) == ('RootSum[Function[{x}, x^3 + x + 3], ' 'Function[{y},", "m = SparseMatrix(m) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' def", "def test_RootSum(): r = RootSum(x**3 + x + 3, Lambda(y, log(y*z))) assert mathematica_code(r)", "assert mathematica_code(asinh(x)) == 'ArcSinh[x]' assert mathematica_code(acosh(x)) == 'ArcCosh[x]' assert mathematica_code(atanh(x)) == 'ArcTanh[x]' assert", "mathematica_code(factorial(x)) == 'Factorial[x]' assert mathematica_code(factorial2(x)) == 'Factorial2[x]' assert mathematica_code(rf(x, y)) == 'Pochhammer[x, y]'", "== '{{1, 2}, {3, 4444}}' m = SparseMatrix(m) assert mathematica_code(m) == mathematica_code(m.as_immutable()) ==", "Or(x <= -1, x >= 1)), (1 - x, x > 0), (1", "assert 
mathematica_code(Rational(3, 7)) == '3/7' assert mathematica_code(Rational(18, 9)) == '2' assert mathematica_code(Rational(3, -7))", "== 'E' def test_containers(): assert mathematica_code([1, 2, 3, [4, 5, [6, 7]], 8,", "Or, Piecewise, Poly, Rational, RootOf, RootSum, SparseMatrix, Sum, Tuple, acos, acosh, acot, acoth,", "== ('RootSum[Function[{x}, x^3 + x + 3], ' 'Function[{y}, Log[y*z]]]') def test_AlgebraicElement(): r", "assert mathematica_code(csch(x)) == 'Csch[x]' assert mathematica_code(erfc(x)) == 'Erfc[x]' assert mathematica_code(conjugate(x)) == 'Conjugate[x]' assert", "'Tan[x]' assert mathematica_code(cot(x)) == 'Cot[x]' assert mathematica_code(asin(x)) == 'ArcSin[x]' assert mathematica_code(acos(x)) == 'ArcCos[x]'", "== 'Hold[Integrate[Sin[Sin[x]], x]]' assert mathematica_code(Integral(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo)))", "'{1, 2, 3}' def test_Integral(): assert mathematica_code(Integral(sin(sin(x)), x)) == 'Hold[Integrate[Sin[Sin[x]], x]]' assert mathematica_code(Integral(exp(-x**2", "== 'ArcSin[x]' assert mathematica_code(acos(x)) == 'ArcCos[x]' assert mathematica_code(atan(x)) == 'ArcTan[x]' assert mathematica_code(acot(x)) ==", "' \\ '{y, -Infinity, Infinity}]]' def test_Sum(): assert mathematica_code(Sum(sin(x), (x, 0, 10))) ==", "A, B, C, D = symbols('A B C D', commutative=False) assert mathematica_code(x*y*z) ==", "mathematica_code(x) == 'x' d = Dummy('d') assert mathematica_code(d) == f'd{d.dummy_index}' def test_Function(): assert", "'x*y*A' assert mathematica_code(x*y*A*B) == 'x*y*A**B' assert mathematica_code(x*y*A*B*C) == 'x*y*A**B**C' assert mathematica_code(x*A*B*(C + D)*A*y)", "mathematica_code(MyFunc1(x), user_functions={'MyFunc1': ['Myfunc1']})) assert mathematica_code(MyFunc1(x), user_functions={'MyFunc1': 'Myfunc1'}) == 'Myfunc1[x]' assert mathematica_code(MyFunc2(x, y), user_functions={'MyFunc2':", "assert mathematica_code(Rational(-3, -7)) == '3/7' assert mathematica_code(x + Rational(3, 7)) == 'x +", "'{{1, 2}, {3, 
4444}}' def test_Relational(): assert mathematica_code(Eq(x, y)) == 'x == y'", "== 'ArcCosh[x]' assert mathematica_code(atanh(x)) == 'ArcTanh[x]' assert mathematica_code(acoth(x)) == 'ArcCoth[x]' assert mathematica_code(sech(x)) ==", "'Factorial2[x]' assert mathematica_code(rf(x, y)) == 'Pochhammer[x, y]' assert mathematica_code(gamma(x)) == 'Gamma[x]' assert mathematica_code(zeta(x))", "mathematica_code(Sum(sin(x), (x, 0, 10))) == 'Hold[Sum[Sin[x], {x, 0, 10}]]' assert mathematica_code(Sum(exp(-x**2 - y**2),", "-Infinity, Infinity}]]' def test_Matrix(): assert mathematica_code(Matrix()) == '{}' m = Matrix([[1, 2], [3,", "mathematica_code(Catalan) == 'Catalan' assert mathematica_code(E) == 'E' def test_containers(): assert mathematica_code([1, 2, 3,", "'True' assert mathematica_code(false) == 'False' def test_Piecewise(): g = Piecewise((0, Or(x <= -1,", "= Dummy('d') assert mathematica_code(d) == f'd{d.dummy_index}' def test_Function(): assert mathematica_code(f(x, y, z)) ==", "0}, {x + 1, True}}]') def test_RootOf(): p = Poly(x**3 + y*x +", "7)) == '3/7' assert mathematica_code(Rational(18, 9)) == '2' assert mathematica_code(Rational(3, -7)) == '-3/7'", "3), x)) == 'Hold[D[y^4*Sin[x], x, y, y, y, x]]' def test_Pow(): assert mathematica_code(x**3)", "'3/7' assert mathematica_code(Rational(18, 9)) == '2' assert mathematica_code(Rational(3, -7)) == '-3/7' assert mathematica_code(Rational(-3,", "x]]' assert mathematica_code(Integral(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))) == \\", "y**2))) == 'x != (y/(y^2 + 1))' assert mathematica_code(Le(0, x**2)) == '0 <=", "assert mathematica_code(sin(x) ** cos(x)) == 'Sin[x]^Cos[x]' assert mathematica_code(sign(x)) == 'Sign[x]' assert mathematica_code(atanh(x), user_functions={'atanh':", "x + 3], ' 'Function[{y}, Log[y*z]]]') def test_AlgebraicElement(): r = RootOf(x**7 + 3*x", "+ 3, Lambda(y, log(y*z))) assert mathematica_code(r) == ('RootSum[Function[{x}, x^3 + x + 3],", "== 'x*y*A**B**(C + D)**A' def test_constants(): 
assert mathematica_code(pi) == 'Pi' assert mathematica_code(oo) ==", "== 'ArcCot[x]' assert mathematica_code(sinh(x)) == 'Sinh[x]' assert mathematica_code(cosh(x)) == 'Cosh[x]' assert mathematica_code(tanh(x)) ==", "assert mathematica_code(false) == 'False' def test_Piecewise(): g = Piecewise((0, Or(x <= -1, x", "' {1, 0, 3, 2, 1}]') def test_Limit(): e = Limit(sin(x)/x, x, 0)", "f2 = Lambda((x, y), x + 2*y) assert mathematica_code(f2) == 'Function[{x, y}, x", "'f[x, y, z]' assert mathematica_code(sin(x) ** cos(x)) == 'Sin[x]^Cos[x]' assert mathematica_code(sign(x)) == 'Sign[x]'", "assert mathematica_code([1]) == '{1}' assert mathematica_code((1,)) == '{1}' assert mathematica_code(Tuple(*[1, 2, 3])) ==", "1)), (1 - x, x > 0), (1 + x, True)) assert (mathematica_code(g)", "True)) assert (mathematica_code(g) == 'Piecewise[{{0, x >= 1 || x <= -1}, '", "= SparseMatrix(m) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}' def test_Relational():", "mathematica_code((1,)) == '{1}' assert mathematica_code(Tuple(*[1, 2, 3])) == '{1, 2, 3}' def test_Integral():", "+ 1, True}}]') def test_RootOf(): p = Poly(x**3 + y*x + 1, x)", "{3, 4444}}' m = SparseMatrix(m) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3,", "atan, atanh, binomial, conjugate, cos, cosh, cot, coth, csch, erfc, exp, factorial, factorial2,", "'x^(2/3)' def test_Mul(): A, B, C, D = symbols('A B C D', commutative=False)", "assert mathematica_code(E) == 'E' def test_containers(): assert mathematica_code([1, 2, 3, [4, 5, [6,", "(x, -oo, oo), (y, -oo, oo))) == \\ 'Hold[Integrate[E^(-x^2 - y^2), {x, -Infinity,", "assert mathematica_code(cosh(x)) == 'Cosh[x]' assert mathematica_code(tanh(x)) == 'Tanh[x]' assert mathematica_code(coth(x)) == 'Coth[x]' assert", "== 'Hold[D[y^4*Sin[x], x, y, y, y, x]]' def test_Pow(): assert mathematica_code(x**3) == 'x^3'", "def test_Integer(): assert mathematica_code(Integer(67)) == '67' assert 
mathematica_code(Integer(-1)) == '-1' def test_Rational(): assert", "assert mathematica_code(Catalan) == 'Catalan' assert mathematica_code(E) == 'E' def test_containers(): assert mathematica_code([1, 2,", "11]) == \\ '{1, 2, 3, {4, 5, {6, 7}}, 8, {9, 10},", "-7)) == '3/7' assert mathematica_code(x + Rational(3, 7)) == 'x + 3/7' assert", "assert mathematica_code(erfc(x)) == 'Erfc[x]' assert mathematica_code(conjugate(x)) == 'Conjugate[x]' assert mathematica_code(re(x)) == 'Re[x]' assert", "x]') assert mathematica_code(Min(x, y)) == 'Min[x, y]' assert mathematica_code(Max(x, y)) == 'Max[x, y]'", "-7)) == '-3/7' assert mathematica_code(Rational(-3, -7)) == '3/7' assert mathematica_code(x + Rational(3, 7))", "= RootSum(x**3 + x + 3, Lambda(y, log(y*z))) assert mathematica_code(r) == ('RootSum[Function[{x}, x^3", "'Hold[Limit[Sin[x]/x, x -> 0, Direction -> -1]]' e = Limit(sin(x)/x, x, 0, '-')", "mathematica_code(a) == ('AlgebraicNumber[Root[#^7 + 3*# - 1 &, 4],' ' {1, 0, 3,", "2, 3}, {3, 4}, x]') assert mathematica_code(Min(x, y)) == 'Min[x, y]' assert mathematica_code(Max(x,", "'Pi > 3' def test_Booleans(): assert mathematica_code(true) == 'True' assert mathematica_code(false) == 'False'", "Gt, Heaviside, Integer, Integral, Lambda, Le, Limit, Matrix, Max, Min, Ne, Or, Piecewise,", "tanh, true, zeta) from diofant.abc import x, y, z __all__ = () f", "== 'Pi' assert mathematica_code(oo) == 'Infinity' assert mathematica_code(-oo) == '-Infinity' assert mathematica_code(EulerGamma) ==", "Infinity}, ' \\ '{y, -Infinity, Infinity}]]' def test_Matrix(): assert mathematica_code(Matrix()) == '{}' m", "'Hold[Integrate[Sin[Sin[x]], x]]' assert mathematica_code(Integral(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))) ==", "== 'Zeta[x]' assert mathematica_code(Heaviside(x)) == 'UnitStep[x]' assert mathematica_code(fibonacci(x)) == 'Fibonacci[x]' assert mathematica_code(polylog(x, y))", "-oo, oo), (y, -oo, oo))) == \\ 'Hold[Integrate[E^(-x^2 - y^2), {x, -Infinity, Infinity},", "assert 
mathematica_code(pi) == 'Pi' assert mathematica_code(oo) == 'Infinity' assert mathematica_code(-oo) == '-Infinity' assert", "False, 'Myfunc2')]}) == 'MyFunc2[x, y]' def test_Lambda(): f1 = Lambda(x, x**2) assert mathematica_code(f1)", "'ArcTanh'}) == 'ArcTanh[x]' assert (mathematica_code(meijerg(((1, 1), (3, 4)), ((1,), ()), x)) == 'MeijerG[{{1,", "Derivative, Dummy, E, Eq, EulerGamma, Function, Gt, Heaviside, Integer, Integral, Lambda, Le, Limit,", "mathematica_code(Integral(sin(sin(x)), x)) == 'Hold[Integrate[Sin[Sin[x]], x]]' assert mathematica_code(Integral(exp(-x**2 - y**2), (x, -oo, oo), (y,", "'{1}' assert mathematica_code(Tuple(*[1, 2, 3])) == '{1, 2, 3}' def test_Integral(): assert mathematica_code(Integral(sin(sin(x)),", "'Sin[x]^Cos[x]' assert mathematica_code(sign(x)) == 'Sign[x]' assert mathematica_code(atanh(x), user_functions={'atanh': 'ArcTanh'}) == 'ArcTanh[x]' assert (mathematica_code(meijerg(((1,", "> 0}, {x + 1, True}}]') def test_RootOf(): p = Poly(x**3 + y*x", "RootSum, SparseMatrix, Sum, Tuple, acos, acosh, acot, acoth, asin, asinh, atan, atanh, binomial,", "- y**x)/(x**2 + y)) == \\ '(3.5*f[x])^(-x + y^x)/(x^2 + y)' assert mathematica_code(x**-1.0)", "== \\ 'Hold[Integrate[E^(-x^2 - y^2), {x, -Infinity, Infinity}, ' \\ '{y, -Infinity, Infinity}]]'", "== 'x' d = Dummy('d') assert mathematica_code(d) == f'd{d.dummy_index}' def test_Function(): assert mathematica_code(f(x,", "y): pass pytest.raises(ValueError, lambda: mathematica_code(MyFunc1(x), user_functions={'MyFunc1': ['Myfunc1']})) assert mathematica_code(MyFunc1(x), user_functions={'MyFunc1': 'Myfunc1'}) == 'Myfunc1[x]'", "1, 3) K = QQ.algebraic_field(r) a = K([1, 0, 3, 2, 1]) assert", "symbols, tan, tanh, true, zeta) from diofant.abc import x, y, z __all__ =", "def test_RootOf(): p = Poly(x**3 + y*x + 1, x) assert mathematica_code(RootOf(p, 0))", "test_Piecewise(): g = Piecewise((0, Or(x <= -1, x >= 1)), (1 - x,", "x, (y, 3), x)) == 'Hold[D[y^4*Sin[x], x, y, y, y, x]]' def test_Pow():", 
"test_Relational(): assert mathematica_code(Eq(x, y)) == 'x == y' assert mathematica_code(Ne(x, y/(1 + y**2)))", "gamma, hyper, im, log, loggamma, mathematica_code, meijerg, oo, pi, polygamma, polylog, re, rf,", "= Piecewise((0, Or(x <= -1, x >= 1)), (1 - x, x >", "'UnitStep[x]' assert mathematica_code(fibonacci(x)) == 'Fibonacci[x]' assert mathematica_code(polylog(x, y)) == 'PolyLog[x, y]' assert mathematica_code(loggamma(x))", "symbols('A B C D', commutative=False) assert mathematica_code(x*y*z) == 'x*y*z' assert mathematica_code(x*y*A) == 'x*y*A'", "x, 0, '-') assert mathematica_code(e) == 'Hold[Limit[Sin[x]/x, x -> 0, Direction -> 1]]'", "assert mathematica_code(-oo) == '-Infinity' assert mathematica_code(EulerGamma) == 'EulerGamma' assert mathematica_code(Catalan) == 'Catalan' assert", "0)) == 'Root[#^3 + #*y + 1 &, 1]' def test_RootSum(): r =", "1, x > 0}, {x + 1, True}}]') def test_RootOf(): p = Poly(x**3", "polygamma, polylog, re, rf, sech, sign, sin, sinh, symbols, tan, tanh, true, zeta)", "import pytest from diofant import (QQ, Catalan, Derivative, Dummy, E, Eq, EulerGamma, Function,", "__all__ = () f = Function('f') def test_Integer(): assert mathematica_code(Integer(67)) == '67' assert", "mathematica_code(Eq(x, y)) == 'x == y' assert mathematica_code(Ne(x, y/(1 + y**2))) == 'x", "'Min[x, y]' assert mathematica_code(Max(x, y)) == 'Max[x, y]' assert mathematica_code(Max(x, 2)) == 'Max[2,", "== 'x == y' assert mathematica_code(Ne(x, y/(1 + y**2))) == 'x != (y/(y^2", "mathematica_code(true) == 'True' assert mathematica_code(false) == 'False' def test_Piecewise(): g = Piecewise((0, Or(x", "-1, x >= 1)), (1 - x, x > 0), (1 + x,", "assert mathematica_code(Sum(sin(x), (x, 0, 10))) == 'Hold[Sum[Sin[x], {x, 0, 10}]]' assert mathematica_code(Sum(exp(-x**2 -", "def test_Function(): assert mathematica_code(f(x, y, z)) == 'f[x, y, z]' assert mathematica_code(sin(x) **", "['Myfunc1']})) assert mathematica_code(MyFunc1(x), user_functions={'MyFunc1': 'Myfunc1'}) == 
'Myfunc1[x]' assert mathematica_code(MyFunc2(x, y), user_functions={'MyFunc2': [(lambda *x:", "x)) == 'Hold[D[f[x], x, x]]' assert mathematica_code(Derivative(sin(x), x)) == 'Hold[D[Sin[x], x]]' assert mathematica_code(Derivative(x,", "assert mathematica_code(tanh(x)) == 'Tanh[x]' assert mathematica_code(coth(x)) == 'Coth[x]' assert mathematica_code(asinh(x)) == 'ArcSinh[x]' assert", "mathematica_code(Rational(3, -7)) == '-3/7' assert mathematica_code(Rational(-3, -7)) == '3/7' assert mathematica_code(x + Rational(3,", "factorial, factorial2, false, fibonacci, gamma, hyper, im, log, loggamma, mathematica_code, meijerg, oo, pi,", "== 'Factorial[x]' assert mathematica_code(factorial2(x)) == 'Factorial2[x]' assert mathematica_code(rf(x, y)) == 'Pochhammer[x, y]' assert", "1 &, 1]' def test_RootSum(): r = RootSum(x**3 + x + 3, Lambda(y,", "assert mathematica_code(f1) == 'Function[{x}, x^2]' f2 = Lambda((x, y), x + 2*y) assert", "x, y, x]]' assert mathematica_code(Derivative(sin(x)*y**4, x, (y, 3), x)) == 'Hold[D[y^4*Sin[x], x, y,", "assert mathematica_code(fibonacci(x)) == 'Fibonacci[x]' assert mathematica_code(polylog(x, y)) == 'PolyLog[x, y]' assert mathematica_code(loggamma(x)) ==", "y**x)/(x**2 + y)) == \\ '(3.5*f[x])^(-x + y^x)/(x^2 + y)' assert mathematica_code(x**-1.0) ==", "Matrix([[1, 2], [3, 4444]]) assert mathematica_code(m) == mathematica_code(m.as_immutable()) == '{{1, 2}, {3, 4444}}'", "== 'True' assert mathematica_code(false) == 'False' def test_Piecewise(): g = Piecewise((0, Or(x <=", "from diofant.abc import x, y, z __all__ = () f = Function('f') def", "{1, 0, 3, 2, 1}]') def test_Limit(): e = Limit(sin(x)/x, x, 0) assert", "'Erfc[x]' assert mathematica_code(conjugate(x)) == 'Conjugate[x]' assert mathematica_code(re(x)) == 'Re[x]' assert mathematica_code(im(x)) == 'Im[x]'", "RootSum(x**3 + x + 3, Lambda(y, log(y*z))) assert mathematica_code(r) == ('RootSum[Function[{x}, x^3 +", "x + 3, Lambda(y, log(y*z))) assert mathematica_code(r) == 
('RootSum[Function[{x}, x^3 + x +", "mathematica_code(sin(x) ** cos(x)) == 'Sin[x]^Cos[x]' assert mathematica_code(sign(x)) == 'Sign[x]' assert mathematica_code(atanh(x), user_functions={'atanh': 'ArcTanh'})", "'ArcCot[x]' assert mathematica_code(sinh(x)) == 'Sinh[x]' assert mathematica_code(cosh(x)) == 'Cosh[x]' assert mathematica_code(tanh(x)) == 'Tanh[x]'", "(1 - x, x > 0), (1 + x, True)) assert (mathematica_code(g) ==", "@classmethod def eval(cls, x, y): pass pytest.raises(ValueError, lambda: mathematica_code(MyFunc1(x), user_functions={'MyFunc1': ['Myfunc1']})) assert mathematica_code(MyFunc1(x),", "x, True)) assert (mathematica_code(g) == 'Piecewise[{{0, x >= 1 || x <= -1},", "+ 3/7' assert mathematica_code(Rational(3, 7)*x) == '(3/7)*x' def test_symbols(): assert mathematica_code(x) == 'x'", "MyFunc2(Function): @classmethod def eval(cls, x, y): pass pytest.raises(ValueError, lambda: mathematica_code(MyFunc1(x), user_functions={'MyFunc1': ['Myfunc1']})) assert" ]
[ "if time.time() > end_time: break # raise TimeoutError() def until(self, func: Callable, **kwargs):", "funcs: pass def logical_map(self, methods: dict): container = [] for key, method in", "def until_not(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return self def chains(self, *funcs: Callable,", "func: Callable, **kwargs): self._start_polling(func, **kwargs) return self def chains(self, *funcs: Callable, method='until'): authorized_methods", "= None): result = [] if callback is not None: if not callable(callback):", "raise MethodError() for func in funcs: pass def logical_map(self, methods: dict): container =", "None): result = [] if callback is not None: if not callable(callback): raise", "= driver self.timeout = timeout # signal.send(dispatcher.Any, self, timeout=timeout) class Wait(DriverMixin): def __init__(self,", "Callable, timeout: int): self.driver = driver self.timeout = timeout # signal.send(dispatcher.Any, self, timeout=timeout)", "_start_polling(self, func, **kwargs): # result = None # results = [] end_time =", "None # results = [] end_time = sum([time.time(), self.timeout]) global_logger.info(f'Waiting for element [{self.name}]", "until(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return self def until_not(self, func: Callable, **kwargs):", "MethodError() for func in funcs: pass def logical_map(self, methods: dict): container = []", "= Timer(self.timeout, function=lambda: True) timer.start() global_logger.info(f'Entering sleep mode ({self.timeout}s)') timer.join() if not timer.is_alive():", "driver self.timeout = timeout # signal.send(dispatcher.Any, self, timeout=timeout) class Wait(DriverMixin): def __init__(self, name:", "# return result self.results.append(result) time.sleep(self.timeout) if time.time() > end_time: break # raise TimeoutError()", "**kwargs): self._start_polling(func, **kwargs) return self def until_not(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return", 
"timeout: int=10): super().__init__(driver, timeout) self.name = name self.exceptions = [] self.results = []", "# from zacoby.pipeline import Pipeline from zacoby.settings import settings from zacoby.signals import signal", "Timer(self.timeout, function=callback, kwargs={'driver': self.driver, 'result': result}) else: timer = Timer(self.timeout, function=lambda: True) timer.start()", "self.results = [] def _start_polling(self, func, **kwargs): # result = None # results", "({self.timeout}s)...') while True: try: result = func(driver=self.driver, **kwargs) except Exception: raise else: #", "not None: if not callable(callback): raise TypeError('Callback should be a callable') timer =", "__init__(self, driver: Callable, timeout: int): self.driver = driver self.timeout = timeout # signal.send(dispatcher.Any,", "if callback is not None: if not callable(callback): raise TypeError('Callback should be a", "pass def logical_map(self, methods: dict): container = [] for key, method in methods.items():", "= [] if callback is not None: if not callable(callback): raise TypeError('Callback should", "Callable from pydispatch import dispatcher from zacoby import global_logger from zacoby.exceptions import ElementDoesNotExist,", "from zacoby.settings import settings from zacoby.signals import signal class DriverMixin: def __init__(self, driver:", "callback is not None: if not callable(callback): raise TypeError('Callback should be a callable')", "import signal class DriverMixin: def __init__(self, driver: Callable, timeout: int): self.driver = driver", "self.driver = driver self.timeout = timeout # signal.send(dispatcher.Any, self, timeout=timeout) class Wait(DriverMixin): def", "'until_not'] if method not in authorized_methods: raise MethodError() for func in funcs: pass", "*funcs: Callable, method='until'): authorized_methods = ['until', 'until_not'] if method not in authorized_methods: raise", "# results = [] end_time = sum([time.time(), self.timeout]) global_logger.info(f'Waiting 
for element [{self.name}] -", "= [] def _start_polling(self, func, **kwargs): # result = None # results =", "**kwargs) except Exception: raise else: # return result self.results.append(result) time.sleep(self.timeout) if time.time() >", "self def until_not(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return self def chains(self, *funcs:", "import time from threading import Timer from typing import Callable from pydispatch import", "import dispatcher from zacoby import global_logger from zacoby.exceptions import ElementDoesNotExist, MethodError # from", "timer.start() global_logger.info(f'Entering sleep mode ({self.timeout}s)') timer.join() if not timer.is_alive(): timer.cancel() return result if", "= timeout # signal.send(dispatcher.Any, self, timeout=timeout) class Wait(DriverMixin): def __init__(self, name: str, driver:", "is not None: if not callable(callback): raise TypeError('Callback should be a callable') timer", "class Pause(DriverMixin): def _start_pause(self, callback = None): result = [] if callback is", "zacoby.exceptions import ElementDoesNotExist, MethodError # from zacoby.pipeline import Pipeline from zacoby.settings import settings", "method in methods.items(): container.append(method()) return self class Pause(DriverMixin): def _start_pause(self, callback = None):", "# result = None # results = [] end_time = sum([time.time(), self.timeout]) global_logger.info(f'Waiting", "from threading import Timer from typing import Callable from pydispatch import dispatcher from", "class Wait(DriverMixin): def __init__(self, name: str, driver: Callable, timeout: int=10): super().__init__(driver, timeout) self.name", "TimeoutError() def until(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return self def until_not(self, func:", "driver: Callable, timeout: int): self.driver = driver self.timeout = timeout # signal.send(dispatcher.Any, self,", "sleep mode ({self.timeout}s)') timer.join() if not timer.is_alive(): 
timer.cancel() return result if result else", "signal class DriverMixin: def __init__(self, driver: Callable, timeout: int): self.driver = driver self.timeout", "func: Callable, **kwargs): self._start_polling(func, **kwargs) return self def until_not(self, func: Callable, **kwargs): self._start_polling(func,", "= func(driver=self.driver, **kwargs) except Exception: raise else: # return result self.results.append(result) time.sleep(self.timeout) if", "from pydispatch import dispatcher from zacoby import global_logger from zacoby.exceptions import ElementDoesNotExist, MethodError", "break # raise TimeoutError() def until(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return self", "self._start_polling(func, **kwargs) return self def chains(self, *funcs: Callable, method='until'): authorized_methods = ['until', 'until_not']", "= [] for key, method in methods.items(): container.append(method()) return self class Pause(DriverMixin): def", "import settings from zacoby.signals import signal class DriverMixin: def __init__(self, driver: Callable, timeout:", "def chains(self, *funcs: Callable, method='until'): authorized_methods = ['until', 'until_not'] if method not in", "def logical_map(self, methods: dict): container = [] for key, method in methods.items(): container.append(method())", "authorized_methods = ['until', 'until_not'] if method not in authorized_methods: raise MethodError() for func", "func in funcs: pass def logical_map(self, methods: dict): container = [] for key,", "methods.items(): container.append(method()) return self class Pause(DriverMixin): def _start_pause(self, callback = None): result =", "timer = Timer(self.timeout, function=callback, kwargs={'driver': self.driver, 'result': result}) else: timer = Timer(self.timeout, function=lambda:", "result = [] if callback is not None: if not callable(callback): raise TypeError('Callback", "self.timeout]) global_logger.info(f'Waiting for element [{self.name}] - ({self.timeout}s)...') 
while True: try: result = func(driver=self.driver,", "method='until'): authorized_methods = ['until', 'until_not'] if method not in authorized_methods: raise MethodError() for", "dispatcher from zacoby import global_logger from zacoby.exceptions import ElementDoesNotExist, MethodError # from zacoby.pipeline", "a callable') timer = Timer(self.timeout, function=callback, kwargs={'driver': self.driver, 'result': result}) else: timer =", "key, method in methods.items(): container.append(method()) return self class Pause(DriverMixin): def _start_pause(self, callback =", "func(driver=self.driver, **kwargs) except Exception: raise else: # return result self.results.append(result) time.sleep(self.timeout) if time.time()", "**kwargs) return self def chains(self, *funcs: Callable, method='until'): authorized_methods = ['until', 'until_not'] if", "self def chains(self, *funcs: Callable, method='until'): authorized_methods = ['until', 'until_not'] if method not", "function=lambda: True) timer.start() global_logger.info(f'Entering sleep mode ({self.timeout}s)') timer.join() if not timer.is_alive(): timer.cancel() return", "end_time = sum([time.time(), self.timeout]) global_logger.info(f'Waiting for element [{self.name}] - ({self.timeout}s)...') while True: try:", "class DriverMixin: def __init__(self, driver: Callable, timeout: int): self.driver = driver self.timeout =", "from zacoby.signals import signal class DriverMixin: def __init__(self, driver: Callable, timeout: int): self.driver", "except Exception: raise else: # return result self.results.append(result) time.sleep(self.timeout) if time.time() > end_time:", "_start_pause(self, callback = None): result = [] if callback is not None: if", "Timer(self.timeout, function=lambda: True) timer.start() global_logger.info(f'Entering sleep mode ({self.timeout}s)') timer.join() if not timer.is_alive(): timer.cancel()", "'result': result}) else: timer = Timer(self.timeout, function=lambda: True) timer.start() 
global_logger.info(f'Entering sleep mode ({self.timeout}s)')", "# signal.send(dispatcher.Any, self, timeout=timeout) class Wait(DriverMixin): def __init__(self, name: str, driver: Callable, timeout:", "authorized_methods: raise MethodError() for func in funcs: pass def logical_map(self, methods: dict): container", "callback = None): result = [] if callback is not None: if not", "result}) else: timer = Timer(self.timeout, function=lambda: True) timer.start() global_logger.info(f'Entering sleep mode ({self.timeout}s)') timer.join()", "TypeError('Callback should be a callable') timer = Timer(self.timeout, function=callback, kwargs={'driver': self.driver, 'result': result})", "Timer from typing import Callable from pydispatch import dispatcher from zacoby import global_logger", "for func in funcs: pass def logical_map(self, methods: dict): container = [] for", "def _start_pause(self, callback = None): result = [] if callback is not None:", "self.name = name self.exceptions = [] self.results = [] def _start_polling(self, func, **kwargs):", "for element [{self.name}] - ({self.timeout}s)...') while True: try: result = func(driver=self.driver, **kwargs) except", "self.timeout = timeout # signal.send(dispatcher.Any, self, timeout=timeout) class Wait(DriverMixin): def __init__(self, name: str,", "[] self.results = [] def _start_polling(self, func, **kwargs): # result = None #", "Exception: raise else: # return result self.results.append(result) time.sleep(self.timeout) if time.time() > end_time: break", "def _start_polling(self, func, **kwargs): # result = None # results = [] end_time", "if not callable(callback): raise TypeError('Callback should be a callable') timer = Timer(self.timeout, function=callback,", "raise TypeError('Callback should be a callable') timer = Timer(self.timeout, function=callback, kwargs={'driver': self.driver, 'result':", "['until', 'until_not'] if method not in authorized_methods: raise MethodError() for func in funcs:", "else: timer = 
Timer(self.timeout, function=lambda: True) timer.start() global_logger.info(f'Entering sleep mode ({self.timeout}s)') timer.join() if", "not callable(callback): raise TypeError('Callback should be a callable') timer = Timer(self.timeout, function=callback, kwargs={'driver':", "func, **kwargs): # result = None # results = [] end_time = sum([time.time(),", "global_logger.info(f'Entering sleep mode ({self.timeout}s)') timer.join() if not timer.is_alive(): timer.cancel() return result if result", "self.exceptions = [] self.results = [] def _start_polling(self, func, **kwargs): # result =", "super().__init__(driver, timeout) self.name = name self.exceptions = [] self.results = [] def _start_polling(self,", "Callable, **kwargs): self._start_polling(func, **kwargs) return self def chains(self, *funcs: Callable, method='until'): authorized_methods =", "timeout) self.name = name self.exceptions = [] self.results = [] def _start_polling(self, func,", "timeout=timeout) class Wait(DriverMixin): def __init__(self, name: str, driver: Callable, timeout: int=10): super().__init__(driver, timeout)", "Callable, timeout: int=10): super().__init__(driver, timeout) self.name = name self.exceptions = [] self.results =", "from typing import Callable from pydispatch import dispatcher from zacoby import global_logger from", "timeout # signal.send(dispatcher.Any, self, timeout=timeout) class Wait(DriverMixin): def __init__(self, name: str, driver: Callable,", "end_time: break # raise TimeoutError() def until(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return", "import global_logger from zacoby.exceptions import ElementDoesNotExist, MethodError # from zacoby.pipeline import Pipeline from", "__init__(self, name: str, driver: Callable, timeout: int=10): super().__init__(driver, timeout) self.name = name self.exceptions", "= None # results = [] end_time = sum([time.time(), self.timeout]) global_logger.info(f'Waiting for element", "function=callback, kwargs={'driver': 
self.driver, 'result': result}) else: timer = Timer(self.timeout, function=lambda: True) timer.start() global_logger.info(f'Entering", "methods: dict): container = [] for key, method in methods.items(): container.append(method()) return self", "result = None # results = [] end_time = sum([time.time(), self.timeout]) global_logger.info(f'Waiting for", "dict): container = [] for key, method in methods.items(): container.append(method()) return self class", "zacoby.signals import signal class DriverMixin: def __init__(self, driver: Callable, timeout: int): self.driver =", "name: str, driver: Callable, timeout: int=10): super().__init__(driver, timeout) self.name = name self.exceptions =", "zacoby import global_logger from zacoby.exceptions import ElementDoesNotExist, MethodError # from zacoby.pipeline import Pipeline", "self.results.append(result) time.sleep(self.timeout) if time.time() > end_time: break # raise TimeoutError() def until(self, func:", "def __init__(self, name: str, driver: Callable, timeout: int=10): super().__init__(driver, timeout) self.name = name", "in authorized_methods: raise MethodError() for func in funcs: pass def logical_map(self, methods: dict):", "mode ({self.timeout}s)') timer.join() if not timer.is_alive(): timer.cancel() return result if result else None", "import Timer from typing import Callable from pydispatch import dispatcher from zacoby import", "method not in authorized_methods: raise MethodError() for func in funcs: pass def logical_map(self,", "from zacoby.pipeline import Pipeline from zacoby.settings import settings from zacoby.signals import signal class", "should be a callable') timer = Timer(self.timeout, function=callback, kwargs={'driver': self.driver, 'result': result}) else:", "not in authorized_methods: raise MethodError() for func in funcs: pass def logical_map(self, methods:", "> end_time: break # raise TimeoutError() def until(self, func: Callable, **kwargs): self._start_polling(func, **kwargs)", "driver: Callable, 
timeout: int=10): super().__init__(driver, timeout) self.name = name self.exceptions = [] self.results", "Callable, method='until'): authorized_methods = ['until', 'until_not'] if method not in authorized_methods: raise MethodError()", "sum([time.time(), self.timeout]) global_logger.info(f'Waiting for element [{self.name}] - ({self.timeout}s)...') while True: try: result =", "else: # return result self.results.append(result) time.sleep(self.timeout) if time.time() > end_time: break # raise", "typing import Callable from pydispatch import dispatcher from zacoby import global_logger from zacoby.exceptions", "int=10): super().__init__(driver, timeout) self.name = name self.exceptions = [] self.results = [] def", "[] end_time = sum([time.time(), self.timeout]) global_logger.info(f'Waiting for element [{self.name}] - ({self.timeout}s)...') while True:", "in methods.items(): container.append(method()) return self class Pause(DriverMixin): def _start_pause(self, callback = None): result", "None: if not callable(callback): raise TypeError('Callback should be a callable') timer = Timer(self.timeout,", "time.sleep(self.timeout) if time.time() > end_time: break # raise TimeoutError() def until(self, func: Callable,", "container = [] for key, method in methods.items(): container.append(method()) return self class Pause(DriverMixin):", "result = func(driver=self.driver, **kwargs) except Exception: raise else: # return result self.results.append(result) time.sleep(self.timeout)", "DriverMixin: def __init__(self, driver: Callable, timeout: int): self.driver = driver self.timeout = timeout", "**kwargs): # result = None # results = [] end_time = sum([time.time(), self.timeout])", "def until(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return self def until_not(self, func: Callable,", "[] for key, method in methods.items(): container.append(method()) return self class Pause(DriverMixin): def _start_pause(self,", "return self def chains(self, *funcs: Callable, 
method='until'): authorized_methods = ['until', 'until_not'] if method", "pydispatch import dispatcher from zacoby import global_logger from zacoby.exceptions import ElementDoesNotExist, MethodError #", "True: try: result = func(driver=self.driver, **kwargs) except Exception: raise else: # return result", "return result self.results.append(result) time.sleep(self.timeout) if time.time() > end_time: break # raise TimeoutError() def", "self, timeout=timeout) class Wait(DriverMixin): def __init__(self, name: str, driver: Callable, timeout: int=10): super().__init__(driver,", "[{self.name}] - ({self.timeout}s)...') while True: try: result = func(driver=self.driver, **kwargs) except Exception: raise", "chains(self, *funcs: Callable, method='until'): authorized_methods = ['until', 'until_not'] if method not in authorized_methods:", "return self class Pause(DriverMixin): def _start_pause(self, callback = None): result = [] if", "ElementDoesNotExist, MethodError # from zacoby.pipeline import Pipeline from zacoby.settings import settings from zacoby.signals", "name self.exceptions = [] self.results = [] def _start_polling(self, func, **kwargs): # result", "# raise TimeoutError() def until(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return self def", "raise else: # return result self.results.append(result) time.sleep(self.timeout) if time.time() > end_time: break #", "**kwargs): self._start_polling(func, **kwargs) return self def chains(self, *funcs: Callable, method='until'): authorized_methods = ['until',", "until_not(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return self def chains(self, *funcs: Callable, method='until'):", "import Pipeline from zacoby.settings import settings from zacoby.signals import signal class DriverMixin: def", "global_logger from zacoby.exceptions import ElementDoesNotExist, MethodError # from zacoby.pipeline import Pipeline from zacoby.settings", "Wait(DriverMixin): def __init__(self, name: str, 
driver: Callable, timeout: int=10): super().__init__(driver, timeout) self.name =", "global_logger.info(f'Waiting for element [{self.name}] - ({self.timeout}s)...') while True: try: result = func(driver=self.driver, **kwargs)", "[] def _start_polling(self, func, **kwargs): # result = None # results = []", "time.time() > end_time: break # raise TimeoutError() def until(self, func: Callable, **kwargs): self._start_polling(func,", "be a callable') timer = Timer(self.timeout, function=callback, kwargs={'driver': self.driver, 'result': result}) else: timer", "- ({self.timeout}s)...') while True: try: result = func(driver=self.driver, **kwargs) except Exception: raise else:", "from zacoby.exceptions import ElementDoesNotExist, MethodError # from zacoby.pipeline import Pipeline from zacoby.settings import", "Pipeline from zacoby.settings import settings from zacoby.signals import signal class DriverMixin: def __init__(self,", "signal.send(dispatcher.Any, self, timeout=timeout) class Wait(DriverMixin): def __init__(self, name: str, driver: Callable, timeout: int=10):", "while True: try: result = func(driver=self.driver, **kwargs) except Exception: raise else: # return", "self.driver, 'result': result}) else: timer = Timer(self.timeout, function=lambda: True) timer.start() global_logger.info(f'Entering sleep mode", "[] if callback is not None: if not callable(callback): raise TypeError('Callback should be", "container.append(method()) return self class Pause(DriverMixin): def _start_pause(self, callback = None): result = []", "zacoby.settings import settings from zacoby.signals import signal class DriverMixin: def __init__(self, driver: Callable,", "True) timer.start() global_logger.info(f'Entering sleep mode ({self.timeout}s)') timer.join() if not timer.is_alive(): timer.cancel() return result", "element [{self.name}] - ({self.timeout}s)...') while True: try: result = func(driver=self.driver, **kwargs) except Exception:", "= Timer(self.timeout, function=callback, 
kwargs={'driver': self.driver, 'result': result}) else: timer = Timer(self.timeout, function=lambda: True)", "result self.results.append(result) time.sleep(self.timeout) if time.time() > end_time: break # raise TimeoutError() def until(self,", "= name self.exceptions = [] self.results = [] def _start_polling(self, func, **kwargs): #", "callable(callback): raise TypeError('Callback should be a callable') timer = Timer(self.timeout, function=callback, kwargs={'driver': self.driver,", "import ElementDoesNotExist, MethodError # from zacoby.pipeline import Pipeline from zacoby.settings import settings from", "results = [] end_time = sum([time.time(), self.timeout]) global_logger.info(f'Waiting for element [{self.name}] - ({self.timeout}s)...')", "kwargs={'driver': self.driver, 'result': result}) else: timer = Timer(self.timeout, function=lambda: True) timer.start() global_logger.info(f'Entering sleep", "= sum([time.time(), self.timeout]) global_logger.info(f'Waiting for element [{self.name}] - ({self.timeout}s)...') while True: try: result", "str, driver: Callable, timeout: int=10): super().__init__(driver, timeout) self.name = name self.exceptions = []", "Pause(DriverMixin): def _start_pause(self, callback = None): result = [] if callback is not", "import Callable from pydispatch import dispatcher from zacoby import global_logger from zacoby.exceptions import", "try: result = func(driver=self.driver, **kwargs) except Exception: raise else: # return result self.results.append(result)", "time from threading import Timer from typing import Callable from pydispatch import dispatcher", "threading import Timer from typing import Callable from pydispatch import dispatcher from zacoby", "raise TimeoutError() def until(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return self def until_not(self,", "in funcs: pass def logical_map(self, methods: dict): container = [] for key, method", "**kwargs) return self def until_not(self, func: Callable, **kwargs): 
self._start_polling(func, **kwargs) return self def", "= [] end_time = sum([time.time(), self.timeout]) global_logger.info(f'Waiting for element [{self.name}] - ({self.timeout}s)...') while", "callable') timer = Timer(self.timeout, function=callback, kwargs={'driver': self.driver, 'result': result}) else: timer = Timer(self.timeout,", "timer = Timer(self.timeout, function=lambda: True) timer.start() global_logger.info(f'Entering sleep mode ({self.timeout}s)') timer.join() if not", "MethodError # from zacoby.pipeline import Pipeline from zacoby.settings import settings from zacoby.signals import", "if method not in authorized_methods: raise MethodError() for func in funcs: pass def", "int): self.driver = driver self.timeout = timeout # signal.send(dispatcher.Any, self, timeout=timeout) class Wait(DriverMixin):", "zacoby.pipeline import Pipeline from zacoby.settings import settings from zacoby.signals import signal class DriverMixin:", "timeout: int): self.driver = driver self.timeout = timeout # signal.send(dispatcher.Any, self, timeout=timeout) class", "= ['until', 'until_not'] if method not in authorized_methods: raise MethodError() for func in", "self._start_polling(func, **kwargs) return self def until_not(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return self", "return self def until_not(self, func: Callable, **kwargs): self._start_polling(func, **kwargs) return self def chains(self,", "for key, method in methods.items(): container.append(method()) return self class Pause(DriverMixin): def _start_pause(self, callback", "settings from zacoby.signals import signal class DriverMixin: def __init__(self, driver: Callable, timeout: int):", "self class Pause(DriverMixin): def _start_pause(self, callback = None): result = [] if callback", "= [] self.results = [] def _start_polling(self, func, **kwargs): # result = None", "logical_map(self, methods: dict): container = [] for key, method in methods.items(): container.append(method()) return", 
"Callable, **kwargs): self._start_polling(func, **kwargs) return self def until_not(self, func: Callable, **kwargs): self._start_polling(func, **kwargs)", "def __init__(self, driver: Callable, timeout: int): self.driver = driver self.timeout = timeout #", "from zacoby import global_logger from zacoby.exceptions import ElementDoesNotExist, MethodError # from zacoby.pipeline import" ]
[ "def lookup_sequence(files, feature, chrom): \"\"\" use samtools to look up the sequence \"\"\"", "seq = Seq(\"\", generic_dna) id = feature.id for subf in feature.sub_features: seq =", "os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ[\"PATH\"].split(os.pathsep): exe_file", "subf.strand if strand is -1: seq = seq.reverse_complement() records.append(SeqRecord(seq, id=id)) SeqIO.write(records, sys.stdout, \"fasta\")", "\":\" + str(int(str(feature.location.start))+1) + \"-\" + str(feature.location.end)] child = subprocess.Popen(args, stdout=subprocess.PIPE) seq =", "None def main(): usage = \"usage: gtf2fasta seq_file gtf_file\" parser = OptionParser() (options,", "subf, chrom) strand = subf.strand if strand is -1: seq = seq.reverse_complement() records.append(SeqRecord(seq,", "os.path.join(path, program) if is_exe(exe_file): return exe_file return None def main(): usage = \"usage:", "import SeqIO from Bio.Alphabet import generic_dna from Bio.SeqRecord import SeqRecord import subprocess import", "#!/usr/local/bin/python from optparse import OptionParser from BCBio import GFF from Bio.Seq import Seq", "if is_exe(program): return program else: for path in os.environ[\"PATH\"].split(os.pathsep): exe_file = os.path.join(path, program)", "= seq + lookup_sequence(files, subf, chrom) strand = subf.strand if strand is -1:", "= parser.parse_args() samtools = which(\"samtools\") if samtools is None: print \"samtools must executable,", "which(\"samtools\") if samtools is None: print \"samtools must executable, add it to your", "lookup_sequence(files, subf, chrom) strand = subf.strand if strand is -1: seq = seq.reverse_complement()", "files['samtools'] = samtools if len(args) != 2: print usage exit(-1) files['seq_file'] = args[0]", "exist\" print usage exit(-1) if not os.path.exists(files['gtf_file']): print \"gtf_file does not exist\" print", "strand = subf.strand if strand is -1: seq = seq.reverse_complement() 
records.append(SeqRecord(seq, id=id)) SeqIO.write(records,", "sys.stdout, \"fasta\") def lookup_sequence(files, feature, chrom): \"\"\" use samtools to look up the", "+ line.strip() seq = Seq(seq, generic_dna) return seq def which(program): def is_exe(fpath): return", "for path in os.environ[\"PATH\"].split(os.pathsep): exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return", "\"\" for line in child.stdout: if line.strip()[0] == \">\": continue seq = seq", "= \"usage: gtf2fasta seq_file gtf_file\" parser = OptionParser() (options, args) = parser.parse_args() samtools", "program) if is_exe(exe_file): return exe_file return None def main(): usage = \"usage: gtf2fasta", "generic_dna) id = feature.id for subf in feature.sub_features: seq = seq + lookup_sequence(files,", "os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else:", "args) = parser.parse_args() samtools = which(\"samtools\") if samtools is None: print \"samtools must", "os.path.exists(files['seq_file']): print \"seq_file does not exist\" print usage exit(-1) if not os.path.exists(files['gtf_file']): print", "\"seq_file does not exist\" print usage exit(-1) if not os.path.exists(files['gtf_file']): print \"gtf_file does", "from BCBio import GFF from Bio.Seq import Seq from Bio import SeqIO from", "stdout=subprocess.PIPE) seq = \"\" for line in child.stdout: if line.strip()[0] == \">\": continue", "\" \\ \"download it from http://samtools.sourceforge.net/\" exit(-1) files = {} files['samtools'] = samtools", "\"usage: gtf2fasta seq_file gtf_file\" parser = OptionParser() (options, args) = parser.parse_args() samtools =", "chrom = rec.id for feature in rec.features: if feature.sub_features == []: seq =", "samtools to look up the sequence \"\"\" args = [files['samtools'], \"faidx\", files['seq_file'], str(chrom)", "os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for", "seq_file 
gtf_file\" parser = OptionParser() (options, args) = parser.parse_args() samtools = which(\"samtools\") if", "subprocess import os import sys def lookupSequences(files): gtf_file = open(files['gtf_file']) records = []", "in feature.sub_features: seq = seq + lookup_sequence(files, subf, chrom) strand = subf.strand if", "fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in", "[]: seq = lookup_sequence(files, feature, chrom) id = feature.qualifiers['transcript_id'][0] strand = feature.strand else:", "from Bio import SeqIO from Bio.Alphabet import generic_dna from Bio.SeqRecord import SeqRecord import", "strand is -1: seq = seq.reverse_complement() records.append(SeqRecord(seq, id=id)) SeqIO.write(records, sys.stdout, \"fasta\") def lookup_sequence(files,", "in os.environ[\"PATH\"].split(os.pathsep): exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None def", "subprocess.Popen(args, stdout=subprocess.PIPE) seq = \"\" for line in child.stdout: if line.strip()[0] == \">\":", "str(int(str(feature.location.start))+1) + \"-\" + str(feature.location.end)] child = subprocess.Popen(args, stdout=subprocess.PIPE) seq = \"\" for", "= which(\"samtools\") if samtools is None: print \"samtools must executable, add it to", "+ \"-\" + str(feature.location.end)] child = subprocess.Popen(args, stdout=subprocess.PIPE) seq = \"\" for line", "\"\"\" use samtools to look up the sequence \"\"\" args = [files['samtools'], \"faidx\",", "seq def which(program): def is_exe(fpath): return os.path.exists(fpath) and os.access(fpath, os.X_OK) fpath, fname =", "import Seq from Bio import SeqIO from Bio.Alphabet import generic_dna from Bio.SeqRecord import", "if not os.path.exists(files['gtf_file']): print \"gtf_file does not exist\" print usage exit(-1) lookupSequences(files) if", "line.strip()[0] == \">\": continue seq = seq + line.strip() seq = Seq(seq, generic_dna)", "import subprocess import os import sys def 
lookupSequences(files): gtf_file = open(files['gtf_file']) records =", "lookup_sequence(files, feature, chrom) id = feature.qualifiers['transcript_id'][0] strand = feature.strand else: seq = Seq(\"\",", "id=id)) SeqIO.write(records, sys.stdout, \"fasta\") def lookup_sequence(files, feature, chrom): \"\"\" use samtools to look", "for feature in rec.features: if feature.sub_features == []: seq = lookup_sequence(files, feature, chrom)", "id = feature.qualifiers['transcript_id'][0] strand = feature.strand else: seq = Seq(\"\", generic_dna) id =", "= feature.qualifiers['transcript_id'][0] strand = feature.strand else: seq = Seq(\"\", generic_dna) id = feature.id", "line.strip() seq = Seq(seq, generic_dna) return seq def which(program): def is_exe(fpath): return os.path.exists(fpath)", "feature.qualifiers['transcript_id'][0] strand = feature.strand else: seq = Seq(\"\", generic_dna) id = feature.id for", "gtf2fasta seq_file gtf_file\" parser = OptionParser() (options, args) = parser.parse_args() samtools = which(\"samtools\")", "Seq(seq, generic_dna) return seq def which(program): def is_exe(fpath): return os.path.exists(fpath) and os.access(fpath, os.X_OK)", "args[1] if not os.path.exists(files['seq_file']): print \"seq_file does not exist\" print usage exit(-1) if", "\"gtf_file does not exist\" print usage exit(-1) lookupSequences(files) if __name__ == \"__main__\": main()", "= seq + line.strip() seq = Seq(seq, generic_dna) return seq def which(program): def", "main(): usage = \"usage: gtf2fasta seq_file gtf_file\" parser = OptionParser() (options, args) =", "-1: seq = seq.reverse_complement() records.append(SeqRecord(seq, id=id)) SeqIO.write(records, sys.stdout, \"fasta\") def lookup_sequence(files, feature, chrom):", "files['seq_file'] = args[0] files['gtf_file'] = args[1] if not os.path.exists(files['seq_file']): print \"seq_file does not", "= Seq(\"\", generic_dna) id = feature.id for subf in feature.sub_features: seq = seq", "\">\": continue seq = seq + 
line.strip() seq = Seq(seq, generic_dna) return seq", "else: seq = Seq(\"\", generic_dna) id = feature.id for subf in feature.sub_features: seq", "from Bio.Alphabet import generic_dna from Bio.SeqRecord import SeqRecord import subprocess import os import", "executable, add it to your path or \" \\ \"download it from http://samtools.sourceforge.net/\"", "feature.sub_features == []: seq = lookup_sequence(files, feature, chrom) id = feature.qualifiers['transcript_id'][0] strand =", "seq = \"\" for line in child.stdout: if line.strip()[0] == \">\": continue seq", "for line in child.stdout: if line.strip()[0] == \">\": continue seq = seq +", "= samtools if len(args) != 2: print usage exit(-1) files['seq_file'] = args[0] files['gtf_file']", "if line.strip()[0] == \">\": continue seq = seq + line.strip() seq = Seq(seq,", "seq + lookup_sequence(files, subf, chrom) strand = subf.strand if strand is -1: seq", "Bio.SeqRecord import SeqRecord import subprocess import os import sys def lookupSequences(files): gtf_file =", "import GFF from Bio.Seq import Seq from Bio import SeqIO from Bio.Alphabet import", "print \"gtf_file does not exist\" print usage exit(-1) lookupSequences(files) if __name__ == \"__main__\":", "exe_file return None def main(): usage = \"usage: gtf2fasta seq_file gtf_file\" parser =", "if len(args) != 2: print usage exit(-1) files['seq_file'] = args[0] files['gtf_file'] = args[1]", "os import sys def lookupSequences(files): gtf_file = open(files['gtf_file']) records = [] for rec", "open(files['gtf_file']) records = [] for rec in GFF.parse(gtf_file): chrom = rec.id for feature", "= feature.strand else: seq = Seq(\"\", generic_dna) id = feature.id for subf in", "{} files['samtools'] = samtools if len(args) != 2: print usage exit(-1) files['seq_file'] =", "= open(files['gtf_file']) records = [] for rec in GFF.parse(gtf_file): chrom = rec.id for", "Bio.Seq import Seq from Bio import SeqIO from Bio.Alphabet import generic_dna from Bio.SeqRecord", "== []: seq = 
lookup_sequence(files, feature, chrom) id = feature.qualifiers['transcript_id'][0] strand = feature.strand", "is -1: seq = seq.reverse_complement() records.append(SeqRecord(seq, id=id)) SeqIO.write(records, sys.stdout, \"fasta\") def lookup_sequence(files, feature,", "if strand is -1: seq = seq.reverse_complement() records.append(SeqRecord(seq, id=id)) SeqIO.write(records, sys.stdout, \"fasta\") def", "return exe_file return None def main(): usage = \"usage: gtf2fasta seq_file gtf_file\" parser", "feature in rec.features: if feature.sub_features == []: seq = lookup_sequence(files, feature, chrom) id", "None: print \"samtools must executable, add it to your path or \" \\", "import os import sys def lookupSequences(files): gtf_file = open(files['gtf_file']) records = [] for", "return os.path.exists(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program):", "os.environ[\"PATH\"].split(os.pathsep): exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None def main():", "samtools if len(args) != 2: print usage exit(-1) files['seq_file'] = args[0] files['gtf_file'] =", "= \"\" for line in child.stdout: if line.strip()[0] == \">\": continue seq =", "use samtools to look up the sequence \"\"\" args = [files['samtools'], \"faidx\", files['seq_file'],", "= Seq(seq, generic_dna) return seq def which(program): def is_exe(fpath): return os.path.exists(fpath) and os.access(fpath,", "program else: for path in os.environ[\"PATH\"].split(os.pathsep): exe_file = os.path.join(path, program) if is_exe(exe_file): return", "subf in feature.sub_features: seq = seq + lookup_sequence(files, subf, chrom) strand = subf.strand", "it from http://samtools.sourceforge.net/\" exit(-1) files = {} files['samtools'] = samtools if len(args) !=", "= seq.reverse_complement() records.append(SeqRecord(seq, id=id)) SeqIO.write(records, sys.stdout, \"fasta\") def lookup_sequence(files, feature, chrom): \"\"\" use", "args[0] 
files['gtf_file'] = args[1] if not os.path.exists(files['seq_file']): print \"seq_file does not exist\" print", "feature, chrom): \"\"\" use samtools to look up the sequence \"\"\" args =", "Seq(\"\", generic_dna) id = feature.id for subf in feature.sub_features: seq = seq +", "from Bio.Seq import Seq from Bio import SeqIO from Bio.Alphabet import generic_dna from", "not exist\" print usage exit(-1) if not os.path.exists(files['gtf_file']): print \"gtf_file does not exist\"", "gtf_file = open(files['gtf_file']) records = [] for rec in GFF.parse(gtf_file): chrom = rec.id", "+ \":\" + str(int(str(feature.location.start))+1) + \"-\" + str(feature.location.end)] child = subprocess.Popen(args, stdout=subprocess.PIPE) seq", "files['gtf_file'] = args[1] if not os.path.exists(files['seq_file']): print \"seq_file does not exist\" print usage", "= args[1] if not os.path.exists(files['seq_file']): print \"seq_file does not exist\" print usage exit(-1)", "is_exe(fpath): return os.path.exists(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if", "= lookup_sequence(files, feature, chrom) id = feature.qualifiers['transcript_id'][0] strand = feature.strand else: seq =", "OptionParser() (options, args) = parser.parse_args() samtools = which(\"samtools\") if samtools is None: print", "= {} files['samtools'] = samtools if len(args) != 2: print usage exit(-1) files['seq_file']", "records = [] for rec in GFF.parse(gtf_file): chrom = rec.id for feature in", "str(feature.location.end)] child = subprocess.Popen(args, stdout=subprocess.PIPE) seq = \"\" for line in child.stdout: if", "http://samtools.sourceforge.net/\" exit(-1) files = {} files['samtools'] = samtools if len(args) != 2: print", "files = {} files['samtools'] = samtools if len(args) != 2: print usage exit(-1)", "from Bio.SeqRecord import SeqRecord import subprocess import os import sys def lookupSequences(files): gtf_file", "\\ \"download it from http://samtools.sourceforge.net/\" exit(-1) 
files = {} files['samtools'] = samtools if", "does not exist\" print usage exit(-1) if not os.path.exists(files['gtf_file']): print \"gtf_file does not", "files['seq_file'], str(chrom) + \":\" + str(int(str(feature.location.start))+1) + \"-\" + str(feature.location.end)] child = subprocess.Popen(args,", "print usage exit(-1) if not os.path.exists(files['gtf_file']): print \"gtf_file does not exist\" print usage", "args = [files['samtools'], \"faidx\", files['seq_file'], str(chrom) + \":\" + str(int(str(feature.location.start))+1) + \"-\" +", "continue seq = seq + line.strip() seq = Seq(seq, generic_dna) return seq def", "\"\"\" args = [files['samtools'], \"faidx\", files['seq_file'], str(chrom) + \":\" + str(int(str(feature.location.start))+1) + \"-\"", "id = feature.id for subf in feature.sub_features: seq = seq + lookup_sequence(files, subf,", "samtools = which(\"samtools\") if samtools is None: print \"samtools must executable, add it", "parser = OptionParser() (options, args) = parser.parse_args() samtools = which(\"samtools\") if samtools is", "os.path.exists(files['gtf_file']): print \"gtf_file does not exist\" print usage exit(-1) lookupSequences(files) if __name__ ==", "str(chrom) + \":\" + str(int(str(feature.location.start))+1) + \"-\" + str(feature.location.end)] child = subprocess.Popen(args, stdout=subprocess.PIPE)", "= os.path.join(path, program) if is_exe(exe_file): return exe_file return None def main(): usage =", "if fpath: if is_exe(program): return program else: for path in os.environ[\"PATH\"].split(os.pathsep): exe_file =", "feature.sub_features: seq = seq + lookup_sequence(files, subf, chrom) strand = subf.strand if strand", "def main(): usage = \"usage: gtf2fasta seq_file gtf_file\" parser = OptionParser() (options, args)", "lookupSequences(files): gtf_file = open(files['gtf_file']) records = [] for rec in GFF.parse(gtf_file): chrom =", "from optparse import OptionParser from BCBio import GFF from Bio.Seq import Seq from", "child.stdout: if 
line.strip()[0] == \">\": continue seq = seq + line.strip() seq =", "chrom): \"\"\" use samtools to look up the sequence \"\"\" args = [files['samtools'],", "feature.id for subf in feature.sub_features: seq = seq + lookup_sequence(files, subf, chrom) strand", "from http://samtools.sourceforge.net/\" exit(-1) files = {} files['samtools'] = samtools if len(args) != 2:", "GFF.parse(gtf_file): chrom = rec.id for feature in rec.features: if feature.sub_features == []: seq", "your path or \" \\ \"download it from http://samtools.sourceforge.net/\" exit(-1) files = {}", "chrom) id = feature.qualifiers['transcript_id'][0] strand = feature.strand else: seq = Seq(\"\", generic_dna) id", "OptionParser from BCBio import GFF from Bio.Seq import Seq from Bio import SeqIO", "[files['samtools'], \"faidx\", files['seq_file'], str(chrom) + \":\" + str(int(str(feature.location.start))+1) + \"-\" + str(feature.location.end)] child", "usage = \"usage: gtf2fasta seq_file gtf_file\" parser = OptionParser() (options, args) = parser.parse_args()", "rec in GFF.parse(gtf_file): chrom = rec.id for feature in rec.features: if feature.sub_features ==", "which(program): def is_exe(fpath): return os.path.exists(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if", "path in os.environ[\"PATH\"].split(os.pathsep): exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None", "seq + line.strip() seq = Seq(seq, generic_dna) return seq def which(program): def is_exe(fpath):", "not os.path.exists(files['gtf_file']): print \"gtf_file does not exist\" print usage exit(-1) lookupSequences(files) if __name__", "\"-\" + str(feature.location.end)] child = subprocess.Popen(args, stdout=subprocess.PIPE) seq = \"\" for line in", "= os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ[\"PATH\"].split(os.pathsep):", "parser.parse_args() samtools = which(\"samtools\") if samtools is None: print \"samtools must 
executable, add", "\"faidx\", files['seq_file'], str(chrom) + \":\" + str(int(str(feature.location.start))+1) + \"-\" + str(feature.location.end)] child =", "2: print usage exit(-1) files['seq_file'] = args[0] files['gtf_file'] = args[1] if not os.path.exists(files['seq_file']):", "sys def lookupSequences(files): gtf_file = open(files['gtf_file']) records = [] for rec in GFF.parse(gtf_file):", "= rec.id for feature in rec.features: if feature.sub_features == []: seq = lookup_sequence(files,", "and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program", "print \"samtools must executable, add it to your path or \" \\ \"download", "if samtools is None: print \"samtools must executable, add it to your path", "import sys def lookupSequences(files): gtf_file = open(files['gtf_file']) records = [] for rec in", "seq = Seq(seq, generic_dna) return seq def which(program): def is_exe(fpath): return os.path.exists(fpath) and", "usage exit(-1) if not os.path.exists(files['gtf_file']): print \"gtf_file does not exist\" print usage exit(-1)", "optparse import OptionParser from BCBio import GFF from Bio.Seq import Seq from Bio", "look up the sequence \"\"\" args = [files['samtools'], \"faidx\", files['seq_file'], str(chrom) + \":\"", "def which(program): def is_exe(fpath): return os.path.exists(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program)", "is_exe(program): return program else: for path in os.environ[\"PATH\"].split(os.pathsep): exe_file = os.path.join(path, program) if", "SeqIO.write(records, sys.stdout, \"fasta\") def lookup_sequence(files, feature, chrom): \"\"\" use samtools to look up", "strand = feature.strand else: seq = Seq(\"\", generic_dna) id = feature.id for subf", "seq.reverse_complement() records.append(SeqRecord(seq, id=id)) SeqIO.write(records, sys.stdout, \"fasta\") def lookup_sequence(files, feature, chrom): \"\"\" use samtools", "sequence \"\"\" args = [files['samtools'], 
\"faidx\", files['seq_file'], str(chrom) + \":\" + str(int(str(feature.location.start))+1) +", "= [] for rec in GFF.parse(gtf_file): chrom = rec.id for feature in rec.features:", "SeqIO from Bio.Alphabet import generic_dna from Bio.SeqRecord import SeqRecord import subprocess import os", "else: for path in os.environ[\"PATH\"].split(os.pathsep): exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file", "len(args) != 2: print usage exit(-1) files['seq_file'] = args[0] files['gtf_file'] = args[1] if", "feature, chrom) id = feature.qualifiers['transcript_id'][0] strand = feature.strand else: seq = Seq(\"\", generic_dna)", "+ lookup_sequence(files, subf, chrom) strand = subf.strand if strand is -1: seq =", "= [files['samtools'], \"faidx\", files['seq_file'], str(chrom) + \":\" + str(int(str(feature.location.start))+1) + \"-\" + str(feature.location.end)]", "exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None def main(): usage", "fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path", "is_exe(exe_file): return exe_file return None def main(): usage = \"usage: gtf2fasta seq_file gtf_file\"", "\"samtools must executable, add it to your path or \" \\ \"download it", "!= 2: print usage exit(-1) files['seq_file'] = args[0] files['gtf_file'] = args[1] if not", "exit(-1) files = {} files['samtools'] = samtools if len(args) != 2: print usage", "path or \" \\ \"download it from http://samtools.sourceforge.net/\" exit(-1) files = {} files['samtools']", "in GFF.parse(gtf_file): chrom = rec.id for feature in rec.features: if feature.sub_features == []:", "+ str(int(str(feature.location.start))+1) + \"-\" + str(feature.location.end)] child = subprocess.Popen(args, stdout=subprocess.PIPE) seq = \"\"", "<reponame>joshsbloom/eQTL_BYxRM #!/usr/local/bin/python from optparse import OptionParser from BCBio import GFF from Bio.Seq import", "chrom) strand = subf.strand if strand is -1: seq = 
seq.reverse_complement() records.append(SeqRecord(seq, id=id))", "def is_exe(fpath): return os.path.exists(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath:", "in child.stdout: if line.strip()[0] == \">\": continue seq = seq + line.strip() seq", "generic_dna from Bio.SeqRecord import SeqRecord import subprocess import os import sys def lookupSequences(files):", "gtf_file\" parser = OptionParser() (options, args) = parser.parse_args() samtools = which(\"samtools\") if samtools", "return None def main(): usage = \"usage: gtf2fasta seq_file gtf_file\" parser = OptionParser()", "import SeqRecord import subprocess import os import sys def lookupSequences(files): gtf_file = open(files['gtf_file'])", "up the sequence \"\"\" args = [files['samtools'], \"faidx\", files['seq_file'], str(chrom) + \":\" +", "Bio.Alphabet import generic_dna from Bio.SeqRecord import SeqRecord import subprocess import os import sys", "return program else: for path in os.environ[\"PATH\"].split(os.pathsep): exe_file = os.path.join(path, program) if is_exe(exe_file):", "fpath: if is_exe(program): return program else: for path in os.environ[\"PATH\"].split(os.pathsep): exe_file = os.path.join(path,", "samtools is None: print \"samtools must executable, add it to your path or", "for subf in feature.sub_features: seq = seq + lookup_sequence(files, subf, chrom) strand =", "if is_exe(exe_file): return exe_file return None def main(): usage = \"usage: gtf2fasta seq_file", "it to your path or \" \\ \"download it from http://samtools.sourceforge.net/\" exit(-1) files", "= args[0] files['gtf_file'] = args[1] if not os.path.exists(files['seq_file']): print \"seq_file does not exist\"", "not os.path.exists(files['seq_file']): print \"seq_file does not exist\" print usage exit(-1) if not os.path.exists(files['gtf_file']):", "\"fasta\") def lookup_sequence(files, feature, chrom): \"\"\" use samtools to look up the sequence", "== \">\": continue seq = seq + line.strip() seq = 
Seq(seq, generic_dna) return", "rec.features: if feature.sub_features == []: seq = lookup_sequence(files, feature, chrom) id = feature.qualifiers['transcript_id'][0]", "usage exit(-1) files['seq_file'] = args[0] files['gtf_file'] = args[1] if not os.path.exists(files['seq_file']): print \"seq_file", "os.path.exists(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return", "seq = seq.reverse_complement() records.append(SeqRecord(seq, id=id)) SeqIO.write(records, sys.stdout, \"fasta\") def lookup_sequence(files, feature, chrom): \"\"\"", "generic_dna) return seq def which(program): def is_exe(fpath): return os.path.exists(fpath) and os.access(fpath, os.X_OK) fpath,", "if not os.path.exists(files['seq_file']): print \"seq_file does not exist\" print usage exit(-1) if not", "in rec.features: if feature.sub_features == []: seq = lookup_sequence(files, feature, chrom) id =", "for rec in GFF.parse(gtf_file): chrom = rec.id for feature in rec.features: if feature.sub_features", "import OptionParser from BCBio import GFF from Bio.Seq import Seq from Bio import", "(options, args) = parser.parse_args() samtools = which(\"samtools\") if samtools is None: print \"samtools", "print \"seq_file does not exist\" print usage exit(-1) if not os.path.exists(files['gtf_file']): print \"gtf_file", "return seq def which(program): def is_exe(fpath): return os.path.exists(fpath) and os.access(fpath, os.X_OK) fpath, fname", "SeqRecord import subprocess import os import sys def lookupSequences(files): gtf_file = open(files['gtf_file']) records", "child = subprocess.Popen(args, stdout=subprocess.PIPE) seq = \"\" for line in child.stdout: if line.strip()[0]", "Seq from Bio import SeqIO from Bio.Alphabet import generic_dna from Bio.SeqRecord import SeqRecord", "seq = lookup_sequence(files, feature, chrom) id = feature.qualifiers['transcript_id'][0] strand = feature.strand else: seq", "or \" \\ \"download it from 
http://samtools.sourceforge.net/\" exit(-1) files = {} files['samtools'] =", "GFF from Bio.Seq import Seq from Bio import SeqIO from Bio.Alphabet import generic_dna", "= subprocess.Popen(args, stdout=subprocess.PIPE) seq = \"\" for line in child.stdout: if line.strip()[0] ==", "is None: print \"samtools must executable, add it to your path or \"", "lookup_sequence(files, feature, chrom): \"\"\" use samtools to look up the sequence \"\"\" args", "seq = seq + lookup_sequence(files, subf, chrom) strand = subf.strand if strand is", "[] for rec in GFF.parse(gtf_file): chrom = rec.id for feature in rec.features: if", "+ str(feature.location.end)] child = subprocess.Popen(args, stdout=subprocess.PIPE) seq = \"\" for line in child.stdout:", "print usage exit(-1) files['seq_file'] = args[0] files['gtf_file'] = args[1] if not os.path.exists(files['seq_file']): print", "exit(-1) if not os.path.exists(files['gtf_file']): print \"gtf_file does not exist\" print usage exit(-1) lookupSequences(files)", "the sequence \"\"\" args = [files['samtools'], \"faidx\", files['seq_file'], str(chrom) + \":\" + str(int(str(feature.location.start))+1)", "rec.id for feature in rec.features: if feature.sub_features == []: seq = lookup_sequence(files, feature,", "line in child.stdout: if line.strip()[0] == \">\": continue seq = seq + line.strip()", "BCBio import GFF from Bio.Seq import Seq from Bio import SeqIO from Bio.Alphabet", "exit(-1) files['seq_file'] = args[0] files['gtf_file'] = args[1] if not os.path.exists(files['seq_file']): print \"seq_file does", "def lookupSequences(files): gtf_file = open(files['gtf_file']) records = [] for rec in GFF.parse(gtf_file): chrom", "= feature.id for subf in feature.sub_features: seq = seq + lookup_sequence(files, subf, chrom)", "if feature.sub_features == []: seq = lookup_sequence(files, feature, chrom) id = feature.qualifiers['transcript_id'][0] strand", "to your path or \" \\ \"download it from http://samtools.sourceforge.net/\" exit(-1) files =", 
"records.append(SeqRecord(seq, id=id)) SeqIO.write(records, sys.stdout, \"fasta\") def lookup_sequence(files, feature, chrom): \"\"\" use samtools to", "= OptionParser() (options, args) = parser.parse_args() samtools = which(\"samtools\") if samtools is None:", "import generic_dna from Bio.SeqRecord import SeqRecord import subprocess import os import sys def", "must executable, add it to your path or \" \\ \"download it from", "\"download it from http://samtools.sourceforge.net/\" exit(-1) files = {} files['samtools'] = samtools if len(args)", "add it to your path or \" \\ \"download it from http://samtools.sourceforge.net/\" exit(-1)", "Bio import SeqIO from Bio.Alphabet import generic_dna from Bio.SeqRecord import SeqRecord import subprocess", "seq = seq + line.strip() seq = Seq(seq, generic_dna) return seq def which(program):", "= subf.strand if strand is -1: seq = seq.reverse_complement() records.append(SeqRecord(seq, id=id)) SeqIO.write(records, sys.stdout,", "to look up the sequence \"\"\" args = [files['samtools'], \"faidx\", files['seq_file'], str(chrom) +", "feature.strand else: seq = Seq(\"\", generic_dna) id = feature.id for subf in feature.sub_features:" ]
[ "-*- coding: utf-8 -*- import sys from redis import Redis from rq import", "# similar to rqworker redis_conn = Redis(config.REDIS_HOST) with Connection(redis_conn): qs = map(Queue, sys.argv[1:])", "rq import Queue, Connection, Worker from mailhook.config import config # Preload libraries import", "similar to rqworker redis_conn = Redis(config.REDIS_HOST) with Connection(redis_conn): qs = map(Queue, sys.argv[1:]) or", "import twilio # Provide queue names to listen to as arguments to this", "Worker from mailhook.config import config # Preload libraries import twilio # Provide queue", "# Preload libraries import twilio # Provide queue names to listen to as", "redis import Redis from rq import Queue, Connection, Worker from mailhook.config import config", "queue names to listen to as arguments to this script, # similar to", "from mailhook.config import config # Preload libraries import twilio # Provide queue names", "-*- import sys from redis import Redis from rq import Queue, Connection, Worker", "Redis(config.REDIS_HOST) with Connection(redis_conn): qs = map(Queue, sys.argv[1:]) or [Queue()] w = Worker(qs) w.work()", "from redis import Redis from rq import Queue, Connection, Worker from mailhook.config import", "import sys from redis import Redis from rq import Queue, Connection, Worker from", "to rqworker redis_conn = Redis(config.REDIS_HOST) with Connection(redis_conn): qs = map(Queue, sys.argv[1:]) or [Queue()]", "Preload libraries import twilio # Provide queue names to listen to as arguments", "coding: utf-8 -*- import sys from redis import Redis from rq import Queue,", "# -*- coding: utf-8 -*- import sys from redis import Redis from rq", "Connection, Worker from mailhook.config import config # Preload libraries import twilio # Provide", "to listen to as arguments to this script, # similar to rqworker redis_conn", "as arguments to this script, # similar to rqworker redis_conn = Redis(config.REDIS_HOST) with", "Redis from rq import Queue, Connection, Worker from 
mailhook.config import config # Preload", "import Queue, Connection, Worker from mailhook.config import config # Preload libraries import twilio", "import config # Preload libraries import twilio # Provide queue names to listen", "redis_conn = Redis(config.REDIS_HOST) with Connection(redis_conn): qs = map(Queue, sys.argv[1:]) or [Queue()] w =", "utf-8 -*- import sys from redis import Redis from rq import Queue, Connection,", "config # Preload libraries import twilio # Provide queue names to listen to", "Queue, Connection, Worker from mailhook.config import config # Preload libraries import twilio #", "rqworker redis_conn = Redis(config.REDIS_HOST) with Connection(redis_conn): qs = map(Queue, sys.argv[1:]) or [Queue()] w", "import Redis from rq import Queue, Connection, Worker from mailhook.config import config #", "names to listen to as arguments to this script, # similar to rqworker", "# Provide queue names to listen to as arguments to this script, #", "script, # similar to rqworker redis_conn = Redis(config.REDIS_HOST) with Connection(redis_conn): qs = map(Queue,", "= Redis(config.REDIS_HOST) with Connection(redis_conn): qs = map(Queue, sys.argv[1:]) or [Queue()] w = Worker(qs)", "sys from redis import Redis from rq import Queue, Connection, Worker from mailhook.config", "mailhook.config import config # Preload libraries import twilio # Provide queue names to", "to this script, # similar to rqworker redis_conn = Redis(config.REDIS_HOST) with Connection(redis_conn): qs", "this script, # similar to rqworker redis_conn = Redis(config.REDIS_HOST) with Connection(redis_conn): qs =", "to as arguments to this script, # similar to rqworker redis_conn = Redis(config.REDIS_HOST)", "twilio # Provide queue names to listen to as arguments to this script,", "from rq import Queue, Connection, Worker from mailhook.config import config # Preload libraries", "listen to as arguments to this script, # similar to rqworker redis_conn =", "arguments to this script, # similar to 
rqworker redis_conn = Redis(config.REDIS_HOST) with Connection(redis_conn):", "Provide queue names to listen to as arguments to this script, # similar", "libraries import twilio # Provide queue names to listen to as arguments to" ]
[ "= \"\" _client = None def __init__(self, ip=\"127.0.0.1\", port=9000, useHTTPS=False): self._ip = ip", "RPC Client at {self.url}') self._client = xmlrpc.client.ServerProxy(self.url) def sendFile(self, filename): curDir = os.path.dirname(os.path.realpath(__file__))", "here _ip='0.0.0.0' _port=1234 _url = \"\" _client = None def __init__(self, ip=\"127.0.0.1\", port=9000,", "-> ({})'.format(remotefnae)) if not os.path.exists(fpn): self.logger.info('Missing file -> ({})'.format(fpn)) #sys.exit(1) _resultUpload = None", "python3 import xmlrpc.client import os import logging class IPFS_RPC_Client(object): #Put your server IP", "on 5 janv. 2022 @author: slinux ''' import sys # The answer is", "#filename = sys.argv[1] #fpn = curDir + '/' + filename fpn = filename", "port self.logger = logging.getLogger('wxRaven') self.url = 'http://{}:{}'.format(ip, port) if useHTTPS: self.url = 'https://{}'.format(ip)", "_client = None def __init__(self, ip=\"127.0.0.1\", port=9000, useHTTPS=False): self._ip = ip self._port =", "filename localadd, remotefnae = os.path.split(filename) self.logger.info(' filename -> ({})'.format(filename)) self.logger.info(' fpn -> ({})'.format(remotefnae))", "self.logger = logging.getLogger('wxRaven') self.url = 'http://{}:{}'.format(ip, port) if useHTTPS: self.url = 'https://{}'.format(ip) if", "that the module xmlrpc is part of python3 import xmlrpc.client import os import", "_port=1234 _url = \"\" _client = None def __init__(self, ip=\"127.0.0.1\", port=9000, useHTTPS=False): self._ip", "filename -> ({})'.format(filename)) self.logger.info(' fpn -> ({})'.format(remotefnae)) if not os.path.exists(fpn): self.logger.info('Missing file ->", "def sendFile(self, filename): curDir = os.path.dirname(os.path.realpath(__file__)) #filename = sys.argv[1] #fpn = curDir +", "JSON): self.logger.info(f'JSON = {JSON}') _resultUpload = self._client.server_receive_json(JSON) #self.logger.info(f'_resultUpload = {_resultUpload}') return _resultUpload 
#.add_json(self.compile_message(message))", "({})'.format(remotefnae)) if not os.path.exists(fpn): self.logger.info('Missing file -> ({})'.format(fpn)) #sys.exit(1) _resultUpload = None with", "remotefnae) self.logger.info(f'_resultUpload = {_resultUpload}') return _resultUpload def sendJSON(self, JSON): self.logger.info(f'JSON = {JSON}') _resultUpload", "return _resultUpload def sendJSON(self, JSON): self.logger.info(f'JSON = {JSON}') _resultUpload = self._client.server_receive_json(JSON) #self.logger.info(f'_resultUpload =", "xmlrpc is part of python3 import xmlrpc.client import os import logging class IPFS_RPC_Client(object):", "class IPFS_RPC_Client(object): #Put your server IP here _ip='0.0.0.0' _port=1234 _url = \"\" _client", "os.path.exists(fpn): self.logger.info('Missing file -> ({})'.format(fpn)) #sys.exit(1) _resultUpload = None with open(fpn, \"rb\") as", "janv. 2022 @author: slinux ''' import sys # The answer is that the", "port) if useHTTPS: self.url = 'https://{}'.format(ip) if ip.__contains__('http'): self.url = '{}'.format(ip) self.logger.info(f'Creating a", "self._client.server_receive_file(binary_data, remotefnae) self.logger.info(f'_resultUpload = {_resultUpload}') return _resultUpload def sendJSON(self, JSON): self.logger.info(f'JSON = {JSON}')", "useHTTPS=False): self._ip = ip self._port = port self.logger = logging.getLogger('wxRaven') self.url = 'http://{}:{}'.format(ip,", "ip self._port = port self.logger = logging.getLogger('wxRaven') self.url = 'http://{}:{}'.format(ip, port) if useHTTPS:", "not os.path.exists(fpn): self.logger.info('Missing file -> ({})'.format(fpn)) #sys.exit(1) _resultUpload = None with open(fpn, \"rb\")", "logging.getLogger('wxRaven') self.url = 'http://{}:{}'.format(ip, port) if useHTTPS: self.url = 'https://{}'.format(ip) if ip.__contains__('http'): self.url", "'http://{}:{}'.format(ip, port) if useHTTPS: self.url = 'https://{}'.format(ip) if ip.__contains__('http'): self.url = '{}'.format(ip) 
self.logger.info(f'Creating", "slinux ''' import sys # The answer is that the module xmlrpc is", "filename fpn = filename localadd, remotefnae = os.path.split(filename) self.logger.info(' filename -> ({})'.format(filename)) self.logger.info('", "os.path.split(filename) self.logger.info(' filename -> ({})'.format(filename)) self.logger.info(' fpn -> ({})'.format(remotefnae)) if not os.path.exists(fpn): self.logger.info('Missing", "__init__(self, ip=\"127.0.0.1\", port=9000, useHTTPS=False): self._ip = ip self._port = port self.logger = logging.getLogger('wxRaven')", "# The answer is that the module xmlrpc is part of python3 import", "of python3 import xmlrpc.client import os import logging class IPFS_RPC_Client(object): #Put your server", "module xmlrpc is part of python3 import xmlrpc.client import os import logging class", "-> ({})'.format(filename)) self.logger.info(' fpn -> ({})'.format(remotefnae)) if not os.path.exists(fpn): self.logger.info('Missing file -> ({})'.format(fpn))", "#sys.exit(1) _resultUpload = None with open(fpn, \"rb\") as handle: binary_data = xmlrpc.client.Binary(handle.read()) _resultUpload", "None with open(fpn, \"rb\") as handle: binary_data = xmlrpc.client.Binary(handle.read()) _resultUpload = self._client.server_receive_file(binary_data, remotefnae)", "#Put your server IP here _ip='0.0.0.0' _port=1234 _url = \"\" _client = None", "self.logger.info(f'_resultUpload = {_resultUpload}') return _resultUpload def sendJSON(self, JSON): self.logger.info(f'JSON = {JSON}') _resultUpload =", "({})'.format(filename)) self.logger.info(' fpn -> ({})'.format(remotefnae)) if not os.path.exists(fpn): self.logger.info('Missing file -> ({})'.format(fpn)) #sys.exit(1)", "at {self.url}') self._client = xmlrpc.client.ServerProxy(self.url) def sendFile(self, filename): curDir = os.path.dirname(os.path.realpath(__file__)) #filename =", "= port self.logger = logging.getLogger('wxRaven') self.url = 'http://{}:{}'.format(ip, port) if useHTTPS: self.url =", "sys # 
The answer is that the module xmlrpc is part of python3", "logging class IPFS_RPC_Client(object): #Put your server IP here _ip='0.0.0.0' _port=1234 _url = \"\"", "_resultUpload = None with open(fpn, \"rb\") as handle: binary_data = xmlrpc.client.Binary(handle.read()) _resultUpload =", "remotefnae = os.path.split(filename) self.logger.info(' filename -> ({})'.format(filename)) self.logger.info(' fpn -> ({})'.format(remotefnae)) if not", "your server IP here _ip='0.0.0.0' _port=1234 _url = \"\" _client = None def", "server IP here _ip='0.0.0.0' _port=1234 _url = \"\" _client = None def __init__(self,", "Client at {self.url}') self._client = xmlrpc.client.ServerProxy(self.url) def sendFile(self, filename): curDir = os.path.dirname(os.path.realpath(__file__)) #filename", "self.url = '{}'.format(ip) self.logger.info(f'Creating a new IPFS RPC Client at {self.url}') self._client =", "'{}'.format(ip) self.logger.info(f'Creating a new IPFS RPC Client at {self.url}') self._client = xmlrpc.client.ServerProxy(self.url) def", "def sendJSON(self, JSON): self.logger.info(f'JSON = {JSON}') _resultUpload = self._client.server_receive_json(JSON) #self.logger.info(f'_resultUpload = {_resultUpload}') return", "filename): curDir = os.path.dirname(os.path.realpath(__file__)) #filename = sys.argv[1] #fpn = curDir + '/' +", "-> ({})'.format(fpn)) #sys.exit(1) _resultUpload = None with open(fpn, \"rb\") as handle: binary_data =", "os import logging class IPFS_RPC_Client(object): #Put your server IP here _ip='0.0.0.0' _port=1234 _url", "file -> ({})'.format(fpn)) #sys.exit(1) _resultUpload = None with open(fpn, \"rb\") as handle: binary_data", "self.url = 'https://{}'.format(ip) if ip.__contains__('http'): self.url = '{}'.format(ip) self.logger.info(f'Creating a new IPFS RPC", "= None def __init__(self, ip=\"127.0.0.1\", port=9000, useHTTPS=False): self._ip = ip self._port = port", "= os.path.split(filename) self.logger.info(' filename -> ({})'.format(filename)) self.logger.info(' fpn -> 
({})'.format(remotefnae)) if not os.path.exists(fpn):", "= None with open(fpn, \"rb\") as handle: binary_data = xmlrpc.client.Binary(handle.read()) _resultUpload = self._client.server_receive_file(binary_data,", "= xmlrpc.client.Binary(handle.read()) _resultUpload = self._client.server_receive_file(binary_data, remotefnae) self.logger.info(f'_resultUpload = {_resultUpload}') return _resultUpload def sendJSON(self,", "is that the module xmlrpc is part of python3 import xmlrpc.client import os", "if useHTTPS: self.url = 'https://{}'.format(ip) if ip.__contains__('http'): self.url = '{}'.format(ip) self.logger.info(f'Creating a new", "os.path.dirname(os.path.realpath(__file__)) #filename = sys.argv[1] #fpn = curDir + '/' + filename fpn =", "self.logger.info(' fpn -> ({})'.format(remotefnae)) if not os.path.exists(fpn): self.logger.info('Missing file -> ({})'.format(fpn)) #sys.exit(1) _resultUpload", "sendJSON(self, JSON): self.logger.info(f'JSON = {JSON}') _resultUpload = self._client.server_receive_json(JSON) #self.logger.info(f'_resultUpload = {_resultUpload}') return _resultUpload", "part of python3 import xmlrpc.client import os import logging class IPFS_RPC_Client(object): #Put your", "ip.__contains__('http'): self.url = '{}'.format(ip) self.logger.info(f'Creating a new IPFS RPC Client at {self.url}') self._client", "self.logger.info(' filename -> ({})'.format(filename)) self.logger.info(' fpn -> ({})'.format(remotefnae)) if not os.path.exists(fpn): self.logger.info('Missing file", "= os.path.dirname(os.path.realpath(__file__)) #filename = sys.argv[1] #fpn = curDir + '/' + filename fpn", "_resultUpload = self._client.server_receive_file(binary_data, remotefnae) self.logger.info(f'_resultUpload = {_resultUpload}') return _resultUpload def sendJSON(self, JSON): self.logger.info(f'JSON", "answer is that the module xmlrpc is part of python3 import xmlrpc.client import", "= 'http://{}:{}'.format(ip, port) if useHTTPS: self.url = 'https://{}'.format(ip) if 
ip.__contains__('http'): self.url = '{}'.format(ip)", "if ip.__contains__('http'): self.url = '{}'.format(ip) self.logger.info(f'Creating a new IPFS RPC Client at {self.url}')", "Created on 5 janv. 2022 @author: slinux ''' import sys # The answer", "''' import sys # The answer is that the module xmlrpc is part", "handle: binary_data = xmlrpc.client.Binary(handle.read()) _resultUpload = self._client.server_receive_file(binary_data, remotefnae) self.logger.info(f'_resultUpload = {_resultUpload}') return _resultUpload", "({})'.format(fpn)) #sys.exit(1) _resultUpload = None with open(fpn, \"rb\") as handle: binary_data = xmlrpc.client.Binary(handle.read())", "new IPFS RPC Client at {self.url}') self._client = xmlrpc.client.ServerProxy(self.url) def sendFile(self, filename): curDir", "_resultUpload def sendJSON(self, JSON): self.logger.info(f'JSON = {JSON}') _resultUpload = self._client.server_receive_json(JSON) #self.logger.info(f'_resultUpload = {_resultUpload}')", "binary_data = xmlrpc.client.Binary(handle.read()) _resultUpload = self._client.server_receive_file(binary_data, remotefnae) self.logger.info(f'_resultUpload = {_resultUpload}') return _resultUpload def", "import xmlrpc.client import os import logging class IPFS_RPC_Client(object): #Put your server IP here", "sys.argv[1] #fpn = curDir + '/' + filename fpn = filename localadd, remotefnae", "\"rb\") as handle: binary_data = xmlrpc.client.Binary(handle.read()) _resultUpload = self._client.server_receive_file(binary_data, remotefnae) self.logger.info(f'_resultUpload = {_resultUpload}')", "_url = \"\" _client = None def __init__(self, ip=\"127.0.0.1\", port=9000, useHTTPS=False): self._ip =", "= '{}'.format(ip) self.logger.info(f'Creating a new IPFS RPC Client at {self.url}') self._client = xmlrpc.client.ServerProxy(self.url)", "curDir + '/' + filename fpn = filename localadd, remotefnae = os.path.split(filename) self.logger.info('", "if not os.path.exists(fpn): self.logger.info('Missing file -> 
({})'.format(fpn)) #sys.exit(1) _resultUpload = None with open(fpn,", "the module xmlrpc is part of python3 import xmlrpc.client import os import logging", "self._ip = ip self._port = port self.logger = logging.getLogger('wxRaven') self.url = 'http://{}:{}'.format(ip, port)", "import sys # The answer is that the module xmlrpc is part of", "+ filename fpn = filename localadd, remotefnae = os.path.split(filename) self.logger.info(' filename -> ({})'.format(filename))", "with open(fpn, \"rb\") as handle: binary_data = xmlrpc.client.Binary(handle.read()) _resultUpload = self._client.server_receive_file(binary_data, remotefnae) self.logger.info(f'_resultUpload", "+ '/' + filename fpn = filename localadd, remotefnae = os.path.split(filename) self.logger.info(' filename", "IPFS RPC Client at {self.url}') self._client = xmlrpc.client.ServerProxy(self.url) def sendFile(self, filename): curDir =", "= sys.argv[1] #fpn = curDir + '/' + filename fpn = filename localadd,", "localadd, remotefnae = os.path.split(filename) self.logger.info(' filename -> ({})'.format(filename)) self.logger.info(' fpn -> ({})'.format(remotefnae)) if", "'/' + filename fpn = filename localadd, remotefnae = os.path.split(filename) self.logger.info(' filename ->", "fpn -> ({})'.format(remotefnae)) if not os.path.exists(fpn): self.logger.info('Missing file -> ({})'.format(fpn)) #sys.exit(1) _resultUpload =", "xmlrpc.client.ServerProxy(self.url) def sendFile(self, filename): curDir = os.path.dirname(os.path.realpath(__file__)) #filename = sys.argv[1] #fpn = curDir", "self.logger.info(f'Creating a new IPFS RPC Client at {self.url}') self._client = xmlrpc.client.ServerProxy(self.url) def sendFile(self,", "The answer is that the module xmlrpc is part of python3 import xmlrpc.client", "IPFS_RPC_Client(object): #Put your server IP here _ip='0.0.0.0' _port=1234 _url = \"\" _client =", "def __init__(self, ip=\"127.0.0.1\", port=9000, useHTTPS=False): self._ip = ip self._port = port self.logger =", 
"ip=\"127.0.0.1\", port=9000, useHTTPS=False): self._ip = ip self._port = port self.logger = logging.getLogger('wxRaven') self.url", "= logging.getLogger('wxRaven') self.url = 'http://{}:{}'.format(ip, port) if useHTTPS: self.url = 'https://{}'.format(ip) if ip.__contains__('http'):", "#fpn = curDir + '/' + filename fpn = filename localadd, remotefnae =", "{_resultUpload}') return _resultUpload def sendJSON(self, JSON): self.logger.info(f'JSON = {JSON}') _resultUpload = self._client.server_receive_json(JSON) #self.logger.info(f'_resultUpload", "is part of python3 import xmlrpc.client import os import logging class IPFS_RPC_Client(object): #Put", "= 'https://{}'.format(ip) if ip.__contains__('http'): self.url = '{}'.format(ip) self.logger.info(f'Creating a new IPFS RPC Client", "fpn = filename localadd, remotefnae = os.path.split(filename) self.logger.info(' filename -> ({})'.format(filename)) self.logger.info(' fpn", "2022 @author: slinux ''' import sys # The answer is that the module", "''' Created on 5 janv. 2022 @author: slinux ''' import sys # The", "self.logger.info('Missing file -> ({})'.format(fpn)) #sys.exit(1) _resultUpload = None with open(fpn, \"rb\") as handle:", "= self._client.server_receive_file(binary_data, remotefnae) self.logger.info(f'_resultUpload = {_resultUpload}') return _resultUpload def sendJSON(self, JSON): self.logger.info(f'JSON =", "@author: slinux ''' import sys # The answer is that the module xmlrpc", "curDir = os.path.dirname(os.path.realpath(__file__)) #filename = sys.argv[1] #fpn = curDir + '/' + filename", "self._port = port self.logger = logging.getLogger('wxRaven') self.url = 'http://{}:{}'.format(ip, port) if useHTTPS: self.url", "5 janv. 
2022 @author: slinux ''' import sys # The answer is that", "_ip='0.0.0.0' _port=1234 _url = \"\" _client = None def __init__(self, ip=\"127.0.0.1\", port=9000, useHTTPS=False):", "import os import logging class IPFS_RPC_Client(object): #Put your server IP here _ip='0.0.0.0' _port=1234", "None def __init__(self, ip=\"127.0.0.1\", port=9000, useHTTPS=False): self._ip = ip self._port = port self.logger", "= ip self._port = port self.logger = logging.getLogger('wxRaven') self.url = 'http://{}:{}'.format(ip, port) if", "= filename localadd, remotefnae = os.path.split(filename) self.logger.info(' filename -> ({})'.format(filename)) self.logger.info(' fpn ->", "self.url = 'http://{}:{}'.format(ip, port) if useHTTPS: self.url = 'https://{}'.format(ip) if ip.__contains__('http'): self.url =", "import logging class IPFS_RPC_Client(object): #Put your server IP here _ip='0.0.0.0' _port=1234 _url =", "open(fpn, \"rb\") as handle: binary_data = xmlrpc.client.Binary(handle.read()) _resultUpload = self._client.server_receive_file(binary_data, remotefnae) self.logger.info(f'_resultUpload =", "= {_resultUpload}') return _resultUpload def sendJSON(self, JSON): self.logger.info(f'JSON = {JSON}') _resultUpload = self._client.server_receive_json(JSON)", "xmlrpc.client import os import logging class IPFS_RPC_Client(object): #Put your server IP here _ip='0.0.0.0'", "useHTTPS: self.url = 'https://{}'.format(ip) if ip.__contains__('http'): self.url = '{}'.format(ip) self.logger.info(f'Creating a new IPFS", "'https://{}'.format(ip) if ip.__contains__('http'): self.url = '{}'.format(ip) self.logger.info(f'Creating a new IPFS RPC Client at", "IP here _ip='0.0.0.0' _port=1234 _url = \"\" _client = None def __init__(self, ip=\"127.0.0.1\",", "= curDir + '/' + filename fpn = filename localadd, remotefnae = os.path.split(filename)", "sendFile(self, filename): curDir = os.path.dirname(os.path.realpath(__file__)) #filename = sys.argv[1] #fpn = curDir + '/'", "{self.url}') self._client = 
xmlrpc.client.ServerProxy(self.url) def sendFile(self, filename): curDir = os.path.dirname(os.path.realpath(__file__)) #filename = sys.argv[1]", "= xmlrpc.client.ServerProxy(self.url) def sendFile(self, filename): curDir = os.path.dirname(os.path.realpath(__file__)) #filename = sys.argv[1] #fpn =", "self._client = xmlrpc.client.ServerProxy(self.url) def sendFile(self, filename): curDir = os.path.dirname(os.path.realpath(__file__)) #filename = sys.argv[1] #fpn", "as handle: binary_data = xmlrpc.client.Binary(handle.read()) _resultUpload = self._client.server_receive_file(binary_data, remotefnae) self.logger.info(f'_resultUpload = {_resultUpload}') return", "\"\" _client = None def __init__(self, ip=\"127.0.0.1\", port=9000, useHTTPS=False): self._ip = ip self._port", "xmlrpc.client.Binary(handle.read()) _resultUpload = self._client.server_receive_file(binary_data, remotefnae) self.logger.info(f'_resultUpload = {_resultUpload}') return _resultUpload def sendJSON(self, JSON):", "port=9000, useHTTPS=False): self._ip = ip self._port = port self.logger = logging.getLogger('wxRaven') self.url =", "a new IPFS RPC Client at {self.url}') self._client = xmlrpc.client.ServerProxy(self.url) def sendFile(self, filename):" ]
[ "import GuruSerializer from problem.serializers import ProbSerializer import json, requests from django.http import JsonResponse", "from .models import user, country, organization, contest from .serializers import UserSerializer, CountrySerializer, OrganizationSerializer,", "instance=Profile.objects.get(owner=self.request.user), data=request.data) return Response({'status': 'OK', 'result': 'Deleted Successfully'}) from .cron import codeforces_update_problems def", "from django.db.models import Q from django.template.loader import render_to_string from user.permissions import * def", "post(self, request): serializer = self.serializer_class(data=request.data) serializer.delete( instance=Profile.objects.get(owner=self.request.user), data=request.data) return Response({'status': 'OK', 'result': 'Deleted", "permission_classes = [AuthenticatedActivated] serializer_class = GuruSerializer def get(self, request): return JsonResponse({ 'status': 'OK',", "import Q from django.template.loader import render_to_string from user.permissions import * def data(URL): return", "return requests.get(URL).json() class MentorAPIView( mixins.CreateModelMixin, generics.ListAPIView, ): permission_classes = [AuthenticatedActivated] serializer_class = GuruSerializer", "from user.serializers import GuruSerializer from problem.serializers import ProbSerializer import json, requests from django.http", "validated_data=request.data, instance=Profile.objects.get(owner=self.request.user)) return Response({'status': 'OK', 'result': 'Added Successfully'}) def post(self, request): serializer =", "import generics, mixins, permissions from .models import user, country, organization, contest from .serializers", "serializer = self.serializer_class(data=request.data) if serializer.is_valid(raise_exception=True): serializer.add( validated_data=request.data, instance=Profile.objects.get(owner=self.request.user)) return Response({'status': 'OK', 'result': 'Added", "from user.permissions import * def 
data(URL): return requests.get(URL).json() class MentorAPIView( mixins.CreateModelMixin, generics.ListAPIView, ):", "'Added Successfully'}) def post(self, request): serializer = self.serializer_class(data=request.data) serializer.delete( instance=Profile.objects.get(owner=self.request.user), data=request.data) return Response({'status':", ".serializers import UserSerializer, CountrySerializer, OrganizationSerializer, ContestSerializer from user.serializers import GuruSerializer from problem.serializers import", "'OK', 'result': Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1] }) def put(self, request): serializer = self.serializer_class(data=request.data) if serializer.is_valid(raise_exception=True): serializer.add(", "request): serializer = self.serializer_class(data=request.data) serializer.delete( instance=Profile.objects.get(owner=self.request.user), data=request.data) return Response({'status': 'OK', 'result': 'Deleted Successfully'})", "'OK', 'result': 'Deleted Successfully'}) from .cron import codeforces_update_problems def testing(request): codeforces_update_problems() return JsonResponse({'status':", "Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1] }) def put(self, request): serializer = self.serializer_class(data=request.data) if serializer.is_valid(raise_exception=True): serializer.add( validated_data=request.data, instance=Profile.objects.get(owner=self.request.user))", "user.models import Profile from django.db.models import Q from django.template.loader import render_to_string from user.permissions", "serializer.add( validated_data=request.data, instance=Profile.objects.get(owner=self.request.user)) return Response({'status': 'OK', 'result': 'Added Successfully'}) def post(self, request): serializer", "def data(URL): return requests.get(URL).json() class MentorAPIView( mixins.CreateModelMixin, generics.ListAPIView, ): permission_classes = [AuthenticatedActivated] serializer_class", "GuruSerializer def get(self, 
request): return JsonResponse({ 'status': 'OK', 'result': Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1] }) def put(self,", "'result': 'Deleted Successfully'}) from .cron import codeforces_update_problems def testing(request): codeforces_update_problems() return JsonResponse({'status': 'OK'})", "render_to_string from user.permissions import * def data(URL): return requests.get(URL).json() class MentorAPIView( mixins.CreateModelMixin, generics.ListAPIView,", "'status': 'OK', 'result': Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1] }) def put(self, request): serializer = self.serializer_class(data=request.data) if serializer.is_valid(raise_exception=True):", "import * def data(URL): return requests.get(URL).json() class MentorAPIView( mixins.CreateModelMixin, generics.ListAPIView, ): permission_classes =", "organization, contest from .serializers import UserSerializer, CountrySerializer, OrganizationSerializer, ContestSerializer from user.serializers import GuruSerializer", "import ProbSerializer import json, requests from django.http import JsonResponse from user.models import Profile", "'result': 'Added Successfully'}) def post(self, request): serializer = self.serializer_class(data=request.data) serializer.delete( instance=Profile.objects.get(owner=self.request.user), data=request.data) return", "def put(self, request): serializer = self.serializer_class(data=request.data) if serializer.is_valid(raise_exception=True): serializer.add( validated_data=request.data, instance=Profile.objects.get(owner=self.request.user)) return Response({'status':", "from .serializers import UserSerializer, CountrySerializer, OrganizationSerializer, ContestSerializer from user.serializers import GuruSerializer from problem.serializers", "self.serializer_class(data=request.data) if serializer.is_valid(raise_exception=True): serializer.add( validated_data=request.data, instance=Profile.objects.get(owner=self.request.user)) return 
Response({'status': 'OK', 'result': 'Added Successfully'}) def", "OrganizationSerializer, ContestSerializer from user.serializers import GuruSerializer from problem.serializers import ProbSerializer import json, requests", "rest_framework.views import APIView from rest_framework.response import Response from rest_framework import generics, mixins, permissions", "data(URL): return requests.get(URL).json() class MentorAPIView( mixins.CreateModelMixin, generics.ListAPIView, ): permission_classes = [AuthenticatedActivated] serializer_class =", "Response({'status': 'OK', 'result': 'Added Successfully'}) def post(self, request): serializer = self.serializer_class(data=request.data) serializer.delete( instance=Profile.objects.get(owner=self.request.user),", "put(self, request): serializer = self.serializer_class(data=request.data) if serializer.is_valid(raise_exception=True): serializer.add( validated_data=request.data, instance=Profile.objects.get(owner=self.request.user)) return Response({'status': 'OK',", "def post(self, request): serializer = self.serializer_class(data=request.data) serializer.delete( instance=Profile.objects.get(owner=self.request.user), data=request.data) return Response({'status': 'OK', 'result':", "class MentorAPIView( mixins.CreateModelMixin, generics.ListAPIView, ): permission_classes = [AuthenticatedActivated] serializer_class = GuruSerializer def get(self,", "Profile from django.db.models import Q from django.template.loader import render_to_string from user.permissions import *", "MentorAPIView( mixins.CreateModelMixin, generics.ListAPIView, ): permission_classes = [AuthenticatedActivated] serializer_class = GuruSerializer def get(self, request):", "request): serializer = self.serializer_class(data=request.data) if serializer.is_valid(raise_exception=True): serializer.add( validated_data=request.data, instance=Profile.objects.get(owner=self.request.user)) return Response({'status': 'OK', 'result':", "<gh_stars>0 from rest_framework.views 
import APIView from rest_framework.response import Response from rest_framework import generics,", "get(self, request): return JsonResponse({ 'status': 'OK', 'result': Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1] }) def put(self, request): serializer", "generics, mixins, permissions from .models import user, country, organization, contest from .serializers import", "django.template.loader import render_to_string from user.permissions import * def data(URL): return requests.get(URL).json() class MentorAPIView(", "rest_framework.response import Response from rest_framework import generics, mixins, permissions from .models import user,", "UserSerializer, CountrySerializer, OrganizationSerializer, ContestSerializer from user.serializers import GuruSerializer from problem.serializers import ProbSerializer import", "ContestSerializer from user.serializers import GuruSerializer from problem.serializers import ProbSerializer import json, requests from", "user, country, organization, contest from .serializers import UserSerializer, CountrySerializer, OrganizationSerializer, ContestSerializer from user.serializers", "rest_framework import generics, mixins, permissions from .models import user, country, organization, contest from", "'OK', 'result': 'Added Successfully'}) def post(self, request): serializer = self.serializer_class(data=request.data) serializer.delete( instance=Profile.objects.get(owner=self.request.user), data=request.data)", "requests.get(URL).json() class MentorAPIView( mixins.CreateModelMixin, generics.ListAPIView, ): permission_classes = [AuthenticatedActivated] serializer_class = GuruSerializer def", "Successfully'}) def post(self, request): serializer = self.serializer_class(data=request.data) serializer.delete( instance=Profile.objects.get(owner=self.request.user), data=request.data) return Response({'status': 'OK',", "CountrySerializer, OrganizationSerializer, ContestSerializer from user.serializers import GuruSerializer from 
problem.serializers import ProbSerializer import json,", "generics.ListAPIView, ): permission_classes = [AuthenticatedActivated] serializer_class = GuruSerializer def get(self, request): return JsonResponse({", "import json, requests from django.http import JsonResponse from user.models import Profile from django.db.models", "import APIView from rest_framework.response import Response from rest_framework import generics, mixins, permissions from", "[AuthenticatedActivated] serializer_class = GuruSerializer def get(self, request): return JsonResponse({ 'status': 'OK', 'result': Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1]", "serializer_class = GuruSerializer def get(self, request): return JsonResponse({ 'status': 'OK', 'result': Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1] })", ".models import user, country, organization, contest from .serializers import UserSerializer, CountrySerializer, OrganizationSerializer, ContestSerializer", "* def data(URL): return requests.get(URL).json() class MentorAPIView( mixins.CreateModelMixin, generics.ListAPIView, ): permission_classes = [AuthenticatedActivated]", "APIView from rest_framework.response import Response from rest_framework import generics, mixins, permissions from .models", "django.http import JsonResponse from user.models import Profile from django.db.models import Q from django.template.loader", "mixins, permissions from .models import user, country, organization, contest from .serializers import UserSerializer,", "Response({'status': 'OK', 'result': 'Deleted Successfully'}) from .cron import codeforces_update_problems def testing(request): codeforces_update_problems() return", "import Profile from django.db.models import Q from django.template.loader import render_to_string from user.permissions import", "JsonResponse({ 'status': 'OK', 'result': Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1] }) def put(self, request): serializer = 
self.serializer_class(data=request.data) if", "from rest_framework.views import APIView from rest_framework.response import Response from rest_framework import generics, mixins,", "django.db.models import Q from django.template.loader import render_to_string from user.permissions import * def data(URL):", "data=request.data) return Response({'status': 'OK', 'result': 'Deleted Successfully'}) from .cron import codeforces_update_problems def testing(request):", "JsonResponse from user.models import Profile from django.db.models import Q from django.template.loader import render_to_string", "permissions from .models import user, country, organization, contest from .serializers import UserSerializer, CountrySerializer,", "from django.template.loader import render_to_string from user.permissions import * def data(URL): return requests.get(URL).json() class", "return Response({'status': 'OK', 'result': 'Deleted Successfully'}) from .cron import codeforces_update_problems def testing(request): codeforces_update_problems()", "= [AuthenticatedActivated] serializer_class = GuruSerializer def get(self, request): return JsonResponse({ 'status': 'OK', 'result':", "from user.models import Profile from django.db.models import Q from django.template.loader import render_to_string from", "import UserSerializer, CountrySerializer, OrganizationSerializer, ContestSerializer from user.serializers import GuruSerializer from problem.serializers import ProbSerializer", "import Response from rest_framework import generics, mixins, permissions from .models import user, country,", "def get(self, request): return JsonResponse({ 'status': 'OK', 'result': Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1] }) def put(self, request):", "mixins.CreateModelMixin, generics.ListAPIView, ): permission_classes = [AuthenticatedActivated] serializer_class = GuruSerializer def get(self, request): return", "user.serializers import GuruSerializer from problem.serializers import 
ProbSerializer import json, requests from django.http import", "from rest_framework import generics, mixins, permissions from .models import user, country, organization, contest", "from rest_framework.response import Response from rest_framework import generics, mixins, permissions from .models import", "country, organization, contest from .serializers import UserSerializer, CountrySerializer, OrganizationSerializer, ContestSerializer from user.serializers import", "request): return JsonResponse({ 'status': 'OK', 'result': Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1] }) def put(self, request): serializer =", "): permission_classes = [AuthenticatedActivated] serializer_class = GuruSerializer def get(self, request): return JsonResponse({ 'status':", "'result': Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1] }) def put(self, request): serializer = self.serializer_class(data=request.data) if serializer.is_valid(raise_exception=True): serializer.add( validated_data=request.data,", "Response from rest_framework import generics, mixins, permissions from .models import user, country, organization,", "import user, country, organization, contest from .serializers import UserSerializer, CountrySerializer, OrganizationSerializer, ContestSerializer from", "}) def put(self, request): serializer = self.serializer_class(data=request.data) if serializer.is_valid(raise_exception=True): serializer.add( validated_data=request.data, instance=Profile.objects.get(owner=self.request.user)) return", "return Response({'status': 'OK', 'result': 'Added Successfully'}) def post(self, request): serializer = self.serializer_class(data=request.data) serializer.delete(", "import render_to_string from user.permissions import * def data(URL): return requests.get(URL).json() class MentorAPIView( mixins.CreateModelMixin,", "= GuruSerializer def get(self, request): return JsonResponse({ 'status': 'OK', 'result': 
Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1] }) def", "serializer.is_valid(raise_exception=True): serializer.add( validated_data=request.data, instance=Profile.objects.get(owner=self.request.user)) return Response({'status': 'OK', 'result': 'Added Successfully'}) def post(self, request):", "ProbSerializer import json, requests from django.http import JsonResponse from user.models import Profile from", "import JsonResponse from user.models import Profile from django.db.models import Q from django.template.loader import", "json, requests from django.http import JsonResponse from user.models import Profile from django.db.models import", "= self.serializer_class(data=request.data) serializer.delete( instance=Profile.objects.get(owner=self.request.user), data=request.data) return Response({'status': 'OK', 'result': 'Deleted Successfully'}) from .cron", "problem.serializers import ProbSerializer import json, requests from django.http import JsonResponse from user.models import", "self.serializer_class(data=request.data) serializer.delete( instance=Profile.objects.get(owner=self.request.user), data=request.data) return Response({'status': 'OK', 'result': 'Deleted Successfully'}) from .cron import", "Q from django.template.loader import render_to_string from user.permissions import * def data(URL): return requests.get(URL).json()", "from problem.serializers import ProbSerializer import json, requests from django.http import JsonResponse from user.models", "from django.http import JsonResponse from user.models import Profile from django.db.models import Q from", "return JsonResponse({ 'status': 'OK', 'result': Profile.objects.get(owner=self.request.user).gurus.split(',')[1:-1] }) def put(self, request): serializer = self.serializer_class(data=request.data)", "requests from django.http import JsonResponse from user.models import Profile from django.db.models import Q", "contest from .serializers import UserSerializer, CountrySerializer, 
OrganizationSerializer, ContestSerializer from user.serializers import GuruSerializer from", "GuruSerializer from problem.serializers import ProbSerializer import json, requests from django.http import JsonResponse from", "if serializer.is_valid(raise_exception=True): serializer.add( validated_data=request.data, instance=Profile.objects.get(owner=self.request.user)) return Response({'status': 'OK', 'result': 'Added Successfully'}) def post(self,", "= self.serializer_class(data=request.data) if serializer.is_valid(raise_exception=True): serializer.add( validated_data=request.data, instance=Profile.objects.get(owner=self.request.user)) return Response({'status': 'OK', 'result': 'Added Successfully'})", "serializer = self.serializer_class(data=request.data) serializer.delete( instance=Profile.objects.get(owner=self.request.user), data=request.data) return Response({'status': 'OK', 'result': 'Deleted Successfully'}) from", "serializer.delete( instance=Profile.objects.get(owner=self.request.user), data=request.data) return Response({'status': 'OK', 'result': 'Deleted Successfully'}) from .cron import codeforces_update_problems", "instance=Profile.objects.get(owner=self.request.user)) return Response({'status': 'OK', 'result': 'Added Successfully'}) def post(self, request): serializer = self.serializer_class(data=request.data)", "user.permissions import * def data(URL): return requests.get(URL).json() class MentorAPIView( mixins.CreateModelMixin, generics.ListAPIView, ): permission_classes" ]
[ "class Lada(Brand): extractor = AvtoVazDetails class Nissan(Brand): extractor = NissanDetails class Opel(Brand): extractor", "extractor = AvtoVazDetails class Nissan(Brand): extractor = NissanDetails class Opel(Brand): extractor = OpelDetails", "<filename>vininfo/brands.py from .common import Brand from .details import * class Lada(Brand): extractor =", "= AvtoVazDetails class Nissan(Brand): extractor = NissanDetails class Opel(Brand): extractor = OpelDetails class", "AvtoVazDetails class Nissan(Brand): extractor = NissanDetails class Opel(Brand): extractor = OpelDetails class Renault(Brand):", "import Brand from .details import * class Lada(Brand): extractor = AvtoVazDetails class Nissan(Brand):", "from .common import Brand from .details import * class Lada(Brand): extractor = AvtoVazDetails", "from .details import * class Lada(Brand): extractor = AvtoVazDetails class Nissan(Brand): extractor =", "extractor = NissanDetails class Opel(Brand): extractor = OpelDetails class Renault(Brand): extractor = RenaultDetails", "Nissan(Brand): extractor = NissanDetails class Opel(Brand): extractor = OpelDetails class Renault(Brand): extractor =", ".common import Brand from .details import * class Lada(Brand): extractor = AvtoVazDetails class", "import * class Lada(Brand): extractor = AvtoVazDetails class Nissan(Brand): extractor = NissanDetails class", "class Nissan(Brand): extractor = NissanDetails class Opel(Brand): extractor = OpelDetails class Renault(Brand): extractor", ".details import * class Lada(Brand): extractor = AvtoVazDetails class Nissan(Brand): extractor = NissanDetails", "* class Lada(Brand): extractor = AvtoVazDetails class Nissan(Brand): extractor = NissanDetails class Opel(Brand):", "Brand from .details import * class Lada(Brand): extractor = AvtoVazDetails class Nissan(Brand): extractor", "Lada(Brand): extractor = AvtoVazDetails class Nissan(Brand): extractor = NissanDetails class Opel(Brand): extractor =" ]
[ "process each line in the input file, # skipping any blank lines input_data", "* 50, f'\\n\\nProcessing {sample}', end='') print('Input... ', end=line_endings) start = datetime.datetime.now() try: input_data", "whitespace (e.g. newlines) if delimiter: # Trim whitespace from and process each item", "data file found (looked in {self.get_input_file_name()}).') return t = (datetime.datetime.now() - start).total_seconds() if", "input_file = self.get_input_file_name() delimiter = self.input_delimiter process_item = self.process_input_item with open(input_file, 'r') as", "= self.verbosity max_v = v > 1 line_endings = '\\n' if max_v else", "free to manipulate it without # affecting subsequent parts part_input_data = input_data[:] else:", "input_data): raise NotImplementedError() def solve_part1(self): self._do_solve([(1, self._part1)]) def solve_part2(self): self._do_solve([(2, self._part2)]) def solve(self):", "self.sample else 'input' return os.path.join(path, filename) def process_input_item(self, input_line): return input_line def process_input_data(self,", "delimiter to use to separate the input data into a list for subsequent", "input_delimiter = '\\n' def __init__(self, sample=False, verbosity=2): self.sample = sample self.verbosity = verbosity", "v = self.verbosity max_v = v > 1 line_endings = '\\n' if max_v", "_process_input_data(). 
input_delimiter = '\\n' def __init__(self, sample=False, verbosity=2): self.sample = sample self.verbosity =", "in raw_input.split(delimiter)] else: # Process the raw input data directly input_data = self.process_input_data(raw_input)", "1 line_endings = '\\n' if max_v else '' # Get input if max_v:", "(datetime.datetime.now() - start).total_seconds() if self.input_delimiter == '\\n': input_desc = f'has {len(input_data)} lines' elif", "', end='') print('{} [{}s]'.format(solution, t)) if max_v: print('\\n', '=' * 50, sep='') def", "without # affecting subsequent parts part_input_data = input_data[:] else: part_input_data = input_data if", "configured delimiter input_data = [process_item(item.strip()) for item in raw_input.split(delimiter)] else: # Process the", "use to separate the input data into a list for subsequent # processing.", "try: input_data = self.get_input() except FileNotFoundError: print(f'No input data file found (looked in", "end=line_endings) start = datetime.datetime.now() try: input_data = self.get_input() except FileNotFoundError: print(f'No input data", "else '' print('=' * 50, f'\\n\\nProcessing {sample}', end='') print('Input... ', end=line_endings) start =", "processing. E.g. '\\n', ',', etc. Delimited items can be processed prior to #", "sample self.verbosity = verbosity def get_input_file_name(self): path = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))) filename = 'sample' if", "input data into a list for subsequent # processing. E.g. '\\n', ',', etc.", "if self.sample else '' print('=' * 50, f'\\n\\nProcessing {sample}', end='') print('Input... ', end=line_endings)", "v > 1 line_endings = '\\n' if max_v else '' # Get input", "{sample}', end='') print('Input... ', end=line_endings) start = datetime.datetime.now() try: input_data = self.get_input() except", "overriding _process_input_data(). 
input_delimiter = '\\n' def __init__(self, sample=False, verbosity=2): self.sample = sample self.verbosity", "part_input_data = input_data[:] else: part_input_data = input_data if max_v: print('\\nSolving ', end='') print('Part", "prior to # being added to the input list by overriding _process_input_item(). #", "[{}s]'.format(solution, t)) if max_v: print('\\n', '=' * 50, sep='') def _part1(self, input_data): raise", "== '\\n': input_desc = f'has {len(input_data)} lines' elif self.input_delimiter: input_desc = f'has {len(input_data)}", "import inspect import os import sys class Puzzle: # The delimiter to use", "print('Solution: ', end='') print('{} [{}s]'.format(solution, t)) if max_v: print('\\n', '=' * 50, sep='')", "can be processed # by overriding _process_input_data(). input_delimiter = '\\n' def __init__(self, sample=False,", "start = datetime.datetime.now() solution = solver(part_input_data) t = (datetime.datetime.now() - start).total_seconds() if max_v:", "'.format(part), end=line_endings) start = datetime.datetime.now() solution = solver(part_input_data) t = (datetime.datetime.now() - start).total_seconds()", "input_data def _do_solve(self, solvers): v = self.verbosity max_v = v > 1 line_endings", "overriding _process_input_item(). # Set to None to read the data in whole. 
In", "solver(part_input_data) t = (datetime.datetime.now() - start).total_seconds() if max_v: print('Solution: ', end='') print('{} [{}s]'.format(solution,", "filename = 'sample' if self.sample else 'input' return os.path.join(path, filename) def process_input_item(self, input_line):", "start).total_seconds() if max_v: print('Solution: ', end='') print('{} [{}s]'.format(solution, t)) if max_v: print('\\n', '='", "after applying the configured delimiter input_data = [process_item(item.strip()) for item in raw_input.split(delimiter)] else:", "file found (looked in {self.get_input_file_name()}).') return t = (datetime.datetime.now() - start).total_seconds() if self.input_delimiter", "end='') print('Input... ', end=line_endings) start = datetime.datetime.now() try: input_data = self.get_input() except FileNotFoundError:", "# processing. E.g. '\\n', ',', etc. Delimited items can be processed prior to", "import os import sys class Puzzle: # The delimiter to use to separate", "{}... '.format(part), end=line_endings) start = datetime.datetime.now() solution = solver(part_input_data) t = (datetime.datetime.now() -", "added to the input list by overriding _process_input_item(). # Set to None to", "start).total_seconds() if self.input_delimiter == '\\n': input_desc = f'has {len(input_data)} lines' elif self.input_delimiter: input_desc", "= f.read().strip() # trim whitespace (e.g. 
newlines) if delimiter: # Trim whitespace from", "inspect import os import sys class Puzzle: # The delimiter to use to", "newlines) if delimiter: # Trim whitespace from and process each item in the", "'sample' if self.sample else 'input' return os.path.join(path, filename) def process_input_item(self, input_line): return input_line", "if self.sample else 'input' return os.path.join(path, filename) def process_input_item(self, input_line): return input_line def", "filename) def process_input_item(self, input_line): return input_line def process_input_data(self, input_data): return input_data def get_input(self):", "in f.readlines(): line = line.strip() if line: input_data.append(process_item(line)) else: raw_input = f.read().strip() #", "for part, solver in solvers: if self.input_delimiter: # Copy the data so each", "= datetime.datetime.now() solution = solver(part_input_data) t = (datetime.datetime.now() - start).total_seconds() if max_v: print('Solution:", "the data so each part is free to manipulate it without # affecting", "line in the input file, # skipping any blank lines input_data = []", "= self.get_input_file_name() delimiter = self.input_delimiter process_item = self.process_input_item with open(input_file, 'r') as f:", "= input_data if max_v: print('\\nSolving ', end='') print('Part {}... '.format(part), end=line_endings) start =", "= sample self.verbosity = verbosity def get_input_file_name(self): path = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))) filename = 'sample'", "line.strip() if line: input_data.append(process_item(line)) else: raw_input = f.read().strip() # trim whitespace (e.g. newlines)", "input_data.append(process_item(line)) else: raw_input = f.read().strip() # trim whitespace (e.g. 
newlines) if delimiter: #", "({type(input_data)}) [{t}s]') # Run solvers for part, solver in solvers: if self.input_delimiter: #", "self.input_delimiter == '\\n': input_desc = f'has {len(input_data)} lines' elif self.input_delimiter: input_desc = f'has", "self.get_input() except FileNotFoundError: print(f'No input data file found (looked in {self.get_input_file_name()}).') return t", "- start).total_seconds() if max_v: print('Solution: ', end='') print('{} [{}s]'.format(solution, t)) if max_v: print('\\n',", "[] for line in f.readlines(): line = line.strip() if line: input_data.append(process_item(line)) else: raw_input", "= '**SAMPLE** ' if self.sample else '' print('=' * 50, f'\\n\\nProcessing {sample}', end='')", "input_data): raise NotImplementedError() def _part2(self, input_data): raise NotImplementedError() def solve_part1(self): self._do_solve([(1, self._part1)]) def", "raw_input = f.read().strip() # trim whitespace (e.g. newlines) if delimiter: # Trim whitespace", "= (datetime.datetime.now() - start).total_seconds() if max_v: print('Solution: ', end='') print('{} [{}s]'.format(solution, t)) if", "f.read().strip() # trim whitespace (e.g. newlines) if delimiter: # Trim whitespace from and", "to use to separate the input data into a list for subsequent #", "if max_v else '' # Get input if max_v: sample = '**SAMPLE** '", "line = line.strip() if line: input_data.append(process_item(line)) else: raw_input = f.read().strip() # trim whitespace", "manipulate it without # affecting subsequent parts part_input_data = input_data[:] else: part_input_data =", "input_data = [process_item(item.strip()) for item in raw_input.split(delimiter)] else: # Process the raw input", "= solver(part_input_data) t = (datetime.datetime.now() - start).total_seconds() if max_v: print('Solution: ', end='') print('{}", "case, data can be processed # by overriding _process_input_data(). input_delimiter = '\\n' def", "f'\\n\\nProcessing {sample}', end='') print('Input... 
', end=line_endings) start = datetime.datetime.now() try: input_data = self.get_input()", "return os.path.join(path, filename) def process_input_item(self, input_line): return input_line def process_input_data(self, input_data): return input_data", "whitespace from and process each line in the input file, # skipping any", "bytes' if max_v: print('Input ', end='') print(f'{input_desc} ({type(input_data)}) [{t}s]') # Run solvers for", "to None to read the data in whole. In this case, data can", "* 50, sep='') def _part1(self, input_data): raise NotImplementedError() def _part2(self, input_data): raise NotImplementedError()", "input_data = [] for line in f.readlines(): line = line.strip() if line: input_data.append(process_item(line))", "items can be processed prior to # being added to the input list", "def process_input_item(self, input_line): return input_line def process_input_data(self, input_data): return input_data def get_input(self): input_file", "= f'has {len(input_data)} items' else: size = sys.getsizeof(input_data) input_desc = f'is {size} bytes'", "print('Input ', end='') print(f'{input_desc} ({type(input_data)}) [{t}s]') # Run solvers for part, solver in", "if max_v: print('Solution: ', end='') print('{} [{}s]'.format(solution, t)) if max_v: print('\\n', '=' *", "to separate the input data into a list for subsequent # processing. E.g.", "= 'sample' if self.sample else 'input' return os.path.join(path, filename) def process_input_item(self, input_line): return", "'\\n' def __init__(self, sample=False, verbosity=2): self.sample = sample self.verbosity = verbosity def get_input_file_name(self):", "print('=' * 50, f'\\n\\nProcessing {sample}', end='') print('Input... ', end=line_endings) start = datetime.datetime.now() try:", "data in whole. In this case, data can be processed # by overriding", "be processed # by overriding _process_input_data(). 
input_delimiter = '\\n' def __init__(self, sample=False, verbosity=2):", "blank lines input_data = [] for line in f.readlines(): line = line.strip() if", "max_v: print('\\n', '=' * 50, sep='') def _part1(self, input_data): raise NotImplementedError() def _part2(self,", "get_input_file_name(self): path = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))) filename = 'sample' if self.sample else 'input' return os.path.join(path,", "solvers: if self.input_delimiter: # Copy the data so each part is free to", "input_data[:] else: part_input_data = input_data if max_v: print('\\nSolving ', end='') print('Part {}... '.format(part),", "def _part1(self, input_data): raise NotImplementedError() def _part2(self, input_data): raise NotImplementedError() def solve_part1(self): self._do_solve([(1,", "for line in f.readlines(): line = line.strip() if line: input_data.append(process_item(line)) else: raw_input =", "'\\n' if max_v else '' # Get input if max_v: sample = '**SAMPLE**", "etc. Delimited items can be processed prior to # being added to the", "input_desc = f'has {len(input_data)} items' else: size = sys.getsizeof(input_data) input_desc = f'is {size}", "'input' return os.path.join(path, filename) def process_input_item(self, input_line): return input_line def process_input_data(self, input_data): return", "data into a list for subsequent # processing. E.g. '\\n', ',', etc. Delimited", "datetime.datetime.now() solution = solver(part_input_data) t = (datetime.datetime.now() - start).total_seconds() if max_v: print('Solution: ',", "{size} bytes' if max_v: print('Input ', end='') print(f'{input_desc} ({type(input_data)}) [{t}s]') # Run solvers", "In this case, data can be processed # by overriding _process_input_data(). 
input_delimiter =", "return input_data def get_input(self): input_file = self.get_input_file_name() delimiter = self.input_delimiter process_item = self.process_input_item", "parts part_input_data = input_data[:] else: part_input_data = input_data if max_v: print('\\nSolving ', end='')", "directly input_data = self.process_input_data(raw_input) return input_data def _do_solve(self, solvers): v = self.verbosity max_v", "{self.get_input_file_name()}).') return t = (datetime.datetime.now() - start).total_seconds() if self.input_delimiter == '\\n': input_desc =", "sample = '**SAMPLE** ' if self.sample else '' print('=' * 50, f'\\n\\nProcessing {sample}',", "max_v = v > 1 line_endings = '\\n' if max_v else '' #", "' if self.sample else '' print('=' * 50, f'\\n\\nProcessing {sample}', end='') print('Input... ',", "else: size = sys.getsizeof(input_data) input_desc = f'is {size} bytes' if max_v: print('Input ',", "max_v else '' # Get input if max_v: sample = '**SAMPLE** ' if", "max_v: print('Solution: ', end='') print('{} [{}s]'.format(solution, t)) if max_v: print('\\n', '=' * 50,", "self.process_input_item with open(input_file, 'r') as f: if delimiter == '\\n': # Trim whitespace", "', end='') print(f'{input_desc} ({type(input_data)}) [{t}s]') # Run solvers for part, solver in solvers:", "process_item = self.process_input_item with open(input_file, 'r') as f: if delimiter == '\\n': #", "print(f'{input_desc} ({type(input_data)}) [{t}s]') # Run solvers for part, solver in solvers: if self.input_delimiter:", "to the input list by overriding _process_input_item(). # Set to None to read", "in whole. In this case, data can be processed # by overriding _process_input_data().", "end='') print('Part {}... '.format(part), end=line_endings) start = datetime.datetime.now() solution = solver(part_input_data) t =", "input list by overriding _process_input_item(). 
# Set to None to read the data", "found (looked in {self.get_input_file_name()}).') return t = (datetime.datetime.now() - start).total_seconds() if self.input_delimiter ==", "the data in whole. In this case, data can be processed # by", "from and process each item in the raw # input data after applying", "a list for subsequent # processing. E.g. '\\n', ',', etc. Delimited items can", "(e.g. newlines) if delimiter: # Trim whitespace from and process each item in", "self.sample = sample self.verbosity = verbosity def get_input_file_name(self): path = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))) filename =", "solvers): v = self.verbosity max_v = v > 1 line_endings = '\\n' if", "subsequent # processing. E.g. '\\n', ',', etc. Delimited items can be processed prior", "_process_input_item(). # Set to None to read the data in whole. In this", "except FileNotFoundError: print(f'No input data file found (looked in {self.get_input_file_name()}).') return t =", "# by overriding _process_input_data(). input_delimiter = '\\n' def __init__(self, sample=False, verbosity=2): self.sample =", "os.path.join(path, filename) def process_input_item(self, input_line): return input_line def process_input_data(self, input_data): return input_data def", "so each part is free to manipulate it without # affecting subsequent parts", "can be processed prior to # being added to the input list by", "read the data in whole. In this case, data can be processed #", "by overriding _process_input_item(). # Set to None to read the data in whole.", "if max_v: print('\\nSolving ', end='') print('Part {}... '.format(part), end=line_endings) start = datetime.datetime.now() solution", "'=' * 50, sep='') def _part1(self, input_data): raise NotImplementedError() def _part2(self, input_data): raise", "# Set to None to read the data in whole. 
In this case,", "= v > 1 line_endings = '\\n' if max_v else '' # Get", "# Trim whitespace from and process each line in the input file, #", "# input data after applying the configured delimiter input_data = [process_item(item.strip()) for item", "FileNotFoundError: print(f'No input data file found (looked in {self.get_input_file_name()}).') return t = (datetime.datetime.now()", "if line: input_data.append(process_item(line)) else: raw_input = f.read().strip() # trim whitespace (e.g. newlines) if", "(looked in {self.get_input_file_name()}).') return t = (datetime.datetime.now() - start).total_seconds() if self.input_delimiter == '\\n':", "input data directly input_data = self.process_input_data(raw_input) return input_data def _do_solve(self, solvers): v =", "{len(input_data)} lines' elif self.input_delimiter: input_desc = f'has {len(input_data)} items' else: size = sys.getsizeof(input_data)", "in the raw # input data after applying the configured delimiter input_data =", "raise NotImplementedError() def _part2(self, input_data): raise NotImplementedError() def solve_part1(self): self._do_solve([(1, self._part1)]) def solve_part2(self):", "def get_input(self): input_file = self.get_input_file_name() delimiter = self.input_delimiter process_item = self.process_input_item with open(input_file,", "def __init__(self, sample=False, verbosity=2): self.sample = sample self.verbosity = verbosity def get_input_file_name(self): path", "for item in raw_input.split(delimiter)] else: # Process the raw input data directly input_data", "sys.getsizeof(input_data) input_desc = f'is {size} bytes' if max_v: print('Input ', end='') print(f'{input_desc} ({type(input_data)})", "{len(input_data)} items' else: size = sys.getsizeof(input_data) input_desc = f'is {size} bytes' if max_v:", "delimiter == '\\n': # Trim whitespace from and process each line in the", "def process_input_data(self, input_data): return input_data def get_input(self): input_file = self.get_input_file_name() delimiter 
= self.input_delimiter", "Copy the data so each part is free to manipulate it without #", "datetime.datetime.now() try: input_data = self.get_input() except FileNotFoundError: print(f'No input data file found (looked", "_part2(self, input_data): raise NotImplementedError() def solve_part1(self): self._do_solve([(1, self._part1)]) def solve_part2(self): self._do_solve([(2, self._part2)]) def", "input_desc = f'is {size} bytes' if max_v: print('Input ', end='') print(f'{input_desc} ({type(input_data)}) [{t}s]')", "process_input_data(self, input_data): return input_data def get_input(self): input_file = self.get_input_file_name() delimiter = self.input_delimiter process_item", "= datetime.datetime.now() try: input_data = self.get_input() except FileNotFoundError: print(f'No input data file found", "end=line_endings) start = datetime.datetime.now() solution = solver(part_input_data) t = (datetime.datetime.now() - start).total_seconds() if", "= (datetime.datetime.now() - start).total_seconds() if self.input_delimiter == '\\n': input_desc = f'has {len(input_data)} lines'", "max_v: print('Input ', end='') print(f'{input_desc} ({type(input_data)}) [{t}s]') # Run solvers for part, solver", "process_input_item(self, input_line): return input_line def process_input_data(self, input_data): return input_data def get_input(self): input_file =", "solver in solvers: if self.input_delimiter: # Copy the data so each part is", "NotImplementedError() def solve_part1(self): self._do_solve([(1, self._part1)]) def solve_part2(self): self._do_solve([(2, self._part2)]) def solve(self): self._do_solve([(1, self._part1),", "line: input_data.append(process_item(line)) else: raw_input = f.read().strip() # trim whitespace (e.g. newlines) if delimiter:", "the input list by overriding _process_input_item(). 
# Set to None to read the", "input if max_v: sample = '**SAMPLE** ' if self.sample else '' print('=' *", "# Trim whitespace from and process each item in the raw # input", "solve_part1(self): self._do_solve([(1, self._part1)]) def solve_part2(self): self._do_solve([(2, self._part2)]) def solve(self): self._do_solve([(1, self._part1), (2, self._part2)])", "f: if delimiter == '\\n': # Trim whitespace from and process each line", "', end='') print('Part {}... '.format(part), end=line_endings) start = datetime.datetime.now() solution = solver(part_input_data) t", "# The delimiter to use to separate the input data into a list", "if delimiter == '\\n': # Trim whitespace from and process each line in", "processed prior to # being added to the input list by overriding _process_input_item().", "sample=False, verbosity=2): self.sample = sample self.verbosity = verbosity def get_input_file_name(self): path = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__)))", "'**SAMPLE** ' if self.sample else '' print('=' * 50, f'\\n\\nProcessing {sample}', end='') print('Input...", "os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))) filename = 'sample' if self.sample else 'input' return os.path.join(path, filename) def process_input_item(self,", "t = (datetime.datetime.now() - start).total_seconds() if self.input_delimiter == '\\n': input_desc = f'has {len(input_data)}", "the input data into a list for subsequent # processing. E.g. 
'\\n', ',',", "Trim whitespace from and process each item in the raw # input data", "lines' elif self.input_delimiter: input_desc = f'has {len(input_data)} items' else: size = sys.getsizeof(input_data) input_desc", "input data file found (looked in {self.get_input_file_name()}).') return t = (datetime.datetime.now() - start).total_seconds()", "'\\n': # Trim whitespace from and process each line in the input file,", "= os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))) filename = 'sample' if self.sample else 'input' return os.path.join(path, filename) def", "verbosity=2): self.sample = sample self.verbosity = verbosity def get_input_file_name(self): path = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))) filename", "Trim whitespace from and process each line in the input file, # skipping", "[process_item(item.strip()) for item in raw_input.split(delimiter)] else: # Process the raw input data directly", "part_input_data = input_data if max_v: print('\\nSolving ', end='') print('Part {}... '.format(part), end=line_endings) start", "Run solvers for part, solver in solvers: if self.input_delimiter: # Copy the data", "end='') print(f'{input_desc} ({type(input_data)}) [{t}s]') # Run solvers for part, solver in solvers: if", "being added to the input list by overriding _process_input_item(). # Set to None", "input_desc = f'has {len(input_data)} lines' elif self.input_delimiter: input_desc = f'has {len(input_data)} items' else:", "# being added to the input list by overriding _process_input_item(). 
# Set to", "'\\n': input_desc = f'has {len(input_data)} lines' elif self.input_delimiter: input_desc = f'has {len(input_data)} items'", "# Copy the data so each part is free to manipulate it without", "def _part2(self, input_data): raise NotImplementedError() def solve_part1(self): self._do_solve([(1, self._part1)]) def solve_part2(self): self._do_solve([(2, self._part2)])", "if max_v: sample = '**SAMPLE** ' if self.sample else '' print('=' * 50,", "= input_data[:] else: part_input_data = input_data if max_v: print('\\nSolving ', end='') print('Part {}...", "[{t}s]') # Run solvers for part, solver in solvers: if self.input_delimiter: # Copy", "', end=line_endings) start = datetime.datetime.now() try: input_data = self.get_input() except FileNotFoundError: print(f'No input", "for subsequent # processing. E.g. '\\n', ',', etc. Delimited items can be processed", "it without # affecting subsequent parts part_input_data = input_data[:] else: part_input_data = input_data", "else: # Process the raw input data directly input_data = self.process_input_data(raw_input) return input_data", "in {self.get_input_file_name()}).') return t = (datetime.datetime.now() - start).total_seconds() if self.input_delimiter == '\\n': input_desc", "self.verbosity max_v = v > 1 line_endings = '\\n' if max_v else ''", "return t = (datetime.datetime.now() - start).total_seconds() if self.input_delimiter == '\\n': input_desc = f'has", "line in f.readlines(): line = line.strip() if line: input_data.append(process_item(line)) else: raw_input = f.read().strip()", "# trim whitespace (e.g. 
newlines) if delimiter: # Trim whitespace from and process", "self.verbosity = verbosity def get_input_file_name(self): path = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))) filename = 'sample' if self.sample", "input_data): return input_data def get_input(self): input_file = self.get_input_file_name() delimiter = self.input_delimiter process_item =", "# Process the raw input data directly input_data = self.process_input_data(raw_input) return input_data def", "input_data = self.process_input_data(raw_input) return input_data def _do_solve(self, solvers): v = self.verbosity max_v =", "as f: if delimiter == '\\n': # Trim whitespace from and process each", "if max_v: print('Input ', end='') print(f'{input_desc} ({type(input_data)}) [{t}s]') # Run solvers for part,", "each part is free to manipulate it without # affecting subsequent parts part_input_data", "print('\\n', '=' * 50, sep='') def _part1(self, input_data): raise NotImplementedError() def _part2(self, input_data):", "if max_v: print('\\n', '=' * 50, sep='') def _part1(self, input_data): raise NotImplementedError() def", "and process each line in the input file, # skipping any blank lines", "sys class Puzzle: # The delimiter to use to separate the input data", "whitespace from and process each item in the raw # input data after", "start = datetime.datetime.now() try: input_data = self.get_input() except FileNotFoundError: print(f'No input data file", "input_data def get_input(self): input_file = self.get_input_file_name() delimiter = self.input_delimiter process_item = self.process_input_item with", "from and process each line in the input file, # skipping any blank", "print(f'No input data file found (looked in {self.get_input_file_name()}).') return t = (datetime.datetime.now() -", "data directly input_data = self.process_input_data(raw_input) return input_data def _do_solve(self, solvers): v = self.verbosity", "Process the raw input data directly input_data = 
self.process_input_data(raw_input) return input_data def _do_solve(self,", "applying the configured delimiter input_data = [process_item(item.strip()) for item in raw_input.split(delimiter)] else: #", "else: part_input_data = input_data if max_v: print('\\nSolving ', end='') print('Part {}... '.format(part), end=line_endings)", "with open(input_file, 'r') as f: if delimiter == '\\n': # Trim whitespace from", "'' # Get input if max_v: sample = '**SAMPLE** ' if self.sample else", "separate the input data into a list for subsequent # processing. E.g. '\\n',", "each item in the raw # input data after applying the configured delimiter", "'' print('=' * 50, f'\\n\\nProcessing {sample}', end='') print('Input... ', end=line_endings) start = datetime.datetime.now()", "t = (datetime.datetime.now() - start).total_seconds() if max_v: print('Solution: ', end='') print('{} [{}s]'.format(solution, t))", "item in raw_input.split(delimiter)] else: # Process the raw input data directly input_data =", "whole. In this case, data can be processed # by overriding _process_input_data(). input_delimiter", "the configured delimiter input_data = [process_item(item.strip()) for item in raw_input.split(delimiter)] else: # Process", "in the input file, # skipping any blank lines input_data = [] for", "print('\\nSolving ', end='') print('Part {}... '.format(part), end=line_endings) start = datetime.datetime.now() solution = solver(part_input_data)", "sep='') def _part1(self, input_data): raise NotImplementedError() def _part2(self, input_data): raise NotImplementedError() def solve_part1(self):", "list for subsequent # processing. E.g. '\\n', ',', etc. 
Delimited items can be", "to manipulate it without # affecting subsequent parts part_input_data = input_data[:] else: part_input_data", "os import sys class Puzzle: # The delimiter to use to separate the", "= '\\n' def __init__(self, sample=False, verbosity=2): self.sample = sample self.verbosity = verbosity def", "else: raw_input = f.read().strip() # trim whitespace (e.g. newlines) if delimiter: # Trim", "open(input_file, 'r') as f: if delimiter == '\\n': # Trim whitespace from and", "line_endings = '\\n' if max_v else '' # Get input if max_v: sample", "return input_data def _do_solve(self, solvers): v = self.verbosity max_v = v > 1", "solution = solver(part_input_data) t = (datetime.datetime.now() - start).total_seconds() if max_v: print('Solution: ', end='')", "def get_input_file_name(self): path = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))) filename = 'sample' if self.sample else 'input' return", "in solvers: if self.input_delimiter: # Copy the data so each part is free", "if self.input_delimiter: # Copy the data so each part is free to manipulate", "datetime import inspect import os import sys class Puzzle: # The delimiter to", "else 'input' return os.path.join(path, filename) def process_input_item(self, input_line): return input_line def process_input_data(self, input_data):", "input file, # skipping any blank lines input_data = [] for line in", "self.sample else '' print('=' * 50, f'\\n\\nProcessing {sample}', end='') print('Input... ', end=line_endings) start", "by overriding _process_input_data(). input_delimiter = '\\n' def __init__(self, sample=False, verbosity=2): self.sample = sample", "import sys class Puzzle: # The delimiter to use to separate the input", "list by overriding _process_input_item(). 
# Set to None to read the data in", "NotImplementedError() def _part2(self, input_data): raise NotImplementedError() def solve_part1(self): self._do_solve([(1, self._part1)]) def solve_part2(self): self._do_solve([(2,", "'\\n', ',', etc. Delimited items can be processed prior to # being added", "data after applying the configured delimiter input_data = [process_item(item.strip()) for item in raw_input.split(delimiter)]", "',', etc. Delimited items can be processed prior to # being added to", "size = sys.getsizeof(input_data) input_desc = f'is {size} bytes' if max_v: print('Input ', end='')", "each line in the input file, # skipping any blank lines input_data =", "f'has {len(input_data)} items' else: size = sys.getsizeof(input_data) input_desc = f'is {size} bytes' if", "= sys.getsizeof(input_data) input_desc = f'is {size} bytes' if max_v: print('Input ', end='') print(f'{input_desc}", "(datetime.datetime.now() - start).total_seconds() if max_v: print('Solution: ', end='') print('{} [{}s]'.format(solution, t)) if max_v:", "input_line def process_input_data(self, input_data): return input_data def get_input(self): input_file = self.get_input_file_name() delimiter =", "_part1(self, input_data): raise NotImplementedError() def _part2(self, input_data): raise NotImplementedError() def solve_part1(self): self._do_solve([(1, self._part1)])", "Get input if max_v: sample = '**SAMPLE** ' if self.sample else '' print('='", "verbosity def get_input_file_name(self): path = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))) filename = 'sample' if self.sample else 'input'", "is free to manipulate it without # affecting subsequent parts part_input_data = input_data[:]", "Set to None to read the data in whole. 
In this case, data", "return input_line def process_input_data(self, input_data): return input_data def get_input(self): input_file = self.get_input_file_name() delimiter", "f'has {len(input_data)} lines' elif self.input_delimiter: input_desc = f'has {len(input_data)} items' else: size =", "> 1 line_endings = '\\n' if max_v else '' # Get input if", "= f'has {len(input_data)} lines' elif self.input_delimiter: input_desc = f'has {len(input_data)} items' else: size", "= self.input_delimiter process_item = self.process_input_item with open(input_file, 'r') as f: if delimiter ==", "lines input_data = [] for line in f.readlines(): line = line.strip() if line:", "Delimited items can be processed prior to # being added to the input", "print('Input... ', end=line_endings) start = datetime.datetime.now() try: input_data = self.get_input() except FileNotFoundError: print(f'No", "skipping any blank lines input_data = [] for line in f.readlines(): line =", "50, sep='') def _part1(self, input_data): raise NotImplementedError() def _part2(self, input_data): raise NotImplementedError() def", "raw input data directly input_data = self.process_input_data(raw_input) return input_data def _do_solve(self, solvers): v", "The delimiter to use to separate the input data into a list for", "process each item in the raw # input data after applying the configured", "path = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))) filename = 'sample' if self.sample else 'input' return os.path.join(path, filename)", "self.input_delimiter process_item = self.process_input_item with open(input_file, 'r') as f: if delimiter == '\\n':", "file, # skipping any blank lines input_data = [] for line in f.readlines():", "f.readlines(): line = line.strip() if line: input_data.append(process_item(line)) else: raw_input = f.read().strip() # trim", "t)) if max_v: print('\\n', '=' * 50, sep='') def _part1(self, input_data): raise NotImplementedError()", "= self.process_input_data(raw_input) return 
input_data def _do_solve(self, solvers): v = self.verbosity max_v = v", "the raw # input data after applying the configured delimiter input_data = [process_item(item.strip())", "# skipping any blank lines input_data = [] for line in f.readlines(): line", "items' else: size = sys.getsizeof(input_data) input_desc = f'is {size} bytes' if max_v: print('Input", "input_data if max_v: print('\\nSolving ', end='') print('Part {}... '.format(part), end=line_endings) start = datetime.datetime.now()", "= [process_item(item.strip()) for item in raw_input.split(delimiter)] else: # Process the raw input data", "part, solver in solvers: if self.input_delimiter: # Copy the data so each part", "if self.input_delimiter == '\\n': input_desc = f'has {len(input_data)} lines' elif self.input_delimiter: input_desc =", "raw # input data after applying the configured delimiter input_data = [process_item(item.strip()) for", "# affecting subsequent parts part_input_data = input_data[:] else: part_input_data = input_data if max_v:", "delimiter input_data = [process_item(item.strip()) for item in raw_input.split(delimiter)] else: # Process the raw", "raw_input.split(delimiter)] else: # Process the raw input data directly input_data = self.process_input_data(raw_input) return", "affecting subsequent parts part_input_data = input_data[:] else: part_input_data = input_data if max_v: print('\\nSolving", "trim whitespace (e.g. newlines) if delimiter: # Trim whitespace from and process each", "print('Part {}... 
'.format(part), end=line_endings) start = datetime.datetime.now() solution = solver(part_input_data) t = (datetime.datetime.now()", "def solve_part1(self): self._do_solve([(1, self._part1)]) def solve_part2(self): self._do_solve([(2, self._part2)]) def solve(self): self._do_solve([(1, self._part1), (2,", "= [] for line in f.readlines(): line = line.strip() if line: input_data.append(process_item(line)) else:", "= verbosity def get_input_file_name(self): path = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__))) filename = 'sample' if self.sample else", "be processed prior to # being added to the input list by overriding", "subsequent parts part_input_data = input_data[:] else: part_input_data = input_data if max_v: print('\\nSolving ',", "class Puzzle: # The delimiter to use to separate the input data into", "into a list for subsequent # processing. E.g. '\\n', ',', etc. Delimited items", "delimiter = self.input_delimiter process_item = self.process_input_item with open(input_file, 'r') as f: if delimiter", "Puzzle: # The delimiter to use to separate the input data into a", "get_input(self): input_file = self.get_input_file_name() delimiter = self.input_delimiter process_item = self.process_input_item with open(input_file, 'r')", "and process each item in the raw # input data after applying the", "== '\\n': # Trim whitespace from and process each line in the input", "delimiter: # Trim whitespace from and process each item in the raw #", "= line.strip() if line: input_data.append(process_item(line)) else: raw_input = f.read().strip() # trim whitespace (e.g.", "end='') print('{} [{}s]'.format(solution, t)) if max_v: print('\\n', '=' * 50, sep='') def _part1(self,", "input_line): return input_line def process_input_data(self, input_data): return input_data def get_input(self): input_file = self.get_input_file_name()", "self.process_input_data(raw_input) return input_data def _do_solve(self, solvers): v = self.verbosity max_v = v >", "# Get input if 
max_v: sample = '**SAMPLE** ' if self.sample else ''", "print('{} [{}s]'.format(solution, t)) if max_v: print('\\n', '=' * 50, sep='') def _part1(self, input_data):", "processed # by overriding _process_input_data(). input_delimiter = '\\n' def __init__(self, sample=False, verbosity=2): self.sample", "'r') as f: if delimiter == '\\n': # Trim whitespace from and process", "item in the raw # input data after applying the configured delimiter input_data", "- start).total_seconds() if self.input_delimiter == '\\n': input_desc = f'has {len(input_data)} lines' elif self.input_delimiter:", "50, f'\\n\\nProcessing {sample}', end='') print('Input... ', end=line_endings) start = datetime.datetime.now() try: input_data =", "= f'is {size} bytes' if max_v: print('Input ', end='') print(f'{input_desc} ({type(input_data)}) [{t}s]') #", "the input file, # skipping any blank lines input_data = [] for line", "max_v: print('\\nSolving ', end='') print('Part {}... '.format(part), end=line_endings) start = datetime.datetime.now() solution =", "data so each part is free to manipulate it without # affecting subsequent", "max_v: sample = '**SAMPLE** ' if self.sample else '' print('=' * 50, f'\\n\\nProcessing", "= self.process_input_item with open(input_file, 'r') as f: if delimiter == '\\n': # Trim", "input_data = self.get_input() except FileNotFoundError: print(f'No input data file found (looked in {self.get_input_file_name()}).')", "else '' # Get input if max_v: sample = '**SAMPLE** ' if self.sample", "__init__(self, sample=False, verbosity=2): self.sample = sample self.verbosity = verbosity def get_input_file_name(self): path =", "import datetime import inspect import os import sys class Puzzle: # The delimiter", "raise NotImplementedError() def solve_part1(self): self._do_solve([(1, self._part1)]) def solve_part2(self): self._do_solve([(2, self._part2)]) def solve(self): self._do_solve([(1,", "solvers for part, solver in solvers: if self.input_delimiter: # Copy the data so", "E.g. 
'\\n', ',', etc. Delimited items can be processed prior to # being", "part is free to manipulate it without # affecting subsequent parts part_input_data =", "input data after applying the configured delimiter input_data = [process_item(item.strip()) for item in", "f'is {size} bytes' if max_v: print('Input ', end='') print(f'{input_desc} ({type(input_data)}) [{t}s]') # Run", "to read the data in whole. In this case, data can be processed", "self.input_delimiter: input_desc = f'has {len(input_data)} items' else: size = sys.getsizeof(input_data) input_desc = f'is", "_do_solve(self, solvers): v = self.verbosity max_v = v > 1 line_endings = '\\n'", "any blank lines input_data = [] for line in f.readlines(): line = line.strip()", "None to read the data in whole. In this case, data can be", "self.input_delimiter: # Copy the data so each part is free to manipulate it", "the raw input data directly input_data = self.process_input_data(raw_input) return input_data def _do_solve(self, solvers):", "# Run solvers for part, solver in solvers: if self.input_delimiter: # Copy the", "def _do_solve(self, solvers): v = self.verbosity max_v = v > 1 line_endings =", "elif self.input_delimiter: input_desc = f'has {len(input_data)} items' else: size = sys.getsizeof(input_data) input_desc =", "to # being added to the input list by overriding _process_input_item(). # Set", "if delimiter: # Trim whitespace from and process each item in the raw", "this case, data can be processed # by overriding _process_input_data(). input_delimiter = '\\n'", "data can be processed # by overriding _process_input_data(). input_delimiter = '\\n' def __init__(self,", "= self.get_input() except FileNotFoundError: print(f'No input data file found (looked in {self.get_input_file_name()}).') return", "= '\\n' if max_v else '' # Get input if max_v: sample =", "self.get_input_file_name() delimiter = self.input_delimiter process_item = self.process_input_item with open(input_file, 'r') as f: if" ]
[ "print(data) return True def on_error(self, status): print(status) auth = OAuthHandler(ckey, csecret) auth.set_access_token(atoken, asecret)", "True def on_error(self, status): print(status) auth = OAuthHandler(ckey, csecret) auth.set_access_token(atoken, asecret) twitterStream =", "credentials.py file with the following keys from credentials import ckey, csecret, atoken, asecret", "the following keys from credentials import ckey, csecret, atoken, asecret from tweepy import", "ckey, csecret, atoken, asecret from tweepy import Stream, OAuthHandler from tweepy.streaming import StreamListener", "csecret, atoken, asecret from tweepy import Stream, OAuthHandler from tweepy.streaming import StreamListener class", "keys from credentials import ckey, csecret, atoken, asecret from tweepy import Stream, OAuthHandler", "def on_data(self, data): print(data) return True def on_error(self, status): print(status) auth = OAuthHandler(ckey,", "import Stream, OAuthHandler from tweepy.streaming import StreamListener class listener(StreamListener): def on_data(self, data): print(data)", "create a credentials.py file with the following keys from credentials import ckey, csecret,", "class listener(StreamListener): def on_data(self, data): print(data) return True def on_error(self, status): print(status) auth", "def on_error(self, status): print(status) auth = OAuthHandler(ckey, csecret) auth.set_access_token(atoken, asecret) twitterStream = Stream(auth,", "credentials import ckey, csecret, atoken, asecret from tweepy import Stream, OAuthHandler from tweepy.streaming", "return True def on_error(self, status): print(status) auth = OAuthHandler(ckey, csecret) auth.set_access_token(atoken, asecret) twitterStream", "from tweepy import Stream, OAuthHandler from tweepy.streaming import StreamListener class listener(StreamListener): def on_data(self,", "from credentials import ckey, csecret, atoken, asecret from tweepy import Stream, OAuthHandler from", "tweepy.streaming import StreamListener 
class listener(StreamListener): def on_data(self, data): print(data) return True def on_error(self,", "asecret from tweepy import Stream, OAuthHandler from tweepy.streaming import StreamListener class listener(StreamListener): def", "import ckey, csecret, atoken, asecret from tweepy import Stream, OAuthHandler from tweepy.streaming import", "Stream, OAuthHandler from tweepy.streaming import StreamListener class listener(StreamListener): def on_data(self, data): print(data) return", "listener(StreamListener): def on_data(self, data): print(data) return True def on_error(self, status): print(status) auth =", "following keys from credentials import ckey, csecret, atoken, asecret from tweepy import Stream,", "file with the following keys from credentials import ckey, csecret, atoken, asecret from", "status): print(status) auth = OAuthHandler(ckey, csecret) auth.set_access_token(atoken, asecret) twitterStream = Stream(auth, listener()) twitterStream.filter(track=['python'])", "on_error(self, status): print(status) auth = OAuthHandler(ckey, csecret) auth.set_access_token(atoken, asecret) twitterStream = Stream(auth, listener())", "tweepy import Stream, OAuthHandler from tweepy.streaming import StreamListener class listener(StreamListener): def on_data(self, data):", "StreamListener class listener(StreamListener): def on_data(self, data): print(data) return True def on_error(self, status): print(status)", "on_data(self, data): print(data) return True def on_error(self, status): print(status) auth = OAuthHandler(ckey, csecret)", "# create a credentials.py file with the following keys from credentials import ckey,", "atoken, asecret from tweepy import Stream, OAuthHandler from tweepy.streaming import StreamListener class listener(StreamListener):", "import StreamListener class listener(StreamListener): def on_data(self, data): print(data) return True def on_error(self, status):", "from tweepy.streaming import StreamListener class listener(StreamListener): def on_data(self, 
data): print(data) return True def", "a credentials.py file with the following keys from credentials import ckey, csecret, atoken,", "OAuthHandler from tweepy.streaming import StreamListener class listener(StreamListener): def on_data(self, data): print(data) return True", "with the following keys from credentials import ckey, csecret, atoken, asecret from tweepy", "data): print(data) return True def on_error(self, status): print(status) auth = OAuthHandler(ckey, csecret) auth.set_access_token(atoken," ]
[ "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('sushi', '0037_broken_credentials'),", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('sushi', '0037_broken_credentials'), ] operations = [", "<reponame>techlib/czechelib-stats # Generated by Django 3.1.3 on 2020-11-20 13:38 from django.db import migrations,", "dependencies = [ ('sushi', '0037_broken_credentials'), ] operations = [ migrations.AlterField( model_name='sushicredentials', name='extra_params', field=models.JSONField(blank=True,", "] operations = [ migrations.AlterField( model_name='sushicredentials', name='extra_params', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField( model_name='sushifetchattempt', name='processing_info',", "[ migrations.AlterField( model_name='sushicredentials', name='extra_params', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField( model_name='sushifetchattempt', name='processing_info', field=models.JSONField(default=dict, help_text='Internal info'),", "operations = [ migrations.AlterField( model_name='sushicredentials', name='extra_params', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField( model_name='sushifetchattempt', name='processing_info', field=models.JSONField(default=dict,", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('sushi',", "'0037_broken_credentials'), ] operations = [ migrations.AlterField( model_name='sushicredentials', name='extra_params', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField( model_name='sushifetchattempt',", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('sushi', '0037_broken_credentials'), ] operations", "2020-11-20 13:38 from django.db import migrations, models import 
django.db.models.deletion class Migration(migrations.Migration): dependencies =", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('sushi', '0037_broken_credentials'), ]", "Generated by Django 3.1.3 on 2020-11-20 13:38 from django.db import migrations, models import", "[ ('sushi', '0037_broken_credentials'), ] operations = [ migrations.AlterField( model_name='sushicredentials', name='extra_params', field=models.JSONField(blank=True, default=dict), ),", "class Migration(migrations.Migration): dependencies = [ ('sushi', '0037_broken_credentials'), ] operations = [ migrations.AlterField( model_name='sushicredentials',", "3.1.3 on 2020-11-20 13:38 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "Migration(migrations.Migration): dependencies = [ ('sushi', '0037_broken_credentials'), ] operations = [ migrations.AlterField( model_name='sushicredentials', name='extra_params',", "on 2020-11-20 13:38 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "13:38 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "('sushi', '0037_broken_credentials'), ] operations = [ migrations.AlterField( model_name='sushicredentials', name='extra_params', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField(", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('sushi', '0037_broken_credentials'), ] operations = [ migrations.AlterField(", "= [ migrations.AlterField( model_name='sushicredentials', name='extra_params', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField( model_name='sushifetchattempt', name='processing_info', field=models.JSONField(default=dict, help_text='Internal", "model_name='sushicredentials', 
name='extra_params', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField( model_name='sushifetchattempt', name='processing_info', field=models.JSONField(default=dict, help_text='Internal info'), ), ]", "= [ ('sushi', '0037_broken_credentials'), ] operations = [ migrations.AlterField( model_name='sushicredentials', name='extra_params', field=models.JSONField(blank=True, default=dict),", "Django 3.1.3 on 2020-11-20 13:38 from django.db import migrations, models import django.db.models.deletion class", "by Django 3.1.3 on 2020-11-20 13:38 from django.db import migrations, models import django.db.models.deletion", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('sushi', '0037_broken_credentials'), ] operations =", "# Generated by Django 3.1.3 on 2020-11-20 13:38 from django.db import migrations, models", "migrations.AlterField( model_name='sushicredentials', name='extra_params', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField( model_name='sushifetchattempt', name='processing_info', field=models.JSONField(default=dict, help_text='Internal info'), )," ]
[ "tf import numpy as np BINARY_SEARCH_STEPS = 9 # number of times to", "defenses. Returns adversarial examples for the supplied model. confidence: Confidence of adversarial examples:", "we use to assign them self.assign_timg = tf.placeholder(tf.float32, shape) self.assign_tlab = tf.placeholder(tf.float32, (batch_size,", "self.TARGETED: # if targetted, optimize for making the other class most likely for", "under the BSD 2-Clause licence, ## contained in the LICENCE file in this", "pixel value (default 0.5). \"\"\" image_size, num_channels, num_labels = models[0].image_size, models[0].num_channels, models[0].num_labels self.sess", "tf.train.AdamOptimizer(self.LEARNING_RATE) self.train = optimizer.minimize(self.loss, var_list=[modifier]) end_vars = tf.global_variables() new_vars = [x for x", "specific class? or just be wrong? CONFIDENCE = 0 # how strong the", "to less accurate results, default 1e-2 TARGETED = False # should we target", "This attack is the most efficient and should be used as the primary", "then targets are the original class labels. \"\"\" r = [] # print('go", "gradient descent ABORT_EARLY = True # if we stop improving, abort gradient descent", "[0,1] indicating the expected loss of the learner \"\"\" if not isinstance(x, (float,", "clear improvement for this value of c bestl2[e] = l2 bestscore[e] = currLoss", "targets, weights): \"\"\" Perform the L_2 attack on the given images for the", "currLoss = compareLoss(sc, np.argmax(batchlab[e])) # expected loss of the learner if currLoss >", "not isinstance(x, (float, int, np.int64)): x = np.copy(x) for v in x: #", "how strong the adversarial example should be INITIAL_CONST = 1e-3 # the initial", "of adversarial examples: higher produces examples that are farther away, but more strongly", "to run simultaneously. 
targeted: True if we should perform a targetted attack, False", "up the losses self.loss2 = tf.reduce_sum(self.l2dist) self.loss1 = tf.reduce_sum(self.const * tf.add_n(self.loss1list)) self.loss =", "labs, weights): \"\"\" Run the attack on a batch of images and labels.", "self.batch_size # convert to tanh-space imgs = np.arctanh((imgs - self.boxplus) / self.boxmul *", "INITIAL_CONST = 1e-3 # the initial constant c to pick as a first", "2. self.newimg = tf.tanh(modifier + self.timg) * self.boxmul + self.boxplus # prediction BEFORE-SOFTMAX", "= tf.reduce_sum((self.tlab) * self.outputs[i], 1) other = tf.reduce_max((1 - self.tlab)*self.outputs[i] - (self.tlab*10000), 1)", "= [] others = [] for i in xrange(self.num_models): real = tf.reduce_sum((self.tlab) *", "the variables to initialize when we run self.setup = [] self.setup.append(self.timg.assign(self.assign_timg)) self.setup.append(self.tlab.assign(self.assign_tlab)) self.setup.append(self.const.assign(self.assign_const))", "self.setup = [] self.setup.append(self.timg.assign(self.assign_timg)) self.setup.append(self.tlab.assign(self.assign_tlab)) self.setup.append(self.const.assign(self.assign_const)) self.setup.append(self.weights.assign(self.assign_weights)) self.init = tf.variables_initializer(var_list=[modifier]+new_vars) def attack(self, imgs,", "MAX_ITERATIONS = 10000 # number of iterations to perform gradient descent ABORT_EARLY =", "self.boxplus)),[1,2,3]) # compute the probability of the label class versus the maximum other", "= tf.reduce_sum(self.l2dist) self.loss1 = tf.reduce_sum(self.const * tf.add_n(self.loss1list)) self.loss = self.loss1 + self.loss2 self.reals", "self.TARGETED: return np.dot(x == y, weights) else: return np.dot(x != y, weights) batch_size", "+ boxmax) / 2. 
self.newimg = tf.tanh(modifier + self.timg) * self.boxmul + self.boxplus", "over again self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab: batchlab, self.assign_const: CONST, self.assign_weights: weights}) # print", "algorithm. Smaller values produce better results but are slower to converge. binary_search_steps: The", "values are more accurate; setting too small will require a large learning rate", "is not important. boxmin: Minimum pixel value (default -0.5). boxmax: Maximum pixel value", "for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (others[i] - reals[i] + self.CONFIDENCE))) else:", "self.BINARY_SEARCH_STEPS = binary_search_steps self.ABORT_EARLY = abort_early self.CONFIDENCE = confidence self.initial_const = initial_const self.batch_size", "labels. If self.targeted is false, then targets are the original class labels. \"\"\"", "tanh-space imgs = np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999) # set the", "the expected loss of the learner \"\"\" if not isinstance(x, (float, int, np.int64)):", "= imgs[:batch_size] batchlab = labs[:batch_size] bestl2 = [1e10]*batch_size bestscore = [0.0]*batch_size # set", "\", outer_step, \"Current C \", CONST, lower_bound, upper_bound prev = 1e10 # used", "reset adam's internal state. 
self.sess.run(self.init) batch = imgs[:batch_size] batchlab = labs[:batch_size] bestl2 =", "range(self.MAX_ITERATIONS): # perform the attack _, l, l2s, scores, nimg = self.sess.run([self.train, self.loss,", "l2_attack.py -- attack a network optimizing for l_2 distance ## ## Copyright (C)", "the learner \"\"\" if not isinstance(x, (float, int, np.int64)): x = np.copy(x) for", "to decide on the next value # adjust the constant as needed for", "(lower_bound[e] + upper_bound[e])/2 else: lower_bound[e] = max(lower_bound[e],CONST[e]) if upper_bound[e] < 1e9: CONST[e] =", "improving, abort gradient descent early LEARNING_RATE = 1e-2 # larger values converge faster", "keep track of variables we're creating start_vars = set(x.name for x in tf.global_variables())", "for e in range(batch_size): if bestscore[e] == 1.0: upper_bound[e] = min(upper_bound[e], CONST[e]) if", "initial constant c to pick as a first guess class CarliniL2: def __init__(self,", "targetted, optimize for making the other class most likely for i in xrange(self.num_models):", "class versus the maximum other reals = [] others = [] for i", "reals, others loss1list = [] if self.TARGETED: # if targetted, optimize for making", "self.loss = self.loss1 + self.loss2 self.reals = reals self.others = others # Setup", "optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE) self.train = optimizer.minimize(self.loss, var_list=[modifier]) end_vars = tf.global_variables() new_vars = [x", "are the variables to initialize when we run self.setup = [] self.setup.append(self.timg.assign(self.assign_timg)) self.setup.append(self.tlab.assign(self.assign_tlab))", "self.num_models, self.num_labels) # if iteration % 200 == 0: # print(iteration, self.sess.run((self.loss, self.loss1,", "= ii if currLoss == o_bestscore[e] and l2 < o_bestl2[e]: o_bestl2[e] = l2", "if we should perform a targetted attack, False otherwise. learning_rate: The learning rate", "Number of attacks to run simultaneously. 
targeted: True if we should perform a", "we don't have to send them over again self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab: batchlab,", "by <NAME> 2017 import tensorflow as tf import numpy as np BINARY_SEARCH_STEPS =", "= len(models) self.num_labels = num_labels shape = (batch_size,image_size,image_size,num_channels) # the variable we're going", "constant with binary search MAX_ITERATIONS = 10000 # number of iterations to perform", "if we stop improving, abort gradient descent early LEARNING_RATE = 1e-2 # larger", "tf.global_variables()) optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE) self.train = optimizer.minimize(self.loss, var_list=[modifier]) end_vars = tf.global_variables() new_vars =", "= l for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)): currLoss = compareLoss(sc, np.argmax(batchlab[e])) # expected loss", "improvement for this value of c bestl2[e] = l2 bestscore[e] = currLoss if", "LEARNING_RATE, binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS, abort_early = ABORT_EARLY, initial_const = INITIAL_CONST,", "boxmax self.boxmul = (boxmax - boxmin) / 2. self.boxplus = (boxmin + boxmax)", "to pick as a first guess class CarliniL2: def __init__(self, sess, models, batch_size=1,", "ABORT_EARLY = True # if we stop improving, abort gradient descent early LEARNING_RATE", "+ self.boxplus # prediction BEFORE-SOFTMAX of the model self.outputs = [model.predict(self.newimg) for model", "self.others = reals, others loss1list = [] if self.TARGETED: # if targetted, optimize", "r = [] # print('go up to',len(imgs)) for i in range(0,len(imgs),self.batch_size): # print('tick',i)", "# sum up the losses self.loss2 = tf.reduce_sum(self.l2dist) self.loss1 = tf.reduce_sum(self.const * tf.add_n(self.loss1list))", "loss1list.append(tf.maximum(0.0, self.weights[i] * (others[i] - reals[i] + self.CONFIDENCE))) else: # if untargeted, optimize", "the given targets. 
If self.targeted is true, then the targets represents the target", "in models] # distance to the input data self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul", "num_labels)) self.assign_const = tf.placeholder(tf.float32, [batch_size]) self.assign_weights = tf.placeholder(tf.float32, [self.num_models]) # the resulting image,", ".10) == 0: if l > prev*.9999: break prev = l for e,(l2,sc,ii)", "loss of the learner if currLoss > bestscore[e]: # we've found a clear", "value # adjust the constant as needed for e in range(batch_size): if bestscore[e]", "and confidence. If binary_search_steps is large, the initial constant is not important. boxmin:", "we should perform a targetted attack, False otherwise. learning_rate: The learning rate for", "tf.placeholder(tf.float32, [self.num_models]) # the resulting image, tanh'd to keep bounded from boxmin to", "descent early LEARNING_RATE = 1e-2 # larger values converge faster to less accurate", "this class least likely. for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (reals[i] -", "up to',len(imgs)) for i in range(0,len(imgs),self.batch_size): # print('tick',i) r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights)) return np.array(r)", "int, np.int64)): x = np.copy(x) for v in x: # update the target", "self.boxmul = (boxmax - boxmin) / 2. self.boxplus = (boxmin + boxmax) /", "num_channels, num_labels = models[0].image_size, models[0].num_channels, models[0].num_labels self.sess = sess self.TARGETED = targeted self.LEARNING_RATE", "adam's internal state. self.sess.run(self.init) batch = imgs[:batch_size] batchlab = labs[:batch_size] bestl2 = [1e10]*batch_size", "contained in the LICENCE file in this directory. 
## Modified by <NAME> 2017", "def attack(self, imgs, targets, weights): \"\"\" Perform the L_2 attack on the given", "= tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)),[1,2,3]) # compute the probability of the label", "o_bestscore[e] = currLoss o_bestattack[e] = ii if currLoss == o_bestscore[e] and l2 <", "on the next value # adjust the constant as needed for e in", "bestscore[e]: # we've found a clear improvement for this value of c bestl2[e]", "be used as the primary attack to evaluate potential defenses. Returns adversarial examples", "tf.Variable(np.zeros(shape,dtype=np.float32)) # these are variables to be more efficient in sending data to", "lower and upper bounds accordingly lower_bound = np.zeros(batch_size) CONST = np.ones(batch_size)*self.initial_const upper_bound =", "models, batch_size=1, confidence = CONFIDENCE, targeted = TARGETED, learning_rate = LEARNING_RATE, binary_search_steps =", "examples for the supplied model. confidence: Confidence of adversarial examples: higher produces examples", "/ 2. self.newimg = tf.tanh(modifier + self.timg) * self.boxmul + self.boxplus # prediction", "check if we should abort search if we're getting nowhere. (check every 10%)", "initial_const: The initial tradeoff-constant to use to tune the relative importance of distance", "to evaluate potential defenses. Returns adversarial examples for the supplied model. confidence: Confidence", "# and here's what we use to assign them self.assign_timg = tf.placeholder(tf.float32, shape)", "the primary attack to evaluate potential defenses. 
Returns adversarial examples for the supplied", "tf.reduce_sum((self.tlab) * self.outputs[i], 1) other = tf.reduce_max((1 - self.tlab)*self.outputs[i] - (self.tlab*10000), 1) reals.append(real)", "the other class most likely for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (others[i]", "x is an np array of shape num_models x num_classes y is the", "is an np array of shape num_models x num_classes y is the true", "c, now need to decide on the next value # adjust the constant", "batch_size=1, confidence = CONFIDENCE, targeted = TARGETED, learning_rate = LEARNING_RATE, binary_search_steps = BINARY_SEARCH_STEPS,", "## l2_attack.py -- attack a network optimizing for l_2 distance ## ## Copyright", "= reals self.others = others # Setup the adam optimizer and keep track", "run self.setup = [] self.setup.append(self.timg.assign(self.assign_timg)) self.setup.append(self.tlab.assign(self.assign_tlab)) self.setup.append(self.const.assign(self.assign_const)) self.setup.append(self.weights.assign(self.assign_weights)) self.init = tf.variables_initializer(var_list=[modifier]+new_vars) def attack(self,", "needed for e in range(batch_size): if bestscore[e] == 1.0: upper_bound[e] = min(upper_bound[e], CONST[e])", "= [] for i in xrange(self.num_models): real = tf.reduce_sum((self.tlab) * self.outputs[i], 1) other", "initial_const = INITIAL_CONST, boxmin = -0.5, boxmax = 0.5): \"\"\" The L_2 optimized", "upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: lower_bound[e] = max(lower_bound[e],CONST[e]) if", "Returns adversarial examples for the supplied model. confidence: Confidence of adversarial examples: higher", "## ## This program is licenced under the BSD 2-Clause licence, ## contained", "l2 < o_bestl2[e]: o_bestl2[e] = l2 o_bestattack[e] = ii # finished trying out", "import tensorflow as tf import numpy as np BINARY_SEARCH_STEPS = 9 # number", "given targets. 
If self.targeted is true, then the targets represents the target labels.", "be wrong? CONFIDENCE = 0 # how strong the adversarial example should be", "The initial tradeoff-constant to use to tune the relative importance of distance and", "wrong? CONFIDENCE = 0 # how strong the adversarial example should be INITIAL_CONST", "INITIAL_CONST, boxmin = -0.5, boxmax = 0.5): \"\"\" The L_2 optimized attack. This", "simultaneously. targeted: True if we should perform a targetted attack, False otherwise. learning_rate:", "use to tune the relative importance of distance and confidence. If binary_search_steps is", "the given images for the given targets. If self.targeted is true, then the", "the best l2, score, and image attack o_bestl2 = [1e10]*batch_size o_bestscore = [-1]*batch_size", "= [1e10]*batch_size o_bestscore = [-1]*batch_size o_bestattack = [np.zeros(imgs[0].shape)]*batch_size for outer_step in range(self.BINARY_SEARCH_STEPS): #", "state. self.sess.run(self.init) batch = imgs[:batch_size] batchlab = labs[:batch_size] bestl2 = [1e10]*batch_size bestscore =", "+ self.boxplus)),[1,2,3]) # compute the probability of the label class versus the maximum", "## Modified by <NAME> 2017 import tensorflow as tf import numpy as np", "target one specific class? or just be wrong? 
CONFIDENCE = 0 # how", "tf.tanh(modifier + self.timg) * self.boxmul + self.boxplus # prediction BEFORE-SOFTMAX of the model", "tf.reduce_sum(self.const * tf.add_n(self.loss1list)) self.loss = self.loss1 + self.loss2 self.reals = reals self.others =", "self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels)) self.assign_const = tf.placeholder(tf.float32, [batch_size]) self.assign_weights = tf.placeholder(tf.float32, [self.num_models])", "target scores for each individual prediction if self.TARGETED: v[y] -= self.CONFIDENCE else: v[y]", "upper bounds accordingly lower_bound = np.zeros(batch_size) CONST = np.ones(batch_size)*self.initial_const upper_bound = np.ones(batch_size)*1e10 #", "% 200 == 0: # print(iteration, self.sess.run((self.loss, self.loss1, self.loss2))) # check if we", "the next value # adjust the constant as needed for e in range(batch_size):", "variables we're creating start_vars = set(x.name for x in tf.global_variables()) optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)", "-0.5, boxmax = 0.5): \"\"\" The L_2 optimized attack. This attack is the", "an np array of shape num_models x num_classes y is the true label", "others.append(other) self.reals, self.others = reals, others loss1list = [] if self.TARGETED: # if", "self.loss1list = loss1list # TODO: remove # sum up the losses self.loss2 =", "attack a network optimizing for l_2 distance ## ## Copyright (C) 2016, <NAME>", "CarliniL2: def __init__(self, sess, models, batch_size=1, confidence = CONFIDENCE, targeted = TARGETED, learning_rate", "True if we should perform a targetted attack, False otherwise. 
learning_rate: The learning", "self.boxmul + self.boxplus)),[1,2,3]) # compute the probability of the label class versus the", "self.initial_const = initial_const self.batch_size = batch_size self.num_models = len(models) self.num_labels = num_labels shape", "= optimizer.minimize(self.loss, var_list=[modifier]) end_vars = tf.global_variables() new_vars = [x for x in end_vars", "weights): \"\"\" Run the attack on a batch of images and labels. \"\"\"", "200 == 0: # print(iteration, self.sess.run((self.loss, self.loss1, self.loss2))) # check if we should", "1e-2 TARGETED = False # should we target one specific class? or just", "examples: higher produces examples that are farther away, but more strongly classified as", "for x in tf.global_variables()) optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE) self.train = optimizer.minimize(self.loss, var_list=[modifier]) end_vars =", "the BSD 2-Clause licence, ## contained in the LICENCE file in this directory.", "/ 2. self.boxplus = (boxmin + boxmax) / 2. self.newimg = tf.tanh(modifier +", "self.CONFIDENCE))) self.loss1list = loss1list # TODO: remove # sum up the losses self.loss2", "o_bestattack = [np.zeros(imgs[0].shape)]*batch_size for outer_step in range(self.BINARY_SEARCH_STEPS): # completely reset adam's internal state.", "[0.0]*batch_size # set the variables so that we don't have to send them", "better results but are slower to converge. binary_search_steps: The number of times we", "weights)) return np.array(r) def attack_batch(self, imgs, labs, weights): \"\"\" Run the attack on", "L_2 attack on the given images for the given targets. If self.targeted is", "compareLoss(x, y): \"\"\" x is an np array of shape num_models x num_classes", "end_vars = tf.global_variables() new_vars = [x for x in end_vars if x.name not", "CONST[e]) if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: lower_bound[e] =", "should be used as the primary attack to evaluate potential defenses. 
Returns adversarial", "image_size, num_channels, num_labels = models[0].image_size, models[0].num_channels, models[0].num_labels self.sess = sess self.TARGETED = targeted", "the attack on a batch of images and labels. \"\"\" def compareLoss(x, y):", "perform gradient descent ABORT_EARLY = True # if we stop improving, abort gradient", "self.num_models = len(models) self.num_labels = num_labels shape = (batch_size,image_size,image_size,num_channels) # the variable we're", "Smaller values produce better results but are slower to converge. binary_search_steps: The number", "of c bestl2[e] = l2 bestscore[e] = currLoss if currLoss == bestscore[e] and", "2. self.boxplus = (boxmin + boxmax) / 2. self.newimg = tf.tanh(modifier + self.timg)", "data to tf self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32) self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32) self.const =", "1) other = tf.reduce_max((1 - self.tlab)*self.outputs[i] - (self.tlab*10000), 1) reals.append(real) others.append(other) self.reals, self.others", "losses self.loss2 = tf.reduce_sum(self.l2dist) self.loss1 = tf.reduce_sum(self.const * tf.add_n(self.loss1list)) self.loss = self.loss1 +", "self.boxplus) / self.boxmul * 0.999999) # set the lower and upper bounds accordingly", "Maximum pixel value (default 0.5). \"\"\" image_size, num_channels, num_labels = models[0].image_size, models[0].num_channels, models[0].num_labels", "0.5). 
\"\"\" image_size, num_channels, num_labels = models[0].image_size, models[0].num_channels, models[0].num_labels self.sess = sess self.TARGETED", "attack(self, imgs, targets, weights): \"\"\" Perform the L_2 attack on the given images", "- self.boxplus) / self.boxmul * 0.999999) # set the lower and upper bounds", "[1e10]*batch_size bestscore = [0.0]*batch_size # set the variables so that we don't have", "# set the variables so that we don't have to send them over", "= [] # print('go up to',len(imgs)) for i in range(0,len(imgs),self.batch_size): # print('tick',i) r.extend(self.attack_batch(imgs[i:i+self.batch_size],", "rate for the attack algorithm. Smaller values produce better results but are slower", "setting too small will require a large learning rate and will produce poor", "attack on the given images for the given targets. If self.targeted is true,", "bestl2 = [1e10]*batch_size bestscore = [0.0]*batch_size # set the variables so that we", "# expected loss of the learner if currLoss > bestscore[e]: # we've found", "l2 bestscore[e] = currLoss if currLoss == bestscore[e] and l2 < bestl2[e]: bestl2[e]", "initial_const self.batch_size = batch_size self.num_models = len(models) self.num_labels = num_labels shape = (batch_size,image_size,image_size,num_channels)", "l2 if currLoss > o_bestscore[e]: o_bestl2[e] = l2 o_bestscore[e] = currLoss o_bestattack[e] =", "gradient descent early LEARNING_RATE = 1e-2 # larger values converge faster to less", "upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: CONST[e] *= 100 #", "number of times to adjust the constant with binary search MAX_ITERATIONS = 10000", "upper_bound = np.ones(batch_size)*1e10 # the best l2, score, and image attack o_bestl2 =", "i in range(0,len(imgs),self.batch_size): # print('tick',i) r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights)) return np.array(r) def attack_batch(self, imgs,", "< 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 
else: CONST[e] *= 100 # return", "we target one specific class? or just be wrong? CONFIDENCE = 0 #", "= 9 # number of times to adjust the constant with binary search", "perform binary search to find the optimal tradeoff-constant between distance and confidence. max_iterations:", "## contained in the LICENCE file in this directory. ## Modified by <NAME>", "the optimal tradeoff-constant between distance and confidence. max_iterations: The maximum number of iterations.", "self.reals, self.others = reals, others loss1list = [] if self.TARGETED: # if targetted,", "individual prediction if self.TARGETED: v[y] -= self.CONFIDENCE else: v[y] += self.CONFIDENCE x =", "break prev = l for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)): currLoss = compareLoss(sc, np.argmax(batchlab[e])) #", "range(batch_size): if bestscore[e] == 1.0: upper_bound[e] = min(upper_bound[e], CONST[e]) if upper_bound[e] < 1e9:", "to tune the relative importance of distance and confidence. If binary_search_steps is large,", "if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: lower_bound[e] = max(lower_bound[e],CONST[e])", "v[y] -= self.CONFIDENCE else: v[y] += self.CONFIDENCE x = np.argmax(x, 1) # these", "= labs[:batch_size] bestl2 = [1e10]*batch_size bestscore = [0.0]*batch_size # set the variables so", "targeted = TARGETED, learning_rate = LEARNING_RATE, binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS, abort_early", "learning_rate: The learning rate for the attack algorithm. Smaller values produce better results", "constant c to pick as a first guess class CarliniL2: def __init__(self, sess,", "* self.boxmul + self.boxplus)),[1,2,3]) # compute the probability of the label class versus", "False # should we target one specific class? or just be wrong? 
CONFIDENCE", "hypothesis if self.TARGETED: return np.dot(x == y, weights) else: return np.dot(x != y,", "= 1e10 # used to be e6 for iteration in range(self.MAX_ITERATIONS): # perform", "in xrange(self.num_models): real = tf.reduce_sum((self.tlab) * self.outputs[i], 1) other = tf.reduce_max((1 - self.tlab)*self.outputs[i]", "xrange(self.num_models): real = tf.reduce_sum((self.tlab) * self.outputs[i], 1) other = tf.reduce_max((1 - self.tlab)*self.outputs[i] -", "is the most efficient and should be used as the primary attack to", "tf.Variable(np.zeros(batch_size), dtype=tf.float32) self.weights = tf.Variable(np.zeros(self.num_models), dtype=tf.float32) # and here's what we use to", "self.sess.run(self.init) batch = imgs[:batch_size] batchlab = labs[:batch_size] bestl2 = [1e10]*batch_size bestscore = [0.0]*batch_size", "(default -0.5). boxmax: Maximum pixel value (default 0.5). \"\"\" image_size, num_channels, num_labels =", "prev = 1e10 # used to be e6 for iteration in range(self.MAX_ITERATIONS): #", "learner \"\"\" if not isinstance(x, (float, int, np.int64)): x = np.copy(x) for v", "= [0.0]*batch_size # set the variables so that we don't have to send", "are farther away, but more strongly classified as adversarial. batch_size: Number of attacks", "we're going to optimize over modifier = tf.Variable(np.zeros(shape,dtype=np.float32)) # these are variables to", "a clear improvement for this value of c bestl2[e] = l2 bestscore[e] =", "a large learning rate and will produce poor results. 
abort_early: If true, allows", "for i in range(0,len(imgs),self.batch_size): # print('tick',i) r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights)) return np.array(r) def attack_batch(self,", "y is the true label or target label of the class returns a", "models] # distance to the input data self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul +", "Modified by <NAME> 2017 import tensorflow as tf import numpy as np BINARY_SEARCH_STEPS", "guess class CarliniL2: def __init__(self, sess, models, batch_size=1, confidence = CONFIDENCE, targeted =", "= tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32) self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32) self.weights = tf.Variable(np.zeros(self.num_models), dtype=tf.float32) # and", "* (reals[i] - others[i] + self.CONFIDENCE))) self.loss1list = loss1list # TODO: remove #", "gets stuck. initial_const: The initial tradeoff-constant to use to tune the relative importance", "# if untargeted, optimize for making this class least likely. for i in", "be more efficient in sending data to tf self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32) self.tlab", "the learner if currLoss > bestscore[e]: # we've found a clear improvement for", "pixel value (default -0.5). boxmax: Maximum pixel value (default 0.5). \"\"\" image_size, num_channels,", "tf.placeholder(tf.float32, [batch_size]) self.assign_weights = tf.placeholder(tf.float32, [self.num_models]) # the resulting image, tanh'd to keep", "larger values converge faster to less accurate results, default 1e-2 TARGETED = False", "the constant as needed for e in range(batch_size): if bestscore[e] == 1.0: upper_bound[e]", "currLoss > bestscore[e]: # we've found a clear improvement for this value of", "and image attack o_bestl2 = [1e10]*batch_size o_bestscore = [-1]*batch_size o_bestattack = [np.zeros(imgs[0].shape)]*batch_size for", "but more strongly classified as adversarial. 
batch_size: Number of attacks to run simultaneously.", "This program is licenced under the BSD 2-Clause licence, ## contained in the", "large, the initial constant is not important. boxmin: Minimum pixel value (default -0.5).", "models[0].num_channels, models[0].num_labels self.sess = sess self.TARGETED = targeted self.LEARNING_RATE = learning_rate self.MAX_ITERATIONS =", "= l2 if currLoss > o_bestscore[e]: o_bestl2[e] = l2 o_bestscore[e] = currLoss o_bestattack[e]", "descent ABORT_EARLY = True # if we stop improving, abort gradient descent early", "= 0.5): \"\"\" The L_2 optimized attack. This attack is the most efficient", "file in this directory. ## Modified by <NAME> 2017 import tensorflow as tf", "higher produces examples that are farther away, but more strongly classified as adversarial.", "going to optimize over modifier = tf.Variable(np.zeros(shape,dtype=np.float32)) # these are variables to be", "np.dot(x != y, weights) batch_size = self.batch_size # convert to tanh-space imgs =", "as needed for e in range(batch_size): if bestscore[e] == 1.0: upper_bound[e] = min(upper_bound[e],", "if targetted, optimize for making the other class most likely for i in", "max_iterations self.BINARY_SEARCH_STEPS = binary_search_steps self.ABORT_EARLY = abort_early self.CONFIDENCE = confidence self.initial_const = initial_const", "bestl2[e]: bestl2[e] = l2 if currLoss > o_bestscore[e]: o_bestl2[e] = l2 o_bestscore[e] =", "distance to the input data self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)),[1,2,3]) #", "and keep track of variables we're creating start_vars = set(x.name for x in", "labels. \"\"\" def compareLoss(x, y): \"\"\" x is an np array of shape", "network optimizing for l_2 distance ## ## Copyright (C) 2016, <NAME> <<EMAIL>>. 
##", "bestl2[e] = l2 if currLoss > o_bestscore[e]: o_bestl2[e] = l2 o_bestscore[e] = currLoss", "number of iterations to perform gradient descent ABORT_EARLY = True # if we", "if we should abort search if we're getting nowhere. (check every 10%) if", "the label class versus the maximum other reals = [] others = []", "xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (reals[i] - others[i] + self.CONFIDENCE))) self.loss1list = loss1list #", "(others[i] - reals[i] + self.CONFIDENCE))) else: # if untargeted, optimize for making this", "- others[i] + self.CONFIDENCE))) self.loss1list = loss1list # TODO: remove # sum up", "batch_size: Number of attacks to run simultaneously. targeted: True if we should perform", "importance of distance and confidence. If binary_search_steps is large, the initial constant is", "models[0].image_size, models[0].num_channels, models[0].num_labels self.sess = sess self.TARGETED = targeted self.LEARNING_RATE = learning_rate self.MAX_ITERATIONS", "model self.outputs = [model.predict(self.newimg) for model in models] # distance to the input", "targeted self.LEARNING_RATE = learning_rate self.MAX_ITERATIONS = max_iterations self.BINARY_SEARCH_STEPS = binary_search_steps self.ABORT_EARLY = abort_early", "learning rate and will produce poor results. abort_early: If true, allows early aborts", "[x for x in end_vars if x.name not in start_vars] # these are", "shape) self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels)) self.assign_const = tf.placeholder(tf.float32, [batch_size]) self.assign_weights = tf.placeholder(tf.float32,", "batch of images and labels. 
\"\"\" def compareLoss(x, y): \"\"\" x is an", "self.setup.append(self.weights.assign(self.assign_weights)) self.init = tf.variables_initializer(var_list=[modifier]+new_vars) def attack(self, imgs, targets, weights): \"\"\" Perform the L_2", "confidence = CONFIDENCE, targeted = TARGETED, learning_rate = LEARNING_RATE, binary_search_steps = BINARY_SEARCH_STEPS, max_iterations", "input data self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)),[1,2,3]) # compute the probability", "self.sess.run((self.loss, self.loss1, self.loss2))) # check if we should abort search if we're getting", "\"\"\" x is an np array of shape num_models x num_classes y is", "next value # adjust the constant as needed for e in range(batch_size): if", "tradeoff-constant to use to tune the relative importance of distance and confidence. If", "(default 0.5). \"\"\" image_size, num_channels, num_labels = models[0].image_size, models[0].num_channels, models[0].num_labels self.sess = sess", "for model in models] # distance to the input data self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg)", "abort_early: If true, allows early aborts if gradient descent gets stuck. initial_const: The", "other reals = [] others = [] for i in xrange(self.num_models): real =", "target label of the class returns a number in [0,1] indicating the expected", "L_2 optimized attack. This attack is the most efficient and should be used", "= [] if self.TARGETED: # if targetted, optimize for making the other class", "# the variable we're going to optimize over modifier = tf.Variable(np.zeros(shape,dtype=np.float32)) # these", "is licenced under the BSD 2-Clause licence, ## contained in the LICENCE file", "# these are variables to be more efficient in sending data to tf", "# the resulting image, tanh'd to keep bounded from boxmin to boxmax self.boxmul", "in the LICENCE file in this directory. ## Modified by <NAME> 2017 import", "number of iterations. 
Larger values are more accurate; setting too small will require", "Larger values are more accurate; setting too small will require a large learning", "potential defenses. Returns adversarial examples for the supplied model. confidence: Confidence of adversarial", "x num_classes y is the true label or target label of the class", "accurate results, default 1e-2 TARGETED = False # should we target one specific", "= loss1list # TODO: remove # sum up the losses self.loss2 = tf.reduce_sum(self.l2dist)", "= np.argmax(x, 1) # these are the predictions of each hypothesis if self.TARGETED:", "of the class returns a number in [0,1] indicating the expected loss of", "set the lower and upper bounds accordingly lower_bound = np.zeros(batch_size) CONST = np.ones(batch_size)*self.initial_const", "> prev*.9999: break prev = l for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)): currLoss = compareLoss(sc,", "currLoss == bestscore[e] and l2 < bestl2[e]: bestl2[e] = l2 if currLoss >", "tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32) self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32) self.weights = tf.Variable(np.zeros(self.num_models), dtype=tf.float32) # and here's", "track of variables we're creating start_vars = set(x.name for x in tf.global_variables()) optimizer", "def __init__(self, sess, models, batch_size=1, confidence = CONFIDENCE, targeted = TARGETED, learning_rate =", "strongly classified as adversarial. batch_size: Number of attacks to run simultaneously. targeted: True", "x = np.argmax(x, 1) # these are the predictions of each hypothesis if", "e6 for iteration in range(self.MAX_ITERATIONS): # perform the attack _, l, l2s, scores,", "and confidence. max_iterations: The maximum number of iterations. 
Larger values are more accurate;", "batch, self.assign_tlab: batchlab, self.assign_const: CONST, self.assign_weights: weights}) # print \"Outer Step \", outer_step,", "best l2, score, and image attack o_bestl2 = [1e10]*batch_size o_bestscore = [-1]*batch_size o_bestattack", "\"\"\" Perform the L_2 attack on the given images for the given targets.", "allows early aborts if gradient descent gets stuck. initial_const: The initial tradeoff-constant to", "* 0.999999) # set the lower and upper bounds accordingly lower_bound = np.zeros(batch_size)", "= batch_size self.num_models = len(models) self.num_labels = num_labels shape = (batch_size,image_size,image_size,num_channels) # the", "tf.Variable(np.zeros(shape), dtype=tf.float32) self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32) self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32) self.weights = tf.Variable(np.zeros(self.num_models),", "primary attack to evaluate potential defenses. Returns adversarial examples for the supplied model.", "= [np.zeros(imgs[0].shape)]*batch_size for outer_step in range(self.BINARY_SEARCH_STEPS): # completely reset adam's internal state. 
self.sess.run(self.init)", "other class most likely for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (others[i] -", "of variables we're creating start_vars = set(x.name for x in tf.global_variables()) optimizer =", "over modifier = tf.Variable(np.zeros(shape,dtype=np.float32)) # these are variables to be more efficient in", "attack _, l, l2s, scores, nimg = self.sess.run([self.train, self.loss, self.l2dist, self.outputs, self.newimg]) scores", "we've found a clear improvement for this value of c bestl2[e] = l2", "label or target label of the class returns a number in [0,1] indicating", "= [model.predict(self.newimg) for model in models] # distance to the input data self.l2dist", "and upper bounds accordingly lower_bound = np.zeros(batch_size) CONST = np.ones(batch_size)*self.initial_const upper_bound = np.ones(batch_size)*1e10", "i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (others[i] - reals[i] + self.CONFIDENCE))) else: #", "<NAME> 2017 import tensorflow as tf import numpy as np BINARY_SEARCH_STEPS = 9", "values converge faster to less accurate results, default 1e-2 TARGETED = False #", "confidence. max_iterations: The maximum number of iterations. Larger values are more accurate; setting", "the relative importance of distance and confidence. 
If binary_search_steps is large, the initial", "self.sess = sess self.TARGETED = targeted self.LEARNING_RATE = learning_rate self.MAX_ITERATIONS = max_iterations self.BINARY_SEARCH_STEPS", "= BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS, abort_early = ABORT_EARLY, initial_const = INITIAL_CONST, boxmin =", "new_vars = [x for x in end_vars if x.name not in start_vars] #", "isinstance(x, (float, int, np.int64)): x = np.copy(x) for v in x: # update", "np.argmax(x, 1) # these are the predictions of each hypothesis if self.TARGETED: return", "# these are the predictions of each hypothesis if self.TARGETED: return np.dot(x ==", "if self.TARGETED: v[y] -= self.CONFIDENCE else: v[y] += self.CONFIDENCE x = np.argmax(x, 1)", "of shape num_models x num_classes y is the true label or target label", "produces examples that are farther away, but more strongly classified as adversarial. batch_size:", "images for the given targets. If self.targeted is true, then the targets represents", "in start_vars] # these are the variables to initialize when we run self.setup", "= np.ones(batch_size)*1e10 # the best l2, score, and image attack o_bestl2 = [1e10]*batch_size", "# perform the attack _, l, l2s, scores, nimg = self.sess.run([self.train, self.loss, self.l2dist,", "e in range(batch_size): if bestscore[e] == 1.0: upper_bound[e] = min(upper_bound[e], CONST[e]) if upper_bound[e]", "+ upper_bound[e])/2 else: lower_bound[e] = max(lower_bound[e],CONST[e]) if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e]", "# number of times to adjust the constant with binary search MAX_ITERATIONS =", "True # if we stop improving, abort gradient descent early LEARNING_RATE = 1e-2", "optimize over modifier = tf.Variable(np.zeros(shape,dtype=np.float32)) # these are variables to be more efficient", "confidence self.initial_const = initial_const self.batch_size = batch_size self.num_models = len(models) self.num_labels = num_labels", "image attack o_bestl2 = [1e10]*batch_size o_bestscore = 
[-1]*batch_size o_bestattack = [np.zeros(imgs[0].shape)]*batch_size for outer_step", "of distance and confidence. If binary_search_steps is large, the initial constant is not", "= True # if we stop improving, abort gradient descent early LEARNING_RATE =", "Minimum pixel value (default -0.5). boxmax: Maximum pixel value (default 0.5). \"\"\" image_size,", "abort gradient descent early LEARNING_RATE = 1e-2 # larger values converge faster to", "\"\"\" image_size, num_channels, num_labels = models[0].image_size, models[0].num_channels, models[0].num_labels self.sess = sess self.TARGETED =", "nowhere. (check every 10%) if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS * .10) == 0: if", "[] for i in xrange(self.num_models): real = tf.reduce_sum((self.tlab) * self.outputs[i], 1) other =", "binary search to find the optimal tradeoff-constant between distance and confidence. max_iterations: The", "(boxmax - boxmin) / 2. self.boxplus = (boxmin + boxmax) / 2. self.newimg", "will require a large learning rate and will produce poor results. 
abort_early: If", "# number of iterations to perform gradient descent ABORT_EARLY = True # if", "to',len(imgs)) for i in range(0,len(imgs),self.batch_size): # print('tick',i) r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights)) return np.array(r) def", "c bestl2[e] = l2 bestscore[e] = currLoss if currLoss == bestscore[e] and l2", "the most efficient and should be used as the primary attack to evaluate", "len(models) self.num_labels = num_labels shape = (batch_size,image_size,image_size,num_channels) # the variable we're going to", "initial tradeoff-constant to use to tune the relative importance of distance and confidence.", "in range(batch_size): if bestscore[e] == 1.0: upper_bound[e] = min(upper_bound[e], CONST[e]) if upper_bound[e] <", "= tf.Variable(np.zeros(shape,dtype=np.float32)) # these are variables to be more efficient in sending data", "distance ## ## Copyright (C) 2016, <NAME> <<EMAIL>>. ## ## This program is", "nimg = self.sess.run([self.train, self.loss, self.l2dist, self.outputs, self.newimg]) scores = np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels) #", "to assign them self.assign_timg = tf.placeholder(tf.float32, shape) self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels)) self.assign_const", "number of times we perform binary search to find the optimal tradeoff-constant between", "so that we don't have to send them over again self.sess.run(self.setup, {self.assign_timg: batch,", "versus the maximum other reals = [] others = [] for i in", "likely for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (others[i] - reals[i] + self.CONFIDENCE)))", "= MAX_ITERATIONS, abort_early = ABORT_EARLY, initial_const = INITIAL_CONST, boxmin = -0.5, boxmax =", "= binary_search_steps self.ABORT_EARLY = abort_early self.CONFIDENCE = confidence self.initial_const = initial_const self.batch_size =", "self.timg) * self.boxmul + self.boxplus # prediction 
BEFORE-SOFTMAX of the model self.outputs =", "self.tlab)*self.outputs[i] - (self.tlab*10000), 1) reals.append(real) others.append(other) self.reals, self.others = reals, others loss1list =", "o_bestl2[e]: o_bestl2[e] = l2 o_bestattack[e] = ii # finished trying out the adam", "shape = (batch_size,image_size,image_size,num_channels) # the variable we're going to optimize over modifier =", "array of shape num_models x num_classes y is the true label or target", "outer_step in range(self.BINARY_SEARCH_STEPS): # completely reset adam's internal state. self.sess.run(self.init) batch = imgs[:batch_size]", "# if targetted, optimize for making the other class most likely for i", "faster to less accurate results, default 1e-2 TARGETED = False # should we", "early aborts if gradient descent gets stuck. initial_const: The initial tradeoff-constant to use", "self.loss2 = tf.reduce_sum(self.l2dist) self.loss1 = tf.reduce_sum(self.const * tf.add_n(self.loss1list)) self.loss = self.loss1 + self.loss2", "examples that are farther away, but more strongly classified as adversarial. batch_size: Number", "# these are the variables to initialize when we run self.setup = []", "in [0,1] indicating the expected loss of the learner \"\"\" if not isinstance(x,", "variable we're going to optimize over modifier = tf.Variable(np.zeros(shape,dtype=np.float32)) # these are variables", "if not isinstance(x, (float, int, np.int64)): x = np.copy(x) for v in x:", "bestscore[e] == 1.0: upper_bound[e] = min(upper_bound[e], CONST[e]) if upper_bound[e] < 1e9: CONST[e] =", "adversarial examples for the supplied model. confidence: Confidence of adversarial examples: higher produces", "is large, the initial constant is not important. 
boxmin: Minimum pixel value (default", "start_vars = set(x.name for x in tf.global_variables()) optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE) self.train = optimizer.minimize(self.loss,", "imgs, labs, weights): \"\"\" Run the attack on a batch of images and", "+ self.loss2 self.reals = reals self.others = others # Setup the adam optimizer", "l2 < bestl2[e]: bestl2[e] = l2 if currLoss > o_bestscore[e]: o_bestl2[e] = l2", "boxmax: Maximum pixel value (default 0.5). \"\"\" image_size, num_channels, num_labels = models[0].image_size, models[0].num_channels,", "classified as adversarial. batch_size: Number of attacks to run simultaneously. targeted: True if", "adversarial. batch_size: Number of attacks to run simultaneously. targeted: True if we should", "tf.add_n(self.loss1list)) self.loss = self.loss1 + self.loss2 self.reals = reals self.others = others #", "self.num_labels) # if iteration % 200 == 0: # print(iteration, self.sess.run((self.loss, self.loss1, self.loss2)))", "expected loss of the learner if currLoss > bestscore[e]: # we've found a", "we stop improving, abort gradient descent early LEARNING_RATE = 1e-2 # larger values", "self.targeted is true, then the targets represents the target labels. 
If self.targeted is", "of times to adjust the constant with binary search MAX_ITERATIONS = 10000 #", "= (batch_size,image_size,image_size,num_channels) # the variable we're going to optimize over modifier = tf.Variable(np.zeros(shape,dtype=np.float32))", "self.assign_timg = tf.placeholder(tf.float32, shape) self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels)) self.assign_const = tf.placeholder(tf.float32, [batch_size])", "self.setup.append(self.tlab.assign(self.assign_tlab)) self.setup.append(self.const.assign(self.assign_const)) self.setup.append(self.weights.assign(self.assign_weights)) self.init = tf.variables_initializer(var_list=[modifier]+new_vars) def attack(self, imgs, targets, weights): \"\"\" Perform", "optimizer and keep track of variables we're creating start_vars = set(x.name for x", "them over again self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab: batchlab, self.assign_const: CONST, self.assign_weights: weights}) #", "upper_bound[e] = min(upper_bound[e], CONST[e]) if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2", "= [x for x in end_vars if x.name not in start_vars] # these", "we run self.setup = [] self.setup.append(self.timg.assign(self.assign_timg)) self.setup.append(self.tlab.assign(self.assign_tlab)) self.setup.append(self.const.assign(self.assign_const)) self.setup.append(self.weights.assign(self.assign_weights)) self.init = tf.variables_initializer(var_list=[modifier]+new_vars) def", "[-1]*batch_size o_bestattack = [np.zeros(imgs[0].shape)]*batch_size for outer_step in range(self.BINARY_SEARCH_STEPS): # completely reset adam's internal", "attack to evaluate potential defenses. Returns adversarial examples for the supplied model. confidence:", "of each hypothesis if self.TARGETED: return np.dot(x == y, weights) else: return np.dot(x", "should we target one specific class? or just be wrong? 
CONFIDENCE = 0", "example should be INITIAL_CONST = 1e-3 # the initial constant c to pick", "currLoss > o_bestscore[e]: o_bestl2[e] = l2 o_bestscore[e] = currLoss o_bestattack[e] = ii if", "np.zeros(batch_size) CONST = np.ones(batch_size)*self.initial_const upper_bound = np.ones(batch_size)*1e10 # the best l2, score, and", "self.newimg = tf.tanh(modifier + self.timg) * self.boxmul + self.boxplus # prediction BEFORE-SOFTMAX of", "# the initial constant c to pick as a first guess class CarliniL2:", "found a clear improvement for this value of c bestl2[e] = l2 bestscore[e]", "the adam optimizer for a particular c, now need to decide on the", "= 1e-2 # larger values converge faster to less accurate results, default 1e-2", "value (default 0.5). \"\"\" image_size, num_channels, num_labels = models[0].image_size, models[0].num_channels, models[0].num_labels self.sess =", "boxmin: Minimum pixel value (default -0.5). boxmax: Maximum pixel value (default 0.5). \"\"\"", "of attacks to run simultaneously. targeted: True if we should perform a targetted", "farther away, but more strongly classified as adversarial. batch_size: Number of attacks to", "\"Outer Step \", outer_step, \"Current C \", CONST, lower_bound, upper_bound prev = 1e10", "= -0.5, boxmax = 0.5): \"\"\" The L_2 optimized attack. This attack is", "Run the attack on a batch of images and labels. \"\"\" def compareLoss(x,", "times we perform binary search to find the optimal tradeoff-constant between distance and", "maximum number of iterations. Larger values are more accurate; setting too small will", "tradeoff-constant between distance and confidence. max_iterations: The maximum number of iterations. 
Larger values", "= tf.tanh(modifier + self.timg) * self.boxmul + self.boxplus # prediction BEFORE-SOFTMAX of the", "[] # print('go up to',len(imgs)) for i in range(0,len(imgs),self.batch_size): # print('tick',i) r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size],", "## This program is licenced under the BSD 2-Clause licence, ## contained in", "= tf.train.AdamOptimizer(self.LEARNING_RATE) self.train = optimizer.minimize(self.loss, var_list=[modifier]) end_vars = tf.global_variables() new_vars = [x for", "search MAX_ITERATIONS = 10000 # number of iterations to perform gradient descent ABORT_EARLY", "lower_bound = np.zeros(batch_size) CONST = np.ones(batch_size)*self.initial_const upper_bound = np.ones(batch_size)*1e10 # the best l2,", "value (default -0.5). boxmax: Maximum pixel value (default 0.5). \"\"\" image_size, num_channels, num_labels", "= LEARNING_RATE, binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS, abort_early = ABORT_EARLY, initial_const =", "the losses self.loss2 = tf.reduce_sum(self.l2dist) self.loss1 = tf.reduce_sum(self.const * tf.add_n(self.loss1list)) self.loss = self.loss1", "o_bestscore[e] and l2 < o_bestl2[e]: o_bestl2[e] = l2 o_bestattack[e] = ii # finished", "def compareLoss(x, y): \"\"\" x is an np array of shape num_models x", "the attack algorithm. Smaller values produce better results but are slower to converge.", "else: lower_bound[e] = max(lower_bound[e],CONST[e]) if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2", "a network optimizing for l_2 distance ## ## Copyright (C) 2016, <NAME> <<EMAIL>>.", "number in [0,1] indicating the expected loss of the learner \"\"\" if not", "convert to tanh-space imgs = np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999) #", "self.loss2 self.reals = reals self.others = others # Setup the adam optimizer and", "class least likely. 
for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (reals[i] - others[i]", "others[i] + self.CONFIDENCE))) self.loss1list = loss1list # TODO: remove # sum up the", "ii if currLoss == o_bestscore[e] and l2 < o_bestl2[e]: o_bestl2[e] = l2 o_bestattack[e]", "the lower and upper bounds accordingly lower_bound = np.zeros(batch_size) CONST = np.ones(batch_size)*self.initial_const upper_bound", "adjust the constant as needed for e in range(batch_size): if bestscore[e] == 1.0:", "iterations. Larger values are more accurate; setting too small will require a large", "self.boxplus = (boxmin + boxmax) / 2. self.newimg = tf.tanh(modifier + self.timg) *", "predictions of each hypothesis if self.TARGETED: return np.dot(x == y, weights) else: return", "have to send them over again self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab: batchlab, self.assign_const: CONST,", "a targetted attack, False otherwise. learning_rate: The learning rate for the attack algorithm.", "the targets represents the target labels. If self.targeted is false, then targets are", "= currLoss if currLoss == bestscore[e] and l2 < bestl2[e]: bestl2[e] = l2", "as tf import numpy as np BINARY_SEARCH_STEPS = 9 # number of times", "in range(self.BINARY_SEARCH_STEPS): # completely reset adam's internal state. self.sess.run(self.init) batch = imgs[:batch_size] batchlab", "true, then the targets represents the target labels. If self.targeted is false, then", "targeted: True if we should perform a targetted attack, False otherwise. learning_rate: The", "+ self.CONFIDENCE))) self.loss1list = loss1list # TODO: remove # sum up the losses", "self.outputs[i], 1) other = tf.reduce_max((1 - self.tlab)*self.outputs[i] - (self.tlab*10000), 1) reals.append(real) others.append(other) self.reals,", "<NAME> <<EMAIL>>. ## ## This program is licenced under the BSD 2-Clause licence,", "then the targets represents the target labels. 
If self.targeted is false, then targets", "= targeted self.LEARNING_RATE = learning_rate self.MAX_ITERATIONS = max_iterations self.BINARY_SEARCH_STEPS = binary_search_steps self.ABORT_EARLY =", "optimized attack. This attack is the most efficient and should be used as", "label class versus the maximum other reals = [] others = [] for", "# print('go up to',len(imgs)) for i in range(0,len(imgs),self.batch_size): # print('tick',i) r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights))", "as np BINARY_SEARCH_STEPS = 9 # number of times to adjust the constant", "program is licenced under the BSD 2-Clause licence, ## contained in the LICENCE", "should perform a targetted attack, False otherwise. learning_rate: The learning rate for the", "-0.5). boxmax: Maximum pixel value (default 0.5). \"\"\" image_size, num_channels, num_labels = models[0].image_size,", "if self.TARGETED: # if targetted, optimize for making the other class most likely", "Step \", outer_step, \"Current C \", CONST, lower_bound, upper_bound prev = 1e10 #", "adam optimizer for a particular c, now need to decide on the next", "of the label class versus the maximum other reals = [] others =", "for outer_step in range(self.BINARY_SEARCH_STEPS): # completely reset adam's internal state. self.sess.run(self.init) batch =", "creating start_vars = set(x.name for x in tf.global_variables()) optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE) self.train =", "evaluate potential defenses. Returns adversarial examples for the supplied model. 
confidence: Confidence of", "x.name not in start_vars] # these are the variables to initialize when we", "# larger values converge faster to less accurate results, default 1e-2 TARGETED =", "self.assign_const = tf.placeholder(tf.float32, [batch_size]) self.assign_weights = tf.placeholder(tf.float32, [self.num_models]) # the resulting image, tanh'd", "The number of times we perform binary search to find the optimal tradeoff-constant", "= np.copy(x) for v in x: # update the target scores for each", "variables so that we don't have to send them over again self.sess.run(self.setup, {self.assign_timg:", "i in xrange(self.num_models): real = tf.reduce_sum((self.tlab) * self.outputs[i], 1) other = tf.reduce_max((1 -", "shape num_models x num_classes y is the true label or target label of", "* tf.add_n(self.loss1list)) self.loss = self.loss1 + self.loss2 self.reals = reals self.others = others", "reals self.others = others # Setup the adam optimizer and keep track of", "the variable we're going to optimize over modifier = tf.Variable(np.zeros(shape,dtype=np.float32)) # these are", "2-Clause licence, ## contained in the LICENCE file in this directory. ## Modified", "dtype=tf.float32) self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32) self.weights = tf.Variable(np.zeros(self.num_models), dtype=tf.float32) # and here's what", "l, l2s, scores, nimg = self.sess.run([self.train, self.loss, self.l2dist, self.outputs, self.newimg]) scores = np.array(scores).reshape(self.batch_size,", "iterations to perform gradient descent ABORT_EARLY = True # if we stop improving,", "the supplied model. confidence: Confidence of adversarial examples: higher produces examples that are", "between distance and confidence. max_iterations: The maximum number of iterations. 
Larger values are", "self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS * .10) == 0: if l > prev*.9999: break prev", "we perform binary search to find the optimal tradeoff-constant between distance and confidence.", "* (others[i] - reals[i] + self.CONFIDENCE))) else: # if untargeted, optimize for making", "the variables so that we don't have to send them over again self.sess.run(self.setup,", "* self.boxmul + self.boxplus # prediction BEFORE-SOFTMAX of the model self.outputs = [model.predict(self.newimg)", "- (self.tlab*10000), 1) reals.append(real) others.append(other) self.reals, self.others = reals, others loss1list = []", "given images for the given targets. If self.targeted is true, then the targets", "and here's what we use to assign them self.assign_timg = tf.placeholder(tf.float32, shape) self.assign_tlab", "aborts if gradient descent gets stuck. initial_const: The initial tradeoff-constant to use to", "is false, then targets are the original class labels. \"\"\" r = []", "self.TARGETED = targeted self.LEARNING_RATE = learning_rate self.MAX_ITERATIONS = max_iterations self.BINARY_SEARCH_STEPS = binary_search_steps self.ABORT_EARLY", "others = [] for i in xrange(self.num_models): real = tf.reduce_sum((self.tlab) * self.outputs[i], 1)", "compute the probability of the label class versus the maximum other reals =", "targets. If self.targeted is true, then the targets represents the target labels. If", "bounds accordingly lower_bound = np.zeros(batch_size) CONST = np.ones(batch_size)*self.initial_const upper_bound = np.ones(batch_size)*1e10 # the", "upper_bound prev = 1e10 # used to be e6 for iteration in range(self.MAX_ITERATIONS):", "> bestscore[e]: # we've found a clear improvement for this value of c", "< bestl2[e]: bestl2[e] = l2 if currLoss > o_bestscore[e]: o_bestl2[e] = l2 o_bestscore[e]", "we should abort search if we're getting nowhere. 
(check every 10%) if self.ABORT_EARLY", "upper_bound[e])/2 else: lower_bound[e] = max(lower_bound[e],CONST[e]) if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] +", "-- attack a network optimizing for l_2 distance ## ## Copyright (C) 2016,", "a first guess class CarliniL2: def __init__(self, sess, models, batch_size=1, confidence = CONFIDENCE,", "start_vars] # these are the variables to initialize when we run self.setup =", "targetted attack, False otherwise. learning_rate: The learning rate for the attack algorithm. Smaller", "are more accurate; setting too small will require a large learning rate and", "self.CONFIDENCE = confidence self.initial_const = initial_const self.batch_size = batch_size self.num_models = len(models) self.num_labels", "to be e6 for iteration in range(self.MAX_ITERATIONS): # perform the attack _, l,", "of iterations. Larger values are more accurate; setting too small will require a", "self.weights[i] * (others[i] - reals[i] + self.CONFIDENCE))) else: # if untargeted, optimize for", "num_labels shape = (batch_size,image_size,image_size,num_channels) # the variable we're going to optimize over modifier", "the class returns a number in [0,1] indicating the expected loss of the", "= tf.global_variables() new_vars = [x for x in end_vars if x.name not in", "def attack_batch(self, imgs, labs, weights): \"\"\" Run the attack on a batch of", "i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (reals[i] - others[i] + self.CONFIDENCE))) self.loss1list =", "target labels. If self.targeted is false, then targets are the original class labels.", "== bestscore[e] and l2 < bestl2[e]: bestl2[e] = l2 if currLoss > o_bestscore[e]:", "l for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)): currLoss = compareLoss(sc, np.argmax(batchlab[e])) # expected loss of", "self.boxplus # prediction BEFORE-SOFTMAX of the model self.outputs = [model.predict(self.newimg) for model in", "abort search if we're getting nowhere. 
(check every 10%) if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS", "= CONFIDENCE, targeted = TARGETED, learning_rate = LEARNING_RATE, binary_search_steps = BINARY_SEARCH_STEPS, max_iterations =", "e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)): currLoss = compareLoss(sc, np.argmax(batchlab[e])) # expected loss of the learner", "class labels. \"\"\" r = [] # print('go up to',len(imgs)) for i in", "ABORT_EARLY, initial_const = INITIAL_CONST, boxmin = -0.5, boxmax = 0.5): \"\"\" The L_2", "tune the relative importance of distance and confidence. If binary_search_steps is large, the", "self.ABORT_EARLY = abort_early self.CONFIDENCE = confidence self.initial_const = initial_const self.batch_size = batch_size self.num_models", "targets are the original class labels. \"\"\" r = [] # print('go up", "return np.dot(x == y, weights) else: return np.dot(x != y, weights) batch_size =", "bestl2[e] = l2 bestscore[e] = currLoss if currLoss == bestscore[e] and l2 <", "for making the other class most likely for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i]", "# print('tick',i) r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights)) return np.array(r) def attack_batch(self, imgs, labs, weights): \"\"\"", "not in start_vars] # these are the variables to initialize when we run", "= compareLoss(sc, np.argmax(batchlab[e])) # expected loss of the learner if currLoss > bestscore[e]:", "loss of the learner \"\"\" if not isinstance(x, (float, int, np.int64)): x =", "xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (others[i] - reals[i] + self.CONFIDENCE))) else: # if untargeted,", "o_bestl2 = [1e10]*batch_size o_bestscore = [-1]*batch_size o_bestattack = [np.zeros(imgs[0].shape)]*batch_size for outer_step in range(self.BINARY_SEARCH_STEPS):", "print('tick',i) r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights)) return np.array(r) def 
attack_batch(self, imgs, labs, weights): \"\"\" Run", "self.setup.append(self.const.assign(self.assign_const)) self.setup.append(self.weights.assign(self.assign_weights)) self.init = tf.variables_initializer(var_list=[modifier]+new_vars) def attack(self, imgs, targets, weights): \"\"\" Perform the", "imgs = np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999) # set the lower", "weights}) # print \"Outer Step \", outer_step, \"Current C \", CONST, lower_bound, upper_bound", "for the given targets. If self.targeted is true, then the targets represents the", "# should we target one specific class? or just be wrong? CONFIDENCE =", "o_bestl2[e] = l2 o_bestattack[e] = ii # finished trying out the adam optimizer", "to perform gradient descent ABORT_EARLY = True # if we stop improving, abort", "= tf.placeholder(tf.float32, (batch_size, num_labels)) self.assign_const = tf.placeholder(tf.float32, [batch_size]) self.assign_weights = tf.placeholder(tf.float32, [self.num_models]) #", "y, weights) batch_size = self.batch_size # convert to tanh-space imgs = np.arctanh((imgs -", "others loss1list = [] if self.TARGETED: # if targetted, optimize for making the", "particular c, now need to decide on the next value # adjust the", "these are variables to be more efficient in sending data to tf self.timg", "self.CONFIDENCE))) else: # if untargeted, optimize for making this class least likely. 
for", "iteration in range(self.MAX_ITERATIONS): # perform the attack _, l, l2s, scores, nimg =", "modifier = tf.Variable(np.zeros(shape,dtype=np.float32)) # these are variables to be more efficient in sending", "these are the predictions of each hypothesis if self.TARGETED: return np.dot(x == y,", "imgs[:batch_size] batchlab = labs[:batch_size] bestl2 = [1e10]*batch_size bestscore = [0.0]*batch_size # set the", "is the true label or target label of the class returns a number", "most likely for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (others[i] - reals[i] +", "= False # should we target one specific class? or just be wrong?", "here's what we use to assign them self.assign_timg = tf.placeholder(tf.float32, shape) self.assign_tlab =", "np.ones(batch_size)*1e10 # the best l2, score, and image attack o_bestl2 = [1e10]*batch_size o_bestscore", "self.TARGETED: v[y] -= self.CONFIDENCE else: v[y] += self.CONFIDENCE x = np.argmax(x, 1) #", "the maximum other reals = [] others = [] for i in xrange(self.num_models):", "o_bestscore = [-1]*batch_size o_bestattack = [np.zeros(imgs[0].shape)]*batch_size for outer_step in range(self.BINARY_SEARCH_STEPS): # completely reset", "attack is the most efficient and should be used as the primary attack", "for making this class least likely. for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] *", "set(x.name for x in tf.global_variables()) optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE) self.train = optimizer.minimize(self.loss, var_list=[modifier]) end_vars", "attacks to run simultaneously. targeted: True if we should perform a targetted attack,", "# print \"Outer Step \", outer_step, \"Current C \", CONST, lower_bound, upper_bound prev", "currLoss == o_bestscore[e] and l2 < o_bestl2[e]: o_bestl2[e] = l2 o_bestattack[e] = ii", "a number in [0,1] indicating the expected loss of the learner \"\"\" if", "more strongly classified as adversarial. 
batch_size: Number of attacks to run simultaneously. targeted:", "self.CONFIDENCE x = np.argmax(x, 1) # these are the predictions of each hypothesis", "= ABORT_EARLY, initial_const = INITIAL_CONST, boxmin = -0.5, boxmax = 0.5): \"\"\" The", "# distance to the input data self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)),[1,2,3])", "tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)),[1,2,3]) # compute the probability of the label class", "completely reset adam's internal state. self.sess.run(self.init) batch = imgs[:batch_size] batchlab = labs[:batch_size] bestl2", "imgs, targets, weights): \"\"\" Perform the L_2 attack on the given images for", "np.argmax(batchlab[e])) # expected loss of the learner if currLoss > bestscore[e]: # we've", "we're creating start_vars = set(x.name for x in tf.global_variables()) optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE) self.train", "numpy as np BINARY_SEARCH_STEPS = 9 # number of times to adjust the", "stop improving, abort gradient descent early LEARNING_RATE = 1e-2 # larger values converge", "self.targeted is false, then targets are the original class labels. \"\"\" r =", "other = tf.reduce_max((1 - self.tlab)*self.outputs[i] - (self.tlab*10000), 1) reals.append(real) others.append(other) self.reals, self.others =", "print(iteration, self.sess.run((self.loss, self.loss1, self.loss2))) # check if we should abort search if we're", "too small will require a large learning rate and will produce poor results.", "return np.array(r) def attack_batch(self, imgs, labs, weights): \"\"\" Run the attack on a", "<<EMAIL>>. ## ## This program is licenced under the BSD 2-Clause licence, ##", "small will require a large learning rate and will produce poor results. 
abort_early:", "max_iterations = MAX_ITERATIONS, abort_early = ABORT_EARLY, initial_const = INITIAL_CONST, boxmin = -0.5, boxmax", "= tf.placeholder(tf.float32, shape) self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels)) self.assign_const = tf.placeholder(tf.float32, [batch_size]) self.assign_weights", "(float, int, np.int64)): x = np.copy(x) for v in x: # update the", "self.outputs = [model.predict(self.newimg) for model in models] # distance to the input data", "= l2 o_bestscore[e] = currLoss o_bestattack[e] = ii if currLoss == o_bestscore[e] and", "represents the target labels. If self.targeted is false, then targets are the original", "- reals[i] + self.CONFIDENCE))) else: # if untargeted, optimize for making this class", "TARGETED, learning_rate = LEARNING_RATE, binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS, abort_early = ABORT_EARLY,", "in sending data to tf self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32) self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32)", "# the best l2, score, and image attack o_bestl2 = [1e10]*batch_size o_bestscore =", "= self.sess.run([self.train, self.loss, self.l2dist, self.outputs, self.newimg]) scores = np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels) # if", "in range(self.MAX_ITERATIONS): # perform the attack _, l, l2s, scores, nimg = self.sess.run([self.train,", "bestscore[e] = currLoss if currLoss == bestscore[e] and l2 < bestl2[e]: bestl2[e] =", "value of c bestl2[e] = l2 bestscore[e] = currLoss if currLoss == bestscore[e]", "used as the primary attack to evaluate potential defenses. Returns adversarial examples for", "# prediction BEFORE-SOFTMAX of the model self.outputs = [model.predict(self.newimg) for model in models]", "prediction if self.TARGETED: v[y] -= self.CONFIDENCE else: v[y] += self.CONFIDENCE x = np.argmax(x,", "converge. 
binary_search_steps: The number of times we perform binary search to find the", "[batch_size]) self.assign_weights = tf.placeholder(tf.float32, [self.num_models]) # the resulting image, tanh'd to keep bounded", "* self.outputs[i], 1) other = tf.reduce_max((1 - self.tlab)*self.outputs[i] - (self.tlab*10000), 1) reals.append(real) others.append(other)", "/ self.boxmul * 0.999999) # set the lower and upper bounds accordingly lower_bound", "= 0 # how strong the adversarial example should be INITIAL_CONST = 1e-3", "least likely. for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (reals[i] - others[i] +", "tf.global_variables() new_vars = [x for x in end_vars if x.name not in start_vars]", "self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab: batchlab, self.assign_const: CONST, self.assign_weights: weights}) # print \"Outer Step", "the attack _, l, l2s, scores, nimg = self.sess.run([self.train, self.loss, self.l2dist, self.outputs, self.newimg])", "optimizer for a particular c, now need to decide on the next value", "= models[0].image_size, models[0].num_channels, models[0].num_labels self.sess = sess self.TARGETED = targeted self.LEARNING_RATE = learning_rate", "efficient in sending data to tf self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32) self.tlab = tf.Variable(np.zeros((batch_size,num_labels)),", "num_classes y is the true label or target label of the class returns", "self.setup.append(self.timg.assign(self.assign_timg)) self.setup.append(self.tlab.assign(self.assign_tlab)) self.setup.append(self.const.assign(self.assign_const)) self.setup.append(self.weights.assign(self.assign_weights)) self.init = tf.variables_initializer(var_list=[modifier]+new_vars) def attack(self, imgs, targets, weights): \"\"\"", "scores = np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels) # if iteration % 200 == 0: #", "for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)): currLoss = compareLoss(sc, 
np.argmax(batchlab[e])) # expected loss of the", "1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: lower_bound[e] = max(lower_bound[e],CONST[e]) if upper_bound[e] <", "to use to tune the relative importance of distance and confidence. If binary_search_steps", "self.newimg]) scores = np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels) # if iteration % 200 == 0:", "= np.ones(batch_size)*self.initial_const upper_bound = np.ones(batch_size)*1e10 # the best l2, score, and image attack", "= l2 o_bestattack[e] = ii # finished trying out the adam optimizer for", "the true label or target label of the class returns a number in", "stuck. initial_const: The initial tradeoff-constant to use to tune the relative importance of", "or target label of the class returns a number in [0,1] indicating the", "num_labels = models[0].image_size, models[0].num_channels, models[0].num_labels self.sess = sess self.TARGETED = targeted self.LEARNING_RATE =", "perform the attack _, l, l2s, scores, nimg = self.sess.run([self.train, self.loss, self.l2dist, self.outputs,", "prev = l for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)): currLoss = compareLoss(sc, np.argmax(batchlab[e])) # expected", "var_list=[modifier]) end_vars = tf.global_variables() new_vars = [x for x in end_vars if x.name", "getting nowhere. (check every 10%) if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS * .10) == 0:", "of images and labels. \"\"\" def compareLoss(x, y): \"\"\" x is an np", "perform a targetted attack, False otherwise. 
learning_rate: The learning rate for the attack", "targets[i:i+self.batch_size], weights)) return np.array(r) def attack_batch(self, imgs, labs, weights): \"\"\" Run the attack", "# used to be e6 for iteration in range(self.MAX_ITERATIONS): # perform the attack", "\"\"\" def compareLoss(x, y): \"\"\" x is an np array of shape num_models", "ii # finished trying out the adam optimizer for a particular c, now", "= 1e-3 # the initial constant c to pick as a first guess", "self.loss2))) # check if we should abort search if we're getting nowhere. (check", "for the supplied model. confidence: Confidence of adversarial examples: higher produces examples that", "\", CONST, lower_bound, upper_bound prev = 1e10 # used to be e6 for", "= np.zeros(batch_size) CONST = np.ones(batch_size)*self.initial_const upper_bound = np.ones(batch_size)*1e10 # the best l2, score,", "self.outputs, self.newimg]) scores = np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels) # if iteration % 200 ==", "\"Current C \", CONST, lower_bound, upper_bound prev = 1e10 # used to be", "## Copyright (C) 2016, <NAME> <<EMAIL>>. ## ## This program is licenced under", "LICENCE file in this directory. ## Modified by <NAME> 2017 import tensorflow as", "-= self.CONFIDENCE else: v[y] += self.CONFIDENCE x = np.argmax(x, 1) # these are", "self.loss, self.l2dist, self.outputs, self.newimg]) scores = np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels) # if iteration %", "should abort search if we're getting nowhere. (check every 10%) if self.ABORT_EARLY and", "if l > prev*.9999: break prev = l for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)): currLoss", "= initial_const self.batch_size = batch_size self.num_models = len(models) self.num_labels = num_labels shape =", "true label or target label of the class returns a number in [0,1]", "and labels. 
\"\"\" def compareLoss(x, y): \"\"\" x is an np array of", "= set(x.name for x in tf.global_variables()) optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE) self.train = optimizer.minimize(self.loss, var_list=[modifier])", "others # Setup the adam optimizer and keep track of variables we're creating", "what we use to assign them self.assign_timg = tf.placeholder(tf.float32, shape) self.assign_tlab = tf.placeholder(tf.float32,", "initialize when we run self.setup = [] self.setup.append(self.timg.assign(self.assign_timg)) self.setup.append(self.tlab.assign(self.assign_tlab)) self.setup.append(self.const.assign(self.assign_const)) self.setup.append(self.weights.assign(self.assign_weights)) self.init =", "o_bestscore[e]: o_bestl2[e] = l2 o_bestscore[e] = currLoss o_bestattack[e] = ii if currLoss ==", "poor results. abort_early: If true, allows early aborts if gradient descent gets stuck.", "in x: # update the target scores for each individual prediction if self.TARGETED:", "but are slower to converge. binary_search_steps: The number of times we perform binary", "images and labels. \"\"\" def compareLoss(x, y): \"\"\" x is an np array", "in enumerate(zip(l2s,scores,nimg)): currLoss = compareLoss(sc, np.argmax(batchlab[e])) # expected loss of the learner if", "or just be wrong? CONFIDENCE = 0 # how strong the adversarial example", "MAX_ITERATIONS, abort_early = ABORT_EARLY, initial_const = INITIAL_CONST, boxmin = -0.5, boxmax = 0.5):", "CONFIDENCE, targeted = TARGETED, learning_rate = LEARNING_RATE, binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS,", "for the attack algorithm. 
Smaller values produce better results but are slower to", "self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)),[1,2,3]) # compute the probability of the", "# compute the probability of the label class versus the maximum other reals", "= num_labels shape = (batch_size,image_size,image_size,num_channels) # the variable we're going to optimize over", "optimize for making this class least likely. for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i]", "the target scores for each individual prediction if self.TARGETED: v[y] -= self.CONFIDENCE else:", "models[0].num_labels self.sess = sess self.TARGETED = targeted self.LEARNING_RATE = learning_rate self.MAX_ITERATIONS = max_iterations", "upper_bound[e])/2 else: CONST[e] *= 100 # return the best solution found return o_bestattack", "the LICENCE file in this directory. ## Modified by <NAME> 2017 import tensorflow", "image, tanh'd to keep bounded from boxmin to boxmax self.boxmul = (boxmax -", "r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights)) return np.array(r) def attack_batch(self, imgs, labs, weights): \"\"\" Run the", "boxmin = -0.5, boxmax = 0.5): \"\"\" The L_2 optimized attack. This attack", "10%) if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS * .10) == 0: if l > prev*.9999:", "sess self.TARGETED = targeted self.LEARNING_RATE = learning_rate self.MAX_ITERATIONS = max_iterations self.BINARY_SEARCH_STEPS = binary_search_steps", "2016, <NAME> <<EMAIL>>. ## ## This program is licenced under the BSD 2-Clause", "attack o_bestl2 = [1e10]*batch_size o_bestscore = [-1]*batch_size o_bestattack = [np.zeros(imgs[0].shape)]*batch_size for outer_step in", "internal state. 
self.sess.run(self.init) batch = imgs[:batch_size] batchlab = labs[:batch_size] bestl2 = [1e10]*batch_size bestscore", "tf.Variable(np.zeros(self.num_models), dtype=tf.float32) # and here's what we use to assign them self.assign_timg =", "= self.loss1 + self.loss2 self.reals = reals self.others = others # Setup the", "0: # print(iteration, self.sess.run((self.loss, self.loss1, self.loss2))) # check if we should abort search", "accordingly lower_bound = np.zeros(batch_size) CONST = np.ones(batch_size)*self.initial_const upper_bound = np.ones(batch_size)*1e10 # the best", "self.loss1 + self.loss2 self.reals = reals self.others = others # Setup the adam", "if x.name not in start_vars] # these are the variables to initialize when", "of the model self.outputs = [model.predict(self.newimg) for model in models] # distance to", "CONST, self.assign_weights: weights}) # print \"Outer Step \", outer_step, \"Current C \", CONST,", "1e10 # used to be e6 for iteration in range(self.MAX_ITERATIONS): # perform the", "1e-2 # larger values converge faster to less accurate results, default 1e-2 TARGETED", "scores, nimg = self.sess.run([self.train, self.loss, self.l2dist, self.outputs, self.newimg]) scores = np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels)", "lower_bound, upper_bound prev = 1e10 # used to be e6 for iteration in", "dtype=tf.float32) self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32) self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32) self.weights = tf.Variable(np.zeros(self.num_models), dtype=tf.float32)", "if currLoss == o_bestscore[e] and l2 < o_bestl2[e]: o_bestl2[e] = l2 o_bestattack[e] =", "results, default 1e-2 TARGETED = False # should we target one specific class?", "as the primary attack to evaluate potential defenses. 
Returns adversarial examples for the", "reals = [] others = [] for i in xrange(self.num_models): real = tf.reduce_sum((self.tlab)", "the adam optimizer and keep track of variables we're creating start_vars = set(x.name", "in range(0,len(imgs),self.batch_size): # print('tick',i) r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights)) return np.array(r) def attack_batch(self, imgs, labs,", "self.assign_tlab: batchlab, self.assign_const: CONST, self.assign_weights: weights}) # print \"Outer Step \", outer_step, \"Current", "run simultaneously. targeted: True if we should perform a targetted attack, False otherwise.", "for i in xrange(self.num_models): real = tf.reduce_sum((self.tlab) * self.outputs[i], 1) other = tf.reduce_max((1", "slower to converge. binary_search_steps: The number of times we perform binary search to", "for this value of c bestl2[e] = l2 bestscore[e] = currLoss if currLoss", "= 10000 # number of iterations to perform gradient descent ABORT_EARLY = True", "produce better results but are slower to converge. binary_search_steps: The number of times", "each individual prediction if self.TARGETED: v[y] -= self.CONFIDENCE else: v[y] += self.CONFIDENCE x", "search if we're getting nowhere. 
(check every 10%) if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS *", "binary search MAX_ITERATIONS = 10000 # number of iterations to perform gradient descent", "and l2 < o_bestl2[e]: o_bestl2[e] = l2 o_bestattack[e] = ii # finished trying", "when we run self.setup = [] self.setup.append(self.timg.assign(self.assign_timg)) self.setup.append(self.tlab.assign(self.assign_tlab)) self.setup.append(self.const.assign(self.assign_const)) self.setup.append(self.weights.assign(self.assign_weights)) self.init = tf.variables_initializer(var_list=[modifier]+new_vars)", "self.assign_weights: weights}) # print \"Outer Step \", outer_step, \"Current C \", CONST, lower_bound,", "Perform the L_2 attack on the given images for the given targets. If", "= (boxmax - boxmin) / 2. self.boxplus = (boxmin + boxmax) / 2.", "l_2 distance ## ## Copyright (C) 2016, <NAME> <<EMAIL>>. ## ## This program", "CONST, lower_bound, upper_bound prev = 1e10 # used to be e6 for iteration", "attack algorithm. Smaller values produce better results but are slower to converge. binary_search_steps:", "loss1list.append(tf.maximum(0.0, self.weights[i] * (reals[i] - others[i] + self.CONFIDENCE))) self.loss1list = loss1list # TODO:", "# print(iteration, self.sess.run((self.loss, self.loss1, self.loss2))) # check if we should abort search if", "abort_early self.CONFIDENCE = confidence self.initial_const = initial_const self.batch_size = batch_size self.num_models = len(models)", "probability of the label class versus the maximum other reals = [] others", "update the target scores for each individual prediction if self.TARGETED: v[y] -= self.CONFIDENCE", "batchlab, self.assign_const: CONST, self.assign_weights: weights}) # print \"Outer Step \", outer_step, \"Current C", "a batch of images and labels. 
\"\"\" def compareLoss(x, y): \"\"\" x is", "(batch_size, num_labels)) self.assign_const = tf.placeholder(tf.float32, [batch_size]) self.assign_weights = tf.placeholder(tf.float32, [self.num_models]) # the resulting", "need to decide on the next value # adjust the constant as needed", "if bestscore[e] == 1.0: upper_bound[e] = min(upper_bound[e], CONST[e]) if upper_bound[e] < 1e9: CONST[e]", "x in end_vars if x.name not in start_vars] # these are the variables", "binary_search_steps: The number of times we perform binary search to find the optimal", "+ self.timg) * self.boxmul + self.boxplus # prediction BEFORE-SOFTMAX of the model self.outputs", "to initialize when we run self.setup = [] self.setup.append(self.timg.assign(self.assign_timg)) self.setup.append(self.tlab.assign(self.assign_tlab)) self.setup.append(self.const.assign(self.assign_const)) self.setup.append(self.weights.assign(self.assign_weights)) self.init", "self.train = optimizer.minimize(self.loss, var_list=[modifier]) end_vars = tf.global_variables() new_vars = [x for x in", "y, weights) else: return np.dot(x != y, weights) batch_size = self.batch_size # convert", "tf.placeholder(tf.float32, shape) self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels)) self.assign_const = tf.placeholder(tf.float32, [batch_size]) self.assign_weights =", "adversarial example should be INITIAL_CONST = 1e-3 # the initial constant c to", "produce poor results. abort_early: If true, allows early aborts if gradient descent gets", "directory. ## Modified by <NAME> 2017 import tensorflow as tf import numpy as", "of iterations to perform gradient descent ABORT_EARLY = True # if we stop", "label of the class returns a number in [0,1] indicating the expected loss", "(batch_size,image_size,image_size,num_channels) # the variable we're going to optimize over modifier = tf.Variable(np.zeros(shape,dtype=np.float32)) #", "and should be used as the primary attack to evaluate potential defenses. Returns", "class? 
or just be wrong? CONFIDENCE = 0 # how strong the adversarial", "9 # number of times to adjust the constant with binary search MAX_ITERATIONS", "every 10%) if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS * .10) == 0: if l >", "= tf.variables_initializer(var_list=[modifier]+new_vars) def attack(self, imgs, targets, weights): \"\"\" Perform the L_2 attack on", "max(lower_bound[e],CONST[e]) if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: CONST[e] *=", "in end_vars if x.name not in start_vars] # these are the variables to", "CONST = np.ones(batch_size)*self.initial_const upper_bound = np.ones(batch_size)*1e10 # the best l2, score, and image", "prev*.9999: break prev = l for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)): currLoss = compareLoss(sc, np.argmax(batchlab[e]))", "binary_search_steps self.ABORT_EARLY = abort_early self.CONFIDENCE = confidence self.initial_const = initial_const self.batch_size = batch_size", "to the input data self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)),[1,2,3]) # compute", "converge faster to less accurate results, default 1e-2 TARGETED = False # should", "dtype=tf.float32) self.weights = tf.Variable(np.zeros(self.num_models), dtype=tf.float32) # and here's what we use to assign", "require a large learning rate and will produce poor results. abort_early: If true,", "else: v[y] += self.CONFIDENCE x = np.argmax(x, 1) # these are the predictions", "if we're getting nowhere. 
(check every 10%) if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS * .10)", "resulting image, tanh'd to keep bounded from boxmin to boxmax self.boxmul = (boxmax", "confidence: Confidence of adversarial examples: higher produces examples that are farther away, but", "send them over again self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab: batchlab, self.assign_const: CONST, self.assign_weights: weights})", "CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: lower_bound[e] = max(lower_bound[e],CONST[e]) if upper_bound[e] < 1e9:", "print \"Outer Step \", outer_step, \"Current C \", CONST, lower_bound, upper_bound prev =", "CONFIDENCE = 0 # how strong the adversarial example should be INITIAL_CONST =", "class CarliniL2: def __init__(self, sess, models, batch_size=1, confidence = CONFIDENCE, targeted = TARGETED,", "tanh'd to keep bounded from boxmin to boxmax self.boxmul = (boxmax - boxmin)", "data self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)),[1,2,3]) # compute the probability of", "find the optimal tradeoff-constant between distance and confidence. max_iterations: The maximum number of", "relative importance of distance and confidence. If binary_search_steps is large, the initial constant", "are slower to converge. binary_search_steps: The number of times we perform binary search", "self.sess.run([self.train, self.loss, self.l2dist, self.outputs, self.newimg]) scores = np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels) # if iteration", "are variables to be more efficient in sending data to tf self.timg =", "np.dot(x == y, weights) else: return np.dot(x != y, weights) batch_size = self.batch_size", "on a batch of images and labels. \"\"\" def compareLoss(x, y): \"\"\" x", "distance and confidence. max_iterations: The maximum number of iterations. Larger values are more", "away, but more strongly classified as adversarial. 
batch_size: Number of attacks to run", "boxmax = 0.5): \"\"\" The L_2 optimized attack. This attack is the most", "The learning rate for the attack algorithm. Smaller values produce better results but", "self.CONFIDENCE else: v[y] += self.CONFIDENCE x = np.argmax(x, 1) # these are the", "self.boxmul + self.boxplus # prediction BEFORE-SOFTMAX of the model self.outputs = [model.predict(self.newimg) for", "= currLoss o_bestattack[e] = ii if currLoss == o_bestscore[e] and l2 < o_bestl2[e]:", "sess, models, batch_size=1, confidence = CONFIDENCE, targeted = TARGETED, learning_rate = LEARNING_RATE, binary_search_steps", "making this class least likely. for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (reals[i]", "supplied model. confidence: Confidence of adversarial examples: higher produces examples that are farther", "np array of shape num_models x num_classes y is the true label or", "the probability of the label class versus the maximum other reals = []", "if currLoss > bestscore[e]: # we've found a clear improvement for this value", "constant is not important. boxmin: Minimum pixel value (default -0.5). boxmax: Maximum pixel", "likely. for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (reals[i] - others[i] + self.CONFIDENCE)))", "optimizer.minimize(self.loss, var_list=[modifier]) end_vars = tf.global_variables() new_vars = [x for x in end_vars if", "weights) else: return np.dot(x != y, weights) batch_size = self.batch_size # convert to", "l2s, scores, nimg = self.sess.run([self.train, self.loss, self.l2dist, self.outputs, self.newimg]) scores = np.array(scores).reshape(self.batch_size, self.num_models,", "0: if l > prev*.9999: break prev = l for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)):", "targets represents the target labels. 
If self.targeted is false, then targets are the", "reals[i] + self.CONFIDENCE))) else: # if untargeted, optimize for making this class least", "that we don't have to send them over again self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab:", "self.LEARNING_RATE = learning_rate self.MAX_ITERATIONS = max_iterations self.BINARY_SEARCH_STEPS = binary_search_steps self.ABORT_EARLY = abort_early self.CONFIDENCE", "more accurate; setting too small will require a large learning rate and will", "to find the optimal tradeoff-constant between distance and confidence. max_iterations: The maximum number", "1) reals.append(real) others.append(other) self.reals, self.others = reals, others loss1list = [] if self.TARGETED:", "self.weights = tf.Variable(np.zeros(self.num_models), dtype=tf.float32) # and here's what we use to assign them", "and iteration%(self.MAX_ITERATIONS * .10) == 0: if l > prev*.9999: break prev =", "variables to be more efficient in sending data to tf self.timg = tf.Variable(np.zeros(shape),", "the initial constant c to pick as a first guess class CarliniL2: def", "self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32) self.weights = tf.Variable(np.zeros(self.num_models), dtype=tf.float32) # and here's what we", "to adjust the constant with binary search MAX_ITERATIONS = 10000 # number of", "maximum other reals = [] others = [] for i in xrange(self.num_models): real", "for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (reals[i] - others[i] + self.CONFIDENCE))) self.loss1list", "= others # Setup the adam optimizer and keep track of variables we're", "* .10) == 0: if l > prev*.9999: break prev = l for", "remove # sum up the losses self.loss2 = tf.reduce_sum(self.l2dist) self.loss1 = tf.reduce_sum(self.const *", "+ upper_bound[e])/2 else: CONST[e] *= 100 # return the best solution found return", "set the variables so that we don't have to send them over again", "are the original class labels. 
\"\"\" r = [] # print('go up to',len(imgs))", "CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: CONST[e] *= 100 # return the best", "= sess self.TARGETED = targeted self.LEARNING_RATE = learning_rate self.MAX_ITERATIONS = max_iterations self.BINARY_SEARCH_STEPS =", "a particular c, now need to decide on the next value # adjust", "should be INITIAL_CONST = 1e-3 # the initial constant c to pick as", "from boxmin to boxmax self.boxmul = (boxmax - boxmin) / 2. self.boxplus =", "currLoss if currLoss == bestscore[e] and l2 < bestl2[e]: bestl2[e] = l2 if", "pick as a first guess class CarliniL2: def __init__(self, sess, models, batch_size=1, confidence", "BINARY_SEARCH_STEPS = 9 # number of times to adjust the constant with binary", "gradient descent gets stuck. initial_const: The initial tradeoff-constant to use to tune the", "tf.variables_initializer(var_list=[modifier]+new_vars) def attack(self, imgs, targets, weights): \"\"\" Perform the L_2 attack on the", "each hypothesis if self.TARGETED: return np.dot(x == y, weights) else: return np.dot(x !=", "self.loss1, self.loss2))) # check if we should abort search if we're getting nowhere.", "return np.dot(x != y, weights) batch_size = self.batch_size # convert to tanh-space imgs", "= tf.Variable(np.zeros(shape), dtype=tf.float32) self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32) self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32) self.weights =", "# set the lower and upper bounds accordingly lower_bound = np.zeros(batch_size) CONST =", "== o_bestscore[e] and l2 < o_bestl2[e]: o_bestl2[e] = l2 o_bestattack[e] = ii #", "np BINARY_SEARCH_STEPS = 9 # number of times to adjust the constant with", "< o_bestl2[e]: o_bestl2[e] = l2 o_bestattack[e] = ii # finished trying out the", "iteration%(self.MAX_ITERATIONS * .10) == 0: if l > prev*.9999: break prev = l", "BEFORE-SOFTMAX of the model self.outputs = [model.predict(self.newimg) for model in models] # distance", "np.copy(x) for v in x: 
# update the target scores for each individual", "to send them over again self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab: batchlab, self.assign_const: CONST, self.assign_weights:", "if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS * .10) == 0: if l > prev*.9999: break", "The L_2 optimized attack. This attack is the most efficient and should be", "end_vars if x.name not in start_vars] # these are the variables to initialize", "true, allows early aborts if gradient descent gets stuck. initial_const: The initial tradeoff-constant", "0.5): \"\"\" The L_2 optimized attack. This attack is the most efficient and", "max_iterations: The maximum number of iterations. Larger values are more accurate; setting too", "# update the target scores for each individual prediction if self.TARGETED: v[y] -=", "= [] self.setup.append(self.timg.assign(self.assign_timg)) self.setup.append(self.tlab.assign(self.assign_tlab)) self.setup.append(self.const.assign(self.assign_const)) self.setup.append(self.weights.assign(self.assign_weights)) self.init = tf.variables_initializer(var_list=[modifier]+new_vars) def attack(self, imgs, targets,", "and will produce poor results. abort_early: If true, allows early aborts if gradient", "- boxmin) / 2. self.boxplus = (boxmin + boxmax) / 2. self.newimg =", "= max(lower_bound[e],CONST[e]) if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: CONST[e]", "search to find the optimal tradeoff-constant between distance and confidence. 
max_iterations: The maximum", "self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32) self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32) self.weights = tf.Variable(np.zeros(self.num_models), dtype=tf.float32) #", "y): \"\"\" x is an np array of shape num_models x num_classes y", "keep bounded from boxmin to boxmax self.boxmul = (boxmax - boxmin) / 2.", "returns a number in [0,1] indicating the expected loss of the learner \"\"\"", "= tf.reduce_max((1 - self.tlab)*self.outputs[i] - (self.tlab*10000), 1) reals.append(real) others.append(other) self.reals, self.others = reals,", "c to pick as a first guess class CarliniL2: def __init__(self, sess, models,", "= [1e10]*batch_size bestscore = [0.0]*batch_size # set the variables so that we don't", "licence, ## contained in the LICENCE file in this directory. ## Modified by", "np.array(r) def attack_batch(self, imgs, labs, weights): \"\"\" Run the attack on a batch", "(self.tlab*10000), 1) reals.append(real) others.append(other) self.reals, self.others = reals, others loss1list = [] if", "If self.targeted is true, then the targets represents the target labels. 
If self.targeted", "adam optimizer and keep track of variables we're creating start_vars = set(x.name for", "the resulting image, tanh'd to keep bounded from boxmin to boxmax self.boxmul =", "expected loss of the learner \"\"\" if not isinstance(x, (float, int, np.int64)): x", "[] self.setup.append(self.timg.assign(self.assign_timg)) self.setup.append(self.tlab.assign(self.assign_tlab)) self.setup.append(self.const.assign(self.assign_const)) self.setup.append(self.weights.assign(self.assign_weights)) self.init = tf.variables_initializer(var_list=[modifier]+new_vars) def attack(self, imgs, targets, weights):", "efficient and should be used as the primary attack to evaluate potential defenses.", "np.int64)): x = np.copy(x) for v in x: # update the target scores", "tensorflow as tf import numpy as np BINARY_SEARCH_STEPS = 9 # number of", "= tf.placeholder(tf.float32, [self.num_models]) # the resulting image, tanh'd to keep bounded from boxmin", "l2, score, and image attack o_bestl2 = [1e10]*batch_size o_bestscore = [-1]*batch_size o_bestattack =", "just be wrong? CONFIDENCE = 0 # how strong the adversarial example should", "learning rate for the attack algorithm. Smaller values produce better results but are", "in tf.global_variables()) optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE) self.train = optimizer.minimize(self.loss, var_list=[modifier]) end_vars = tf.global_variables() new_vars", "v[y] += self.CONFIDENCE x = np.argmax(x, 1) # these are the predictions of", "= self.batch_size # convert to tanh-space imgs = np.arctanh((imgs - self.boxplus) / self.boxmul", "be e6 for iteration in range(self.MAX_ITERATIONS): # perform the attack _, l, l2s,", "weights) batch_size = self.batch_size # convert to tanh-space imgs = np.arctanh((imgs - self.boxplus)", "early LEARNING_RATE = 1e-2 # larger values converge faster to less accurate results,", "to boxmax self.boxmul = (boxmax - boxmin) / 2. 
self.boxplus = (boxmin +", "batchlab = labs[:batch_size] bestl2 = [1e10]*batch_size bestscore = [0.0]*batch_size # set the variables", "the target labels. If self.targeted is false, then targets are the original class", "l > prev*.9999: break prev = l for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)): currLoss =", "# we've found a clear improvement for this value of c bestl2[e] =", "for iteration in range(self.MAX_ITERATIONS): # perform the attack _, l, l2s, scores, nimg", "10000 # number of iterations to perform gradient descent ABORT_EARLY = True #", "= max_iterations self.BINARY_SEARCH_STEPS = binary_search_steps self.ABORT_EARLY = abort_early self.CONFIDENCE = confidence self.initial_const =", "< 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: lower_bound[e] = max(lower_bound[e],CONST[e]) if upper_bound[e]", "[self.num_models]) # the resulting image, tanh'd to keep bounded from boxmin to boxmax", "the adversarial example should be INITIAL_CONST = 1e-3 # the initial constant c", "2017 import tensorflow as tf import numpy as np BINARY_SEARCH_STEPS = 9 #", "use to assign them self.assign_timg = tf.placeholder(tf.float32, shape) self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels))", "batch_size self.num_models = len(models) self.num_labels = num_labels shape = (batch_size,image_size,image_size,num_channels) # the variable", "if currLoss > o_bestscore[e]: o_bestl2[e] = l2 o_bestscore[e] = currLoss o_bestattack[e] = ii", "less accurate results, default 1e-2 TARGETED = False # should we target one", "large learning rate and will produce poor results. abort_early: If true, allows early", "in this directory. 
## Modified by <NAME> 2017 import tensorflow as tf import", "for a particular c, now need to decide on the next value #", "learning_rate self.MAX_ITERATIONS = max_iterations self.BINARY_SEARCH_STEPS = binary_search_steps self.ABORT_EARLY = abort_early self.CONFIDENCE = confidence", "self.boxmul * 0.999999) # set the lower and upper bounds accordingly lower_bound =", "strong the adversarial example should be INITIAL_CONST = 1e-3 # the initial constant", "reals.append(real) others.append(other) self.reals, self.others = reals, others loss1list = [] if self.TARGETED: #", "are the predictions of each hypothesis if self.TARGETED: return np.dot(x == y, weights)", "import numpy as np BINARY_SEARCH_STEPS = 9 # number of times to adjust", "# how strong the adversarial example should be INITIAL_CONST = 1e-3 # the", "descent gets stuck. initial_const: The initial tradeoff-constant to use to tune the relative", "again self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab: batchlab, self.assign_const: CONST, self.assign_weights: weights}) # print \"Outer", "== 1.0: upper_bound[e] = min(upper_bound[e], CONST[e]) if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e]", "class returns a number in [0,1] indicating the expected loss of the learner", "model in models] # distance to the input data self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) *", "real = tf.reduce_sum((self.tlab) * self.outputs[i], 1) other = tf.reduce_max((1 - self.tlab)*self.outputs[i] - (self.tlab*10000),", "if untargeted, optimize for making this class least likely. for i in xrange(self.num_models):", "results but are slower to converge. binary_search_steps: The number of times we perform", "prediction BEFORE-SOFTMAX of the model self.outputs = [model.predict(self.newimg) for model in models] #", "# check if we should abort search if we're getting nowhere. (check every", "# convert to tanh-space imgs = np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999)", "model. 
confidence: Confidence of adversarial examples: higher produces examples that are farther away,", "to converge. binary_search_steps: The number of times we perform binary search to find", "== 0: if l > prev*.9999: break prev = l for e,(l2,sc,ii) in", "batch_size = self.batch_size # convert to tanh-space imgs = np.arctanh((imgs - self.boxplus) /", "out the adam optimizer for a particular c, now need to decide on", "# TODO: remove # sum up the losses self.loss2 = tf.reduce_sum(self.l2dist) self.loss1 =", "optimizing for l_2 distance ## ## Copyright (C) 2016, <NAME> <<EMAIL>>. ## ##", "not important. boxmin: Minimum pixel value (default -0.5). boxmax: Maximum pixel value (default", "rate and will produce poor results. abort_early: If true, allows early aborts if", "sending data to tf self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32) self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32) self.const", "(lower_bound[e] + upper_bound[e])/2 else: CONST[e] *= 100 # return the best solution found", "The maximum number of iterations. Larger values are more accurate; setting too small", "False otherwise. learning_rate: The learning rate for the attack algorithm. 
Smaller values produce", "x: # update the target scores for each individual prediction if self.TARGETED: v[y]", "x = np.copy(x) for v in x: # update the target scores for", "C \", CONST, lower_bound, upper_bound prev = 1e10 # used to be e6", "of the learner \"\"\" if not isinstance(x, (float, int, np.int64)): x = np.copy(x)", "np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels) # if iteration % 200 == 0: # print(iteration, self.sess.run((self.loss,", "self.assign_const: CONST, self.assign_weights: weights}) # print \"Outer Step \", outer_step, \"Current C \",", "loss1list = [] if self.TARGETED: # if targetted, optimize for making the other", "self.init = tf.variables_initializer(var_list=[modifier]+new_vars) def attack(self, imgs, targets, weights): \"\"\" Perform the L_2 attack", "self.MAX_ITERATIONS = max_iterations self.BINARY_SEARCH_STEPS = binary_search_steps self.ABORT_EARLY = abort_early self.CONFIDENCE = confidence self.initial_const", "first guess class CarliniL2: def __init__(self, sess, models, batch_size=1, confidence = CONFIDENCE, targeted", "o_bestattack[e] = ii if currLoss == o_bestscore[e] and l2 < o_bestl2[e]: o_bestl2[e] =", "distance and confidence. If binary_search_steps is large, the initial constant is not important.", "# finished trying out the adam optimizer for a particular c, now need", "sum up the losses self.loss2 = tf.reduce_sum(self.l2dist) self.loss1 = tf.reduce_sum(self.const * tf.add_n(self.loss1list)) self.loss", "TARGETED = False # should we target one specific class? 
or just be", "in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (others[i] - reals[i] + self.CONFIDENCE))) else: # if", "np.ones(batch_size)*self.initial_const upper_bound = np.ones(batch_size)*1e10 # the best l2, score, and image attack o_bestl2", "if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: CONST[e] *= 100", "of times we perform binary search to find the optimal tradeoff-constant between distance", "currLoss o_bestattack[e] = ii if currLoss == o_bestscore[e] and l2 < o_bestl2[e]: o_bestl2[e]", "# completely reset adam's internal state. self.sess.run(self.init) batch = imgs[:batch_size] batchlab = labs[:batch_size]", "attack. This attack is the most efficient and should be used as the", "initial constant is not important. boxmin: Minimum pixel value (default -0.5). boxmax: Maximum", "else: # if untargeted, optimize for making this class least likely. for i", "print('go up to',len(imgs)) for i in range(0,len(imgs),self.batch_size): # print('tick',i) r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights)) return", "to tanh-space imgs = np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999) # set", "range(self.BINARY_SEARCH_STEPS): # completely reset adam's internal state. self.sess.run(self.init) batch = imgs[:batch_size] batchlab =", "this directory. ## Modified by <NAME> 2017 import tensorflow as tf import numpy", "untargeted, optimize for making this class least likely. 
for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0,", "used to be e6 for iteration in range(self.MAX_ITERATIONS): # perform the attack _,", "else: return np.dot(x != y, weights) batch_size = self.batch_size # convert to tanh-space", "tf.reduce_max((1 - self.tlab)*self.outputs[i] - (self.tlab*10000), 1) reals.append(real) others.append(other) self.reals, self.others = reals, others", "o_bestattack[e] = ii # finished trying out the adam optimizer for a particular", "LEARNING_RATE = 1e-2 # larger values converge faster to less accurate results, default", "that are farther away, but more strongly classified as adversarial. batch_size: Number of", "dtype=tf.float32) # and here's what we use to assign them self.assign_timg = tf.placeholder(tf.float32,", "If self.targeted is false, then targets are the original class labels. \"\"\" r", "one specific class? or just be wrong? CONFIDENCE = 0 # how strong", "(reals[i] - others[i] + self.CONFIDENCE))) self.loss1list = loss1list # TODO: remove # sum", "# if iteration % 200 == 0: # print(iteration, self.sess.run((self.loss, self.loss1, self.loss2))) #", "to tf self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32) self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32) self.const = tf.Variable(np.zeros(batch_size),", "= [-1]*batch_size o_bestattack = [np.zeros(imgs[0].shape)]*batch_size for outer_step in range(self.BINARY_SEARCH_STEPS): # completely reset adam's", "indicating the expected loss of the learner \"\"\" if not isinstance(x, (float, int,", "= confidence self.initial_const = initial_const self.batch_size = batch_size self.num_models = len(models) self.num_labels =", "and l2 < bestl2[e]: bestl2[e] = l2 if currLoss > o_bestscore[e]: o_bestl2[e] =", "= INITIAL_CONST, boxmin = -0.5, boxmax = 0.5): \"\"\" The L_2 optimized attack.", "lower_bound[e] = max(lower_bound[e],CONST[e]) if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else:", "the constant with binary 
search MAX_ITERATIONS = 10000 # number of iterations to", "results. abort_early: If true, allows early aborts if gradient descent gets stuck. initial_const:", "= ii # finished trying out the adam optimizer for a particular c,", "self.assign_weights = tf.placeholder(tf.float32, [self.num_models]) # the resulting image, tanh'd to keep bounded from", "now need to decide on the next value # adjust the constant as", "batch = imgs[:batch_size] batchlab = labs[:batch_size] bestl2 = [1e10]*batch_size bestscore = [0.0]*batch_size #", "TODO: remove # sum up the losses self.loss2 = tf.reduce_sum(self.l2dist) self.loss1 = tf.reduce_sum(self.const", "\"\"\" The L_2 optimized attack. This attack is the most efficient and should", "Setup the adam optimizer and keep track of variables we're creating start_vars =", "attack on a batch of images and labels. \"\"\" def compareLoss(x, y): \"\"\"", "\"\"\" Run the attack on a batch of images and labels. \"\"\" def", "> o_bestscore[e]: o_bestl2[e] = l2 o_bestscore[e] = currLoss o_bestattack[e] = ii if currLoss", "[] if self.TARGETED: # if targetted, optimize for making the other class most", "class most likely for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (others[i] - reals[i]", "- self.tlab)*self.outputs[i] - (self.tlab*10000), 1) reals.append(real) others.append(other) self.reals, self.others = reals, others loss1list", "o_bestl2[e] = l2 o_bestscore[e] = currLoss o_bestattack[e] = ii if currLoss == o_bestscore[e]", "= (lower_bound[e] + upper_bound[e])/2 else: CONST[e] *= 100 # return the best solution", "be INITIAL_CONST = 1e-3 # the initial constant c to pick as a", "loss1list # TODO: remove # sum up the losses self.loss2 = tf.reduce_sum(self.l2dist) self.loss1", "BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS, abort_early = ABORT_EARLY, initial_const = INITIAL_CONST, boxmin = -0.5,", "values produce better results but are slower to converge. 
binary_search_steps: The number of", "np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999) # set the lower and upper", "times to adjust the constant with binary search MAX_ITERATIONS = 10000 # number", "adversarial examples: higher produces examples that are farther away, but more strongly classified", "0 # how strong the adversarial example should be INITIAL_CONST = 1e-3 #", "= abort_early self.CONFIDENCE = confidence self.initial_const = initial_const self.batch_size = batch_size self.num_models =", "in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] * (reals[i] - others[i] + self.CONFIDENCE))) self.loss1list = loss1list", "= tf.reduce_sum(self.const * tf.add_n(self.loss1list)) self.loss = self.loss1 + self.loss2 self.reals = reals self.others", "Confidence of adversarial examples: higher produces examples that are farther away, but more", "self.others = others # Setup the adam optimizer and keep track of variables", "(boxmin + boxmax) / 2. self.newimg = tf.tanh(modifier + self.timg) * self.boxmul +", "+ self.CONFIDENCE))) else: # if untargeted, optimize for making this class least likely.", "+= self.CONFIDENCE x = np.argmax(x, 1) # these are the predictions of each", "!= y, weights) batch_size = self.batch_size # convert to tanh-space imgs = np.arctanh((imgs", "1e-3 # the initial constant c to pick as a first guess class", "[model.predict(self.newimg) for model in models] # distance to the input data self.l2dist =", "for x in end_vars if x.name not in start_vars] # these are the", "don't have to send them over again self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab: batchlab, self.assign_const:", "confidence. If binary_search_steps is large, the initial constant is not important. boxmin: Minimum", "bounded from boxmin to boxmax self.boxmul = (boxmax - boxmin) / 2. self.boxplus", "on the given images for the given targets. 
If self.targeted is true, then", "= learning_rate self.MAX_ITERATIONS = max_iterations self.BINARY_SEARCH_STEPS = binary_search_steps self.ABORT_EARLY = abort_early self.CONFIDENCE =", "the model self.outputs = [model.predict(self.newimg) for model in models] # distance to the", "most efficient and should be used as the primary attack to evaluate potential", "range(0,len(imgs),self.batch_size): # print('tick',i) r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights)) return np.array(r) def attack_batch(self, imgs, labs, weights):", "iteration % 200 == 0: # print(iteration, self.sess.run((self.loss, self.loss1, self.loss2))) # check if", "tf.reduce_sum(self.l2dist) self.loss1 = tf.reduce_sum(self.const * tf.add_n(self.loss1list)) self.loss = self.loss1 + self.loss2 self.reals =", "variables to initialize when we run self.setup = [] self.setup.append(self.timg.assign(self.assign_timg)) self.setup.append(self.tlab.assign(self.assign_tlab)) self.setup.append(self.const.assign(self.assign_const)) self.setup.append(self.weights.assign(self.assign_weights))", "= np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels) # if iteration % 200 == 0: # print(iteration,", "# if we stop improving, abort gradient descent early LEARNING_RATE = 1e-2 #", "if iteration % 200 == 0: # print(iteration, self.sess.run((self.loss, self.loss1, self.loss2))) # check", "for each individual prediction if self.TARGETED: v[y] -= self.CONFIDENCE else: v[y] += self.CONFIDENCE", "for l_2 distance ## ## Copyright (C) 2016, <NAME> <<EMAIL>>. ## ## This", "adjust the constant with binary search MAX_ITERATIONS = 10000 # number of iterations", "= tf.placeholder(tf.float32, [batch_size]) self.assign_weights = tf.placeholder(tf.float32, [self.num_models]) # the resulting image, tanh'd to", "as a first guess class CarliniL2: def __init__(self, sess, models, batch_size=1, confidence =", "labels. 
\"\"\" r = [] # print('go up to',len(imgs)) for i in range(0,len(imgs),self.batch_size):", "making the other class most likely for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0, self.weights[i] *", "attack_batch(self, imgs, labs, weights): \"\"\" Run the attack on a batch of images", "[1e10]*batch_size o_bestscore = [-1]*batch_size o_bestattack = [np.zeros(imgs[0].shape)]*batch_size for outer_step in range(self.BINARY_SEARCH_STEPS): # completely", "abort_early = ABORT_EARLY, initial_const = INITIAL_CONST, boxmin = -0.5, boxmax = 0.5): \"\"\"", "= reals, others loss1list = [] if self.TARGETED: # if targetted, optimize for", "self.l2dist, self.outputs, self.newimg]) scores = np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels) # if iteration % 200", "more efficient in sending data to tf self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32) self.tlab =", "the L_2 attack on the given images for the given targets. If self.targeted", "= l2 bestscore[e] = currLoss if currLoss == bestscore[e] and l2 < bestl2[e]:", "the original class labels. \"\"\" r = [] # print('go up to',len(imgs)) for", "# Setup the adam optimizer and keep track of variables we're creating start_vars", "If true, allows early aborts if gradient descent gets stuck. initial_const: The initial", "boxmax) / 2. self.newimg = tf.tanh(modifier + self.timg) * self.boxmul + self.boxplus #", "(C) 2016, <NAME> <<EMAIL>>. ## ## This program is licenced under the BSD", "with binary search MAX_ITERATIONS = 10000 # number of iterations to perform gradient", "as adversarial. batch_size: Number of attacks to run simultaneously. targeted: True if we", "if gradient descent gets stuck. initial_const: The initial tradeoff-constant to use to tune", "false, then targets are the original class labels. 
\"\"\" r = [] #", "labs[:batch_size] bestl2 = [1e10]*batch_size bestscore = [0.0]*batch_size # set the variables so that", "== 0: # print(iteration, self.sess.run((self.loss, self.loss1, self.loss2))) # check if we should abort", "we're getting nowhere. (check every 10%) if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS * .10) ==", "will produce poor results. abort_early: If true, allows early aborts if gradient descent", "scores for each individual prediction if self.TARGETED: v[y] -= self.CONFIDENCE else: v[y] +=", "finished trying out the adam optimizer for a particular c, now need to", "this value of c bestl2[e] = l2 bestscore[e] = currLoss if currLoss ==", "accurate; setting too small will require a large learning rate and will produce", "boxmin to boxmax self.boxmul = (boxmax - boxmin) / 2. self.boxplus = (boxmin", "v in x: # update the target scores for each individual prediction if", "licenced under the BSD 2-Clause licence, ## contained in the LICENCE file in", "(check every 10%) if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS * .10) == 0: if l", "min(upper_bound[e], CONST[e]) if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: lower_bound[e]", "assign them self.assign_timg = tf.placeholder(tf.float32, shape) self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels)) self.assign_const =", "= min(upper_bound[e], CONST[e]) if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else:", "original class labels. 
\"\"\" r = [] # print('go up to',len(imgs)) for i", "1) # these are the predictions of each hypothesis if self.TARGETED: return np.dot(x", "tf.placeholder(tf.float32, (batch_size, num_labels)) self.assign_const = tf.placeholder(tf.float32, [batch_size]) self.assign_weights = tf.placeholder(tf.float32, [self.num_models]) # the", "\"\"\" if not isinstance(x, (float, int, np.int64)): x = np.copy(x) for v in", "for v in x: # update the target scores for each individual prediction", "optimal tradeoff-constant between distance and confidence. max_iterations: The maximum number of iterations. Larger", "self.loss1 = tf.reduce_sum(self.const * tf.add_n(self.loss1list)) self.loss = self.loss1 + self.loss2 self.reals = reals", "compareLoss(sc, np.argmax(batchlab[e])) # expected loss of the learner if currLoss > bestscore[e]: #", "0.999999) # set the lower and upper bounds accordingly lower_bound = np.zeros(batch_size) CONST", "= TARGETED, learning_rate = LEARNING_RATE, binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS, abort_early =", "self.batch_size = batch_size self.num_models = len(models) self.num_labels = num_labels shape = (batch_size,image_size,image_size,num_channels) #", "optimize for making the other class most likely for i in xrange(self.num_models): loss1list.append(tf.maximum(0.0,", "weights): \"\"\" Perform the L_2 attack on the given images for the given", "= tf.Variable(np.zeros(batch_size), dtype=tf.float32) self.weights = tf.Variable(np.zeros(self.num_models), dtype=tf.float32) # and here's what we use", "BSD 2-Clause licence, ## contained in the LICENCE file in this directory. ##", "important. boxmin: Minimum pixel value (default -0.5). 
boxmax: Maximum pixel value (default 0.5).", "self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32) self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32) self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32) self.weights", "bestscore = [0.0]*batch_size # set the variables so that we don't have to", "## ## Copyright (C) 2016, <NAME> <<EMAIL>>. ## ## This program is licenced", "tf self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32) self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32) self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32)", "= (boxmin + boxmax) / 2. self.newimg = tf.tanh(modifier + self.timg) * self.boxmul", "x in tf.global_variables()) optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE) self.train = optimizer.minimize(self.loss, var_list=[modifier]) end_vars = tf.global_variables()", "these are the variables to initialize when we run self.setup = [] self.setup.append(self.timg.assign(self.assign_timg))", "decide on the next value # adjust the constant as needed for e", "constant as needed for e in range(batch_size): if bestscore[e] == 1.0: upper_bound[e] =", "the initial constant is not important. boxmin: Minimum pixel value (default -0.5). boxmax:", "the predictions of each hypothesis if self.TARGETED: return np.dot(x == y, weights) else:", "l2 o_bestattack[e] = ii # finished trying out the adam optimizer for a", "to keep bounded from boxmin to boxmax self.boxmul = (boxmax - boxmin) /", "binary_search_steps is large, the initial constant is not important. 
boxmin: Minimum pixel value", "binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS, abort_early = ABORT_EARLY, initial_const = INITIAL_CONST, boxmin", "# adjust the constant as needed for e in range(batch_size): if bestscore[e] ==", "score, and image attack o_bestl2 = [1e10]*batch_size o_bestscore = [-1]*batch_size o_bestattack = [np.zeros(imgs[0].shape)]*batch_size", "if self.TARGETED: return np.dot(x == y, weights) else: return np.dot(x != y, weights)", "l2 o_bestscore[e] = currLoss o_bestattack[e] = ii if currLoss == o_bestscore[e] and l2", "\"\"\" r = [] # print('go up to',len(imgs)) for i in range(0,len(imgs),self.batch_size): #", "= (lower_bound[e] + upper_bound[e])/2 else: lower_bound[e] = max(lower_bound[e],CONST[e]) if upper_bound[e] < 1e9: CONST[e]", "to be more efficient in sending data to tf self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32)", "[np.zeros(imgs[0].shape)]*batch_size for outer_step in range(self.BINARY_SEARCH_STEPS): # completely reset adam's internal state. self.sess.run(self.init) batch", "1.0: upper_bound[e] = min(upper_bound[e], CONST[e]) if upper_bound[e] < 1e9: CONST[e] = (lower_bound[e] +", "is true, then the targets represents the target labels. If self.targeted is false,", "Copyright (C) 2016, <NAME> <<EMAIL>>. ## ## This program is licenced under the", "== y, weights) else: return np.dot(x != y, weights) batch_size = self.batch_size #", "_, l, l2s, scores, nimg = self.sess.run([self.train, self.loss, self.l2dist, self.outputs, self.newimg]) scores =", "otherwise. learning_rate: The learning rate for the attack algorithm. Smaller values produce better", "boxmin) / 2. self.boxplus = (boxmin + boxmax) / 2. self.newimg = tf.tanh(modifier", "trying out the adam optimizer for a particular c, now need to decide", "attack, False otherwise. learning_rate: The learning rate for the attack algorithm. 
Smaller values", "{self.assign_timg: batch, self.assign_tlab: batchlab, self.assign_const: CONST, self.assign_weights: weights}) # print \"Outer Step \",", "self.reals = reals self.others = others # Setup the adam optimizer and keep", "__init__(self, sess, models, batch_size=1, confidence = CONFIDENCE, targeted = TARGETED, learning_rate = LEARNING_RATE,", "default 1e-2 TARGETED = False # should we target one specific class? or", "learning_rate = LEARNING_RATE, binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS, abort_early = ABORT_EARLY, initial_const", "If binary_search_steps is large, the initial constant is not important. boxmin: Minimum pixel", "bestscore[e] and l2 < bestl2[e]: bestl2[e] = l2 if currLoss > o_bestscore[e]: o_bestl2[e]", "= tf.Variable(np.zeros(self.num_models), dtype=tf.float32) # and here's what we use to assign them self.assign_timg", "= np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999) # set the lower and", "the input data self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)),[1,2,3]) # compute the", "if currLoss == bestscore[e] and l2 < bestl2[e]: bestl2[e] = l2 if currLoss", "them self.assign_timg = tf.placeholder(tf.float32, shape) self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels)) self.assign_const = tf.placeholder(tf.float32,", "self.num_labels = num_labels shape = (batch_size,image_size,image_size,num_channels) # the variable we're going to optimize", "self.weights[i] * (reals[i] - others[i] + self.CONFIDENCE))) self.loss1list = loss1list # TODO: remove", "num_models x num_classes y is the true label or target label of the", "learner if currLoss > bestscore[e]: # we've found a clear improvement for this", "1e9: CONST[e] = (lower_bound[e] + upper_bound[e])/2 else: CONST[e] *= 100 # return the", "of the learner if currLoss > bestscore[e]: # we've found a clear improvement", "enumerate(zip(l2s,scores,nimg)): currLoss = compareLoss(sc, 
np.argmax(batchlab[e])) # expected loss of the learner if currLoss", "to optimize over modifier = tf.Variable(np.zeros(shape,dtype=np.float32)) # these are variables to be more", "outer_step, \"Current C \", CONST, lower_bound, upper_bound prev = 1e10 # used to", "[] others = [] for i in xrange(self.num_models): real = tf.reduce_sum((self.tlab) * self.outputs[i]," ]
[ "-------------------------------------- import rootpath rootpath.append() # ========================================= # EXPORTS # -------------------------------------- from inspecta.inspector import", "========================================= # IMPORTS # -------------------------------------- import rootpath rootpath.append() # ========================================= # EXPORTS #", "IMPORTS # -------------------------------------- import rootpath rootpath.append() # ========================================= # EXPORTS # -------------------------------------- from", "# ========================================= # IMPORTS # -------------------------------------- import rootpath rootpath.append() # ========================================= # EXPORTS", "# IMPORTS # -------------------------------------- import rootpath rootpath.append() # ========================================= # EXPORTS # --------------------------------------", "# -------------------------------------- import rootpath rootpath.append() # ========================================= # EXPORTS # -------------------------------------- from inspecta.inspector", "import rootpath rootpath.append() # ========================================= # EXPORTS # -------------------------------------- from inspecta.inspector import *" ]
[ "= sera de tipo flotante def restar(request, numero1, numero2): res = numero1 -", "django.http import HttpResponse def index(request): return HttpResponse(\"Desde la vista App\") def sumar(request, numero1,", "def multiplicar(request, numero1, numero2): mul = numero1 * numero2 return HttpResponse(\"La multiplicacion de", "= %s\" % (numero1, numero2, mul)) ### !! %s = sicnifica que sera", "numero1, numero2): res = numero1 - numero2 return HttpResponse(\"La resta de %s -", "numero1, numero2): div = numero1 / numero2 return HttpResponse(\"La division de %s /", "mul = numero1 * numero2 return HttpResponse(\"La multiplicacion de %s * %s =", "HttpResponse def index(request): return HttpResponse(\"Desde la vista App\") def sumar(request, numero1, numero2): sum", "flotante def dividir(request, numero1, numero2): div = numero1 / numero2 return HttpResponse(\"La division", "vista App\") def sumar(request, numero1, numero2): sum = numero1 + numero2 return HttpResponse(\"La", "multiplicacion de %s * %s = %s\" % (numero1, numero2, mul)) ### !!", "tipo string !! %f = sera de tipo flotante def restar(request, numero1, numero2):", "%s = sicnifica que sera de tipo string !! %f = sera de", "sera de tipo flotante def dividir(request, numero1, numero2): div = numero1 / numero2", "de tipo string !! %f = sera de tipo flotante def dividir(request, numero1,", "= numero1 / numero2 return HttpResponse(\"La division de %s / %s = %f\"", "%s / %s = %f\" % (numero1, numero2, div)) ### !! %s =", "numero1 * numero2 return HttpResponse(\"La multiplicacion de %s * %s = %s\" %", "%s * %s = %s\" % (numero1, numero2, mul)) ### !! %s =", "numero1 + numero2 return HttpResponse(\"La suma de %s + %s = %s\" %", "sera de tipo string !! %f = sera de tipo flotante def dividir(request,", "de tipo flotante def multiplicar(request, numero1, numero2): mul = numero1 * numero2 return", "de %s + %s = %s\" % (numero1, numero2, sum)) ### !! 
%s", "restar(request, numero1, numero2): res = numero1 - numero2 return HttpResponse(\"La resta de %s", "= %s\" % (numero1, numero2, res)) ### !! %s = sicnifica que sera", "+ numero2 return HttpResponse(\"La suma de %s + %s = %s\" % (numero1,", "= sera de tipo flotante def multiplicar(request, numero1, numero2): mul = numero1 *", "return HttpResponse(\"La suma de %s + %s = %s\" % (numero1, numero2, sum))", "de tipo string !! %f = sera de tipo flotante def restar(request, numero1,", "index(request): return HttpResponse(\"Desde la vista App\") def sumar(request, numero1, numero2): sum = numero1", "import HttpResponse def index(request): return HttpResponse(\"Desde la vista App\") def sumar(request, numero1, numero2):", "tipo flotante def multiplicar(request, numero1, numero2): mul = numero1 * numero2 return HttpResponse(\"La", "que sera de tipo string !! %f = sera de tipo flotante def", "= %s\" % (numero1, numero2, sum)) ### !! %s = sicnifica que sera", "division de %s / %s = %f\" % (numero1, numero2, div)) ### !!", "- %s = %s\" % (numero1, numero2, res)) ### !! %s = sicnifica", "= sicnifica que sera de tipo string !! %f = sera de tipo", "numero2): res = numero1 - numero2 return HttpResponse(\"La resta de %s - %s", "views here. from django.http import HttpResponse def index(request): return HttpResponse(\"Desde la vista App\")", "%s = %f\" % (numero1, numero2, div)) ### !! %s = sicnifica que", "numero2 return HttpResponse(\"La suma de %s + %s = %s\" % (numero1, numero2,", "%s\" % (numero1, numero2, res)) ### !! %s = sicnifica que sera de", "resta de %s - %s = %s\" % (numero1, numero2, res)) ### !!", "numero2, res)) ### !! %s = sicnifica que sera de tipo string !!", "numero2 return HttpResponse(\"La multiplicacion de %s * %s = %s\" % (numero1, numero2,", "return HttpResponse(\"La resta de %s - %s = %s\" % (numero1, numero2, res))", "sum = numero1 + numero2 return HttpResponse(\"La suma de %s + %s =", "/ %s = %f\" % (numero1, numero2, div)) ### !! 
%s = sicnifica", "sumar(request, numero1, numero2): sum = numero1 + numero2 return HttpResponse(\"La suma de %s", "la vista App\") def sumar(request, numero1, numero2): sum = numero1 + numero2 return", "tipo flotante def dividir(request, numero1, numero2): div = numero1 / numero2 return HttpResponse(\"La", "string !! %f = sera de tipo flotante def multiplicar(request, numero1, numero2): mul", "= sera de tipo flotante def dividir(request, numero1, numero2): div = numero1 /", "string !! %f = sera de tipo flotante def restar(request, numero1, numero2): res", "django.shortcuts import render # Create your views here. from django.http import HttpResponse def", "%s = %s\" % (numero1, numero2, res)) ### !! %s = sicnifica que", "string !! %f = sera de tipo flotante def dividir(request, numero1, numero2): div", "%s - %s = %s\" % (numero1, numero2, res)) ### !! %s =", "sum)) ### !! %s = sicnifica que sera de tipo string !! %f", "!! %f = sera de tipo flotante def multiplicar(request, numero1, numero2): mul =", "HttpResponse(\"La multiplicacion de %s * %s = %s\" % (numero1, numero2, mul)) ###", "dividir(request, numero1, numero2): div = numero1 / numero2 return HttpResponse(\"La division de %s", "* %s = %s\" % (numero1, numero2, mul)) ### !! %s = sicnifica", "%s = %s\" % (numero1, numero2, sum)) ### !! %s = sicnifica que", "numero2, sum)) ### !! %s = sicnifica que sera de tipo string !!", "(numero1, numero2, div)) ### !! %s = sicnifica que sera de tipo string", "import render # Create your views here. from django.http import HttpResponse def index(request):", "here. from django.http import HttpResponse def index(request): return HttpResponse(\"Desde la vista App\") def", "(numero1, numero2, mul)) ### !! 
%s = sicnifica que sera de tipo string", "numero2 return HttpResponse(\"La division de %s / %s = %f\" % (numero1, numero2,", "* numero2 return HttpResponse(\"La multiplicacion de %s * %s = %s\" % (numero1,", "HttpResponse(\"La resta de %s - %s = %s\" % (numero1, numero2, res)) ###", "%f = sera de tipo flotante def multiplicar(request, numero1, numero2): mul = numero1", "%f = sera de tipo flotante def dividir(request, numero1, numero2): div = numero1", "= numero1 + numero2 return HttpResponse(\"La suma de %s + %s = %s\"", "def sumar(request, numero1, numero2): sum = numero1 + numero2 return HttpResponse(\"La suma de", "return HttpResponse(\"La division de %s / %s = %f\" % (numero1, numero2, div))", "sera de tipo flotante def restar(request, numero1, numero2): res = numero1 - numero2", "numero2): mul = numero1 * numero2 return HttpResponse(\"La multiplicacion de %s * %s", "numero2): div = numero1 / numero2 return HttpResponse(\"La division de %s / %s", "(numero1, numero2, sum)) ### !! %s = sicnifica que sera de tipo string", "mul)) ### !! %s = sicnifica que sera de tipo string !! %f", "# Create your views here. from django.http import HttpResponse def index(request): return HttpResponse(\"Desde", "numero2): sum = numero1 + numero2 return HttpResponse(\"La suma de %s + %s", "def index(request): return HttpResponse(\"Desde la vista App\") def sumar(request, numero1, numero2): sum =", "numero2 return HttpResponse(\"La resta de %s - %s = %s\" % (numero1, numero2,", "tipo string !! %f = sera de tipo flotante def multiplicar(request, numero1, numero2):", "de tipo string !! %f = sera de tipo flotante def multiplicar(request, numero1,", "% (numero1, numero2, mul)) ### !! %s = sicnifica que sera de tipo", "flotante def restar(request, numero1, numero2): res = numero1 - numero2 return HttpResponse(\"La resta", "!! %s = sicnifica que sera de tipo string !! %f = sera", "+ %s = %s\" % (numero1, numero2, sum)) ### !! %s = sicnifica", "%s = %s\" % (numero1, numero2, mul)) ### !! 
%s = sicnifica que", "def restar(request, numero1, numero2): res = numero1 - numero2 return HttpResponse(\"La resta de", "!! %f = sera de tipo flotante def dividir(request, numero1, numero2): div =", "!! %f = sera de tipo flotante def restar(request, numero1, numero2): res =", "% (numero1, numero2, sum)) ### !! %s = sicnifica que sera de tipo", "your views here. from django.http import HttpResponse def index(request): return HttpResponse(\"Desde la vista", "%f = sera de tipo flotante def restar(request, numero1, numero2): res = numero1", "de %s - %s = %s\" % (numero1, numero2, res)) ### !! %s", "% (numero1, numero2, res)) ### !! %s = sicnifica que sera de tipo", "%f\" % (numero1, numero2, div)) ### !! %s = sicnifica que sera de", "App\") def sumar(request, numero1, numero2): sum = numero1 + numero2 return HttpResponse(\"La suma", "(numero1, numero2, res)) ### !! %s = sicnifica que sera de tipo string", "def dividir(request, numero1, numero2): div = numero1 / numero2 return HttpResponse(\"La division de", "numero1, numero2): mul = numero1 * numero2 return HttpResponse(\"La multiplicacion de %s *", "div = numero1 / numero2 return HttpResponse(\"La division de %s / %s =", "div)) ### !! %s = sicnifica que sera de tipo string !! %f", "HttpResponse(\"La division de %s / %s = %f\" % (numero1, numero2, div)) ###", "% (numero1, numero2, div)) ### !! %s = sicnifica que sera de tipo", "%s\" % (numero1, numero2, sum)) ### !! %s = sicnifica que sera de", "/ numero2 return HttpResponse(\"La division de %s / %s = %f\" % (numero1,", "render # Create your views here. from django.http import HttpResponse def index(request): return", "de %s / %s = %f\" % (numero1, numero2, div)) ### !! %s", "%s + %s = %s\" % (numero1, numero2, sum)) ### !! %s =", "return HttpResponse(\"La multiplicacion de %s * %s = %s\" % (numero1, numero2, mul))", "from django.http import HttpResponse def index(request): return HttpResponse(\"Desde la vista App\") def sumar(request,", "res)) ### !! 
%s = sicnifica que sera de tipo string !! %f", "multiplicar(request, numero1, numero2): mul = numero1 * numero2 return HttpResponse(\"La multiplicacion de %s", "sicnifica que sera de tipo string !! %f = sera de tipo flotante", "tipo string !! %f = sera de tipo flotante def dividir(request, numero1, numero2):", "return HttpResponse(\"Desde la vista App\") def sumar(request, numero1, numero2): sum = numero1 +", "HttpResponse(\"Desde la vista App\") def sumar(request, numero1, numero2): sum = numero1 + numero2", "de tipo flotante def dividir(request, numero1, numero2): div = numero1 / numero2 return", "%s\" % (numero1, numero2, mul)) ### !! %s = sicnifica que sera de", "de %s * %s = %s\" % (numero1, numero2, mul)) ### !! %s", "numero1 - numero2 return HttpResponse(\"La resta de %s - %s = %s\" %", "numero1 / numero2 return HttpResponse(\"La division de %s / %s = %f\" %", "de tipo flotante def restar(request, numero1, numero2): res = numero1 - numero2 return", "res = numero1 - numero2 return HttpResponse(\"La resta de %s - %s =", "flotante def multiplicar(request, numero1, numero2): mul = numero1 * numero2 return HttpResponse(\"La multiplicacion", "- numero2 return HttpResponse(\"La resta de %s - %s = %s\" % (numero1,", "sera de tipo string !! %f = sera de tipo flotante def restar(request,", "numero1, numero2): sum = numero1 + numero2 return HttpResponse(\"La suma de %s +", "numero2, mul)) ### !! %s = sicnifica que sera de tipo string !!", "sera de tipo flotante def multiplicar(request, numero1, numero2): mul = numero1 * numero2", "from django.shortcuts import render # Create your views here. from django.http import HttpResponse", "= numero1 - numero2 return HttpResponse(\"La resta de %s - %s = %s\"", "Create your views here. from django.http import HttpResponse def index(request): return HttpResponse(\"Desde la", "= %f\" % (numero1, numero2, div)) ### !! %s = sicnifica que sera", "numero2, div)) ### !! 
%s = sicnifica que sera de tipo string !!", "HttpResponse(\"La suma de %s + %s = %s\" % (numero1, numero2, sum)) ###", "### !! %s = sicnifica que sera de tipo string !! %f =", "= numero1 * numero2 return HttpResponse(\"La multiplicacion de %s * %s = %s\"", "tipo flotante def restar(request, numero1, numero2): res = numero1 - numero2 return HttpResponse(\"La", "suma de %s + %s = %s\" % (numero1, numero2, sum)) ### !!", "sera de tipo string !! %f = sera de tipo flotante def multiplicar(request," ]
[ "log from time import sleep from utils import createCubes, releaseCubes # Identify each", "#%d, Iteration #%d\" % (i + 1, k + 1)) log.info(c.getConfigProtocolVersion()) log.info(c.toioID.get()) log.info(c.motion.get())", "as log from time import sleep from utils import createCubes, releaseCubes # Identify", "the battery level on the console. cubes = createCubes(initialReport=True) try: for k in", "sleep from utils import createCubes, releaseCubes # Identify each cube by the color", "range(10): for i, c in enumerate(cubes): log.info(\"Cube #%d, Iteration #%d\" % (i +", "i, c in enumerate(cubes): log.info(\"Cube #%d, Iteration #%d\" % (i + 1, k", "and the sound signal, # and report the battery level on the console.", "the sound signal, # and report the battery level on the console. cubes", "<filename>examples/simplequery.py<gh_stars>1-10 import logging as log from time import sleep from utils import createCubes,", "level on the console. cubes = createCubes(initialReport=True) try: for k in range(10): for", "each cube by the color and the sound signal, # and report the", "k in range(10): for i, c in enumerate(cubes): log.info(\"Cube #%d, Iteration #%d\" %", "import sleep from utils import createCubes, releaseCubes # Identify each cube by the", "battery level on the console. 
cubes = createCubes(initialReport=True) try: for k in range(10):", "c in enumerate(cubes): log.info(\"Cube #%d, Iteration #%d\" % (i + 1, k +", "log.info(\"Cube #%d, Iteration #%d\" % (i + 1, k + 1)) log.info(c.getConfigProtocolVersion()) log.info(c.toioID.get())", "+ 1, k + 1)) log.info(c.getConfigProtocolVersion()) log.info(c.toioID.get()) log.info(c.motion.get()) log.info(c.button.get()) log.info(c.battery.get()) sleep(0.5) finally: #", "createCubes(initialReport=True) try: for k in range(10): for i, c in enumerate(cubes): log.info(\"Cube #%d,", "k + 1)) log.info(c.getConfigProtocolVersion()) log.info(c.toioID.get()) log.info(c.motion.get()) log.info(c.button.get()) log.info(c.battery.get()) sleep(0.5) finally: # Disconnect releaseCubes(cubes)", "# and report the battery level on the console. cubes = createCubes(initialReport=True) try:", "import createCubes, releaseCubes # Identify each cube by the color and the sound", "try: for k in range(10): for i, c in enumerate(cubes): log.info(\"Cube #%d, Iteration", "color and the sound signal, # and report the battery level on the", "cube by the color and the sound signal, # and report the battery", "# Identify each cube by the color and the sound signal, # and", "time import sleep from utils import createCubes, releaseCubes # Identify each cube by", "the console. cubes = createCubes(initialReport=True) try: for k in range(10): for i, c", "createCubes, releaseCubes # Identify each cube by the color and the sound signal,", "cubes = createCubes(initialReport=True) try: for k in range(10): for i, c in enumerate(cubes):", "report the battery level on the console. 
cubes = createCubes(initialReport=True) try: for k", "from time import sleep from utils import createCubes, releaseCubes # Identify each cube", "from utils import createCubes, releaseCubes # Identify each cube by the color and", "in range(10): for i, c in enumerate(cubes): log.info(\"Cube #%d, Iteration #%d\" % (i", "#%d\" % (i + 1, k + 1)) log.info(c.getConfigProtocolVersion()) log.info(c.toioID.get()) log.info(c.motion.get()) log.info(c.button.get()) log.info(c.battery.get())", "(i + 1, k + 1)) log.info(c.getConfigProtocolVersion()) log.info(c.toioID.get()) log.info(c.motion.get()) log.info(c.button.get()) log.info(c.battery.get()) sleep(0.5) finally:", "enumerate(cubes): log.info(\"Cube #%d, Iteration #%d\" % (i + 1, k + 1)) log.info(c.getConfigProtocolVersion())", "sound signal, # and report the battery level on the console. cubes =", "signal, # and report the battery level on the console. cubes = createCubes(initialReport=True)", "on the console. cubes = createCubes(initialReport=True) try: for k in range(10): for i,", "console. cubes = createCubes(initialReport=True) try: for k in range(10): for i, c in", "utils import createCubes, releaseCubes # Identify each cube by the color and the", "% (i + 1, k + 1)) log.info(c.getConfigProtocolVersion()) log.info(c.toioID.get()) log.info(c.motion.get()) log.info(c.button.get()) log.info(c.battery.get()) sleep(0.5)", "Identify each cube by the color and the sound signal, # and report", "= createCubes(initialReport=True) try: for k in range(10): for i, c in enumerate(cubes): log.info(\"Cube", "in enumerate(cubes): log.info(\"Cube #%d, Iteration #%d\" % (i + 1, k + 1))", "by the color and the sound signal, # and report the battery level", "and report the battery level on the console. 
cubes = createCubes(initialReport=True) try: for", "for i, c in enumerate(cubes): log.info(\"Cube #%d, Iteration #%d\" % (i + 1,", "logging as log from time import sleep from utils import createCubes, releaseCubes #", "the color and the sound signal, # and report the battery level on", "releaseCubes # Identify each cube by the color and the sound signal, #", "for k in range(10): for i, c in enumerate(cubes): log.info(\"Cube #%d, Iteration #%d\"", "Iteration #%d\" % (i + 1, k + 1)) log.info(c.getConfigProtocolVersion()) log.info(c.toioID.get()) log.info(c.motion.get()) log.info(c.button.get())", "import logging as log from time import sleep from utils import createCubes, releaseCubes", "1, k + 1)) log.info(c.getConfigProtocolVersion()) log.info(c.toioID.get()) log.info(c.motion.get()) log.info(c.button.get()) log.info(c.battery.get()) sleep(0.5) finally: # Disconnect" ]
[ "<filename>students/K33401/Tikhonova_Elena/Lr1/task_4/server.py import socket import threading def get_connect(socket, number): print(\"I am thread number\", number)", "if bool(messages[name]): clientsocket.send(messages[name].pop(0)+b'\\n') else: break if __name__ == \"__main__\": with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as", "+ ':' + data.decode(\"utf-8\") print(message) for key in messages: if key != name:", "!= b'.': message = name + ':' + data.decode(\"utf-8\") print(message) for key in", "') name = clientsocket.recv(1024) name = name.decode(\"utf-8\") messages[name] = [] print(name, 'is connected')", "== \"__main__\": with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('localhost', 8080)) s.listen(10) messages = {}", "clientsocket.recv(1024) if data == b'q': print(name, 'left the chat') break else: if data", "am thread number\", number) clientsocket, address = socket.accept() clientsocket.send(b'Please enter your name: ')", "bool(messages[name]): clientsocket.send(messages[name].pop(0)+b'\\n') else: break if __name__ == \"__main__\": with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:", "__name__ == \"__main__\": with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('localhost', 8080)) s.listen(10) messages =", "= [] for i in range(3): x = threading.Thread(target=get_connect, args=(s,i+1,)) threads.append(x) x.start() for", "= [] threads = [] for i in range(3): x = threading.Thread(target=get_connect, args=(s,i+1,))", "== b'q': print(name, 'left the chat') break else: if data != b'.': message", "else: break if __name__ == \"__main__\": with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('localhost', 8080))", "messages['all'] = [] threads = [] for i in range(3): x = threading.Thread(target=get_connect,", "data.decode(\"utf-8\") print(message) for key in messages: if key != name: messages[key].append(message.encode(\"utf-8\")) while True:", "= name.decode(\"utf-8\") messages[name] = [] 
print(name, 'is connected') while True: data = clientsocket.recv(1024)", "messages[name] = [] print(name, 'is connected') while True: data = clientsocket.recv(1024) if data", "= name + ':' + data.decode(\"utf-8\") print(message) for key in messages: if key", "while True: if bool(messages[name]): clientsocket.send(messages[name].pop(0)+b'\\n') else: break if __name__ == \"__main__\": with socket.socket(socket.AF_INET,", "import socket import threading def get_connect(socket, number): print(\"I am thread number\", number) clientsocket,", "clientsocket.send(b'Please enter your name: ') name = clientsocket.recv(1024) name = name.decode(\"utf-8\") messages[name] =", "name = name.decode(\"utf-8\") messages[name] = [] print(name, 'is connected') while True: data =", "chat') break else: if data != b'.': message = name + ':' +", "s.bind(('localhost', 8080)) s.listen(10) messages = {} messages['all'] = [] threads = [] for", "with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('localhost', 8080)) s.listen(10) messages = {} messages['all'] =", "name: messages[key].append(message.encode(\"utf-8\")) while True: if bool(messages[name]): clientsocket.send(messages[name].pop(0)+b'\\n') else: break if __name__ == \"__main__\":", "print(name, 'is connected') while True: data = clientsocket.recv(1024) if data == b'q': print(name,", "key in messages: if key != name: messages[key].append(message.encode(\"utf-8\")) while True: if bool(messages[name]): clientsocket.send(messages[name].pop(0)+b'\\n')", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('localhost', 8080)) s.listen(10) messages = {} messages['all'] = []", "your name: ') name = clientsocket.recv(1024) name = name.decode(\"utf-8\") messages[name] = [] print(name,", "[] threads = [] for i in range(3): x = threading.Thread(target=get_connect, args=(s,i+1,)) threads.append(x)", "8080)) s.listen(10) messages = {} messages['all'] = [] threads = [] for i", "in range(3): x = 
threading.Thread(target=get_connect, args=(s,i+1,)) threads.append(x) x.start() for i in range(3): threads[i].join()", "b'.': message = name + ':' + data.decode(\"utf-8\") print(message) for key in messages:", "data != b'.': message = name + ':' + data.decode(\"utf-8\") print(message) for key", "{} messages['all'] = [] threads = [] for i in range(3): x =", "'left the chat') break else: if data != b'.': message = name +", "print(name, 'left the chat') break else: if data != b'.': message = name", "name: ') name = clientsocket.recv(1024) name = name.decode(\"utf-8\") messages[name] = [] print(name, 'is", "+ data.decode(\"utf-8\") print(message) for key in messages: if key != name: messages[key].append(message.encode(\"utf-8\")) while", "\"__main__\": with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('localhost', 8080)) s.listen(10) messages = {} messages['all']", "if key != name: messages[key].append(message.encode(\"utf-8\")) while True: if bool(messages[name]): clientsocket.send(messages[name].pop(0)+b'\\n') else: break if", "socket.SOCK_STREAM) as s: s.bind(('localhost', 8080)) s.listen(10) messages = {} messages['all'] = [] threads", "clientsocket.send(messages[name].pop(0)+b'\\n') else: break if __name__ == \"__main__\": with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('localhost',", "else: if data != b'.': message = name + ':' + data.decode(\"utf-8\") print(message)", "for i in range(3): x = threading.Thread(target=get_connect, args=(s,i+1,)) threads.append(x) x.start() for i in", "True: if bool(messages[name]): clientsocket.send(messages[name].pop(0)+b'\\n') else: break if __name__ == \"__main__\": with socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "'is connected') while True: data = clientsocket.recv(1024) if data == b'q': print(name, 'left", "print(message) for key in messages: if key != name: messages[key].append(message.encode(\"utf-8\")) while True: if", "messages = {} messages['all'] = [] threads = [] for i in 
range(3):", "data = clientsocket.recv(1024) if data == b'q': print(name, 'left the chat') break else:", "data == b'q': print(name, 'left the chat') break else: if data != b'.':", "threads = [] for i in range(3): x = threading.Thread(target=get_connect, args=(s,i+1,)) threads.append(x) x.start()", "name + ':' + data.decode(\"utf-8\") print(message) for key in messages: if key !=", "s: s.bind(('localhost', 8080)) s.listen(10) messages = {} messages['all'] = [] threads = []", "range(3): x = threading.Thread(target=get_connect, args=(s,i+1,)) threads.append(x) x.start() for i in range(3): threads[i].join() #print(messages['all'])", "as s: s.bind(('localhost', 8080)) s.listen(10) messages = {} messages['all'] = [] threads =", "i in range(3): x = threading.Thread(target=get_connect, args=(s,i+1,)) threads.append(x) x.start() for i in range(3):", "number) clientsocket, address = socket.accept() clientsocket.send(b'Please enter your name: ') name = clientsocket.recv(1024)", "for key in messages: if key != name: messages[key].append(message.encode(\"utf-8\")) while True: if bool(messages[name]):", "messages[key].append(message.encode(\"utf-8\")) while True: if bool(messages[name]): clientsocket.send(messages[name].pop(0)+b'\\n') else: break if __name__ == \"__main__\": with", "print(\"I am thread number\", number) clientsocket, address = socket.accept() clientsocket.send(b'Please enter your name:", "message = name + ':' + data.decode(\"utf-8\") print(message) for key in messages: if", "key != name: messages[key].append(message.encode(\"utf-8\")) while True: if bool(messages[name]): clientsocket.send(messages[name].pop(0)+b'\\n') else: break if __name__", "if __name__ == \"__main__\": with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('localhost', 8080)) s.listen(10) messages", "address = socket.accept() clientsocket.send(b'Please enter your name: ') name = clientsocket.recv(1024) name =", "s.listen(10) messages = {} messages['all'] = [] threads = [] for i 
in", "= {} messages['all'] = [] threads = [] for i in range(3): x", "socket import threading def get_connect(socket, number): print(\"I am thread number\", number) clientsocket, address", "import threading def get_connect(socket, number): print(\"I am thread number\", number) clientsocket, address =", "break if __name__ == \"__main__\": with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('localhost', 8080)) s.listen(10)", "if data != b'.': message = name + ':' + data.decode(\"utf-8\") print(message) for", "clientsocket, address = socket.accept() clientsocket.send(b'Please enter your name: ') name = clientsocket.recv(1024) name", "enter your name: ') name = clientsocket.recv(1024) name = name.decode(\"utf-8\") messages[name] = []", "= [] print(name, 'is connected') while True: data = clientsocket.recv(1024) if data ==", "in messages: if key != name: messages[key].append(message.encode(\"utf-8\")) while True: if bool(messages[name]): clientsocket.send(messages[name].pop(0)+b'\\n') else:", "break else: if data != b'.': message = name + ':' + data.decode(\"utf-8\")", "messages: if key != name: messages[key].append(message.encode(\"utf-8\")) while True: if bool(messages[name]): clientsocket.send(messages[name].pop(0)+b'\\n') else: break", "while True: data = clientsocket.recv(1024) if data == b'q': print(name, 'left the chat')", "thread number\", number) clientsocket, address = socket.accept() clientsocket.send(b'Please enter your name: ') name", "if data == b'q': print(name, 'left the chat') break else: if data !=", "connected') while True: data = clientsocket.recv(1024) if data == b'q': print(name, 'left the", "number\", number) clientsocket, address = socket.accept() clientsocket.send(b'Please enter your name: ') name =", "':' + data.decode(\"utf-8\") print(message) for key in messages: if key != name: messages[key].append(message.encode(\"utf-8\"))", "= clientsocket.recv(1024) if data == b'q': print(name, 'left the chat') break else: if", 
"name.decode(\"utf-8\") messages[name] = [] print(name, 'is connected') while True: data = clientsocket.recv(1024) if", "threading def get_connect(socket, number): print(\"I am thread number\", number) clientsocket, address = socket.accept()", "= clientsocket.recv(1024) name = name.decode(\"utf-8\") messages[name] = [] print(name, 'is connected') while True:", "clientsocket.recv(1024) name = name.decode(\"utf-8\") messages[name] = [] print(name, 'is connected') while True: data", "= socket.accept() clientsocket.send(b'Please enter your name: ') name = clientsocket.recv(1024) name = name.decode(\"utf-8\")", "True: data = clientsocket.recv(1024) if data == b'q': print(name, 'left the chat') break", "[] for i in range(3): x = threading.Thread(target=get_connect, args=(s,i+1,)) threads.append(x) x.start() for i", "socket.accept() clientsocket.send(b'Please enter your name: ') name = clientsocket.recv(1024) name = name.decode(\"utf-8\") messages[name]", "get_connect(socket, number): print(\"I am thread number\", number) clientsocket, address = socket.accept() clientsocket.send(b'Please enter", "def get_connect(socket, number): print(\"I am thread number\", number) clientsocket, address = socket.accept() clientsocket.send(b'Please", "name = clientsocket.recv(1024) name = name.decode(\"utf-8\") messages[name] = [] print(name, 'is connected') while", "[] print(name, 'is connected') while True: data = clientsocket.recv(1024) if data == b'q':", "!= name: messages[key].append(message.encode(\"utf-8\")) while True: if bool(messages[name]): clientsocket.send(messages[name].pop(0)+b'\\n') else: break if __name__ ==", "number): print(\"I am thread number\", number) clientsocket, address = socket.accept() clientsocket.send(b'Please enter your", "b'q': print(name, 'left the chat') break else: if data != b'.': message =", "the chat') break else: if data != b'.': message = name + ':'" ]
[ "b = 2 temp = a a = b b = temp print(a,b)", "a = 1 b = 2 temp = a a = b b", "1 b = 2 temp = a a = b b = temp", "= 1 b = 2 temp = a a = b b =" ]
[ "editable=False, verbose_name=\"created\", ), ), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), (", "\"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), ( \"rate\", models.PositiveSmallIntegerField( validators=[ django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5),", "django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5), ] ), ), ( \"car\", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name=\"rates\", to=\"cars.car\", ), ),", "primary_key=True, serialize=False, verbose_name=\"ID\", ), ), ( \"created\", model_utils.fields.AutoCreatedField( default=django.utils.timezone.now, editable=False, verbose_name=\"created\", ), ),", "[ migrations.CreateModel( name=\"Car\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ), ),", "\"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ), ), ( \"created\", model_utils.fields.AutoCreatedField( default=django.utils.timezone.now, editable=False,", "class Migration(migrations.Migration): initial = True dependencies = [] operations = [ migrations.CreateModel( name=\"Car\",", "( \"rate\", models.PositiveSmallIntegerField( validators=[ django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5), ] ), ), ( \"car\", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE,", "False, }, ), migrations.CreateModel( name=\"Rate\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\",", "), ), ( \"car\", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name=\"rates\", to=\"cars.car\", ), ), ], options={ \"abstract\":", "), ), ( \"created\", 
model_utils.fields.AutoCreatedField( default=django.utils.timezone.now, editable=False, verbose_name=\"created\", ), ), ( \"modified\", model_utils.fields.AutoLastModifiedField(", "name=\"Car\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ), ), ( \"created\",", "] ), ), ( \"car\", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name=\"rates\", to=\"cars.car\", ), ), ], options={", "django.utils.timezone from django.db import migrations, models import model_utils.fields class Migration(migrations.Migration): initial = True", "), ( \"rate\", models.PositiveSmallIntegerField( validators=[ django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5), ] ), ), ( \"car\", models.ForeignKey(", "editable=False, verbose_name=\"modified\", ), ), (\"maker\", models.CharField(max_length=50)), (\"model\", models.CharField(max_length=50)), ], options={ \"abstract\": False, },", "models.CharField(max_length=50)), ], options={ \"abstract\": False, }, ), migrations.CreateModel( name=\"Rate\", fields=[ ( \"id\", models.AutoField(", "models.CharField(max_length=50)), (\"model\", models.CharField(max_length=50)), ], options={ \"abstract\": False, }, ), migrations.CreateModel( name=\"Rate\", fields=[ (", "migrations.CreateModel( name=\"Rate\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ), ), (", "), ( \"car\", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name=\"rates\", to=\"cars.car\", ), ), ], options={ \"abstract\": False,", "= [] operations = [ migrations.CreateModel( name=\"Car\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True,", "validators=[ django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5), ] ), ), ( \"car\", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name=\"rates\", 
to=\"cars.car\", ),", "), ), ( \"rate\", models.PositiveSmallIntegerField( validators=[ django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5), ] ), ), ( \"car\",", "verbose_name=\"modified\", ), ), (\"maker\", models.CharField(max_length=50)), (\"model\", models.CharField(max_length=50)), ], options={ \"abstract\": False, }, ),", "(\"model\", models.CharField(max_length=50)), ], options={ \"abstract\": False, }, ), migrations.CreateModel( name=\"Rate\", fields=[ ( \"id\",", "), ), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), (\"maker\", models.CharField(max_length=50)), (\"model\",", "editable=False, verbose_name=\"modified\", ), ), ( \"rate\", models.PositiveSmallIntegerField( validators=[ django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5), ] ), ),", "options={ \"abstract\": False, }, ), migrations.CreateModel( name=\"Rate\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True,", "import django.db.models.deletion import django.utils.timezone from django.db import migrations, models import model_utils.fields class Migration(migrations.Migration):", "models.PositiveSmallIntegerField( validators=[ django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5), ] ), ), ( \"car\", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name=\"rates\", to=\"cars.car\",", "serialize=False, verbose_name=\"ID\", ), ), ( \"created\", model_utils.fields.AutoCreatedField( default=django.utils.timezone.now, editable=False, verbose_name=\"created\", ), ), (", "verbose_name=\"created\", ), ), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), (\"maker\", models.CharField(max_length=50)),", "default=django.utils.timezone.now, editable=False, 
verbose_name=\"modified\", ), ), ( \"rate\", models.PositiveSmallIntegerField( validators=[ django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5), ] ),", "), ), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), ( \"rate\", models.PositiveSmallIntegerField(", "import django.utils.timezone from django.db import migrations, models import model_utils.fields class Migration(migrations.Migration): initial =", "(\"maker\", models.CharField(max_length=50)), (\"model\", models.CharField(max_length=50)), ], options={ \"abstract\": False, }, ), migrations.CreateModel( name=\"Rate\", fields=[", "( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), ( \"rate\", models.PositiveSmallIntegerField( validators=[ django.core.validators.MinValueValidator(1),", "auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ), ), ( \"created\", model_utils.fields.AutoCreatedField( default=django.utils.timezone.now, editable=False, verbose_name=\"created\", ),", "}, ), migrations.CreateModel( name=\"Rate\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ),", "), ), (\"maker\", models.CharField(max_length=50)), (\"model\", models.CharField(max_length=50)), ], options={ \"abstract\": False, }, ), migrations.CreateModel(", "], options={ \"abstract\": False, }, ), migrations.CreateModel( name=\"Rate\", fields=[ ( \"id\", models.AutoField( auto_created=True,", "models import model_utils.fields class Migration(migrations.Migration): initial = True dependencies = [] operations =", "( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ), ), ( \"created\", model_utils.fields.AutoCreatedField( default=django.utils.timezone.now,", "import model_utils.fields 
class Migration(migrations.Migration): initial = True dependencies = [] operations = [", "django.db.models.deletion import django.utils.timezone from django.db import migrations, models import model_utils.fields class Migration(migrations.Migration): initial", "name=\"Rate\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ), ), ( \"created\",", "verbose_name=\"ID\", ), ), ( \"created\", model_utils.fields.AutoCreatedField( default=django.utils.timezone.now, editable=False, verbose_name=\"created\", ), ), ( \"modified\",", "model_utils.fields class Migration(migrations.Migration): initial = True dependencies = [] operations = [ migrations.CreateModel(", "), ( \"created\", model_utils.fields.AutoCreatedField( default=django.utils.timezone.now, editable=False, verbose_name=\"created\", ), ), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now,", "initial = True dependencies = [] operations = [ migrations.CreateModel( name=\"Car\", fields=[ (", "model_utils.fields.AutoCreatedField( default=django.utils.timezone.now, editable=False, verbose_name=\"created\", ), ), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ),", "[] operations = [ migrations.CreateModel( name=\"Car\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False,", "django.core.validators import django.db.models.deletion import django.utils.timezone from django.db import migrations, models import model_utils.fields class", "), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), (\"maker\", models.CharField(max_length=50)), (\"model\", models.CharField(max_length=50)),", "django.db import migrations, models import model_utils.fields class Migration(migrations.Migration): initial = True 
dependencies =", "True dependencies = [] operations = [ migrations.CreateModel( name=\"Car\", fields=[ ( \"id\", models.AutoField(", "default=django.utils.timezone.now, editable=False, verbose_name=\"created\", ), ), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ),", "django.core.validators.MaxValueValidator(5), ] ), ), ( \"car\", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name=\"rates\", to=\"cars.car\", ), ), ],", "\"car\", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name=\"rates\", to=\"cars.car\", ), ), ], options={ \"abstract\": False, }, ),", "models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name=\"rates\", to=\"cars.car\", ), ), ], options={ \"abstract\": False, }, ), ]", "default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), (\"maker\", models.CharField(max_length=50)), (\"model\", models.CharField(max_length=50)), ], options={ \"abstract\": False,", "( \"created\", model_utils.fields.AutoCreatedField( default=django.utils.timezone.now, editable=False, verbose_name=\"created\", ), ), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False,", "( \"car\", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name=\"rates\", to=\"cars.car\", ), ), ], options={ \"abstract\": False, },", "( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), (\"maker\", models.CharField(max_length=50)), (\"model\", models.CharField(max_length=50)), ],", "model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), (\"maker\", models.CharField(max_length=50)), (\"model\", models.CharField(max_length=50)), ], options={ \"abstract\":", "\"abstract\": False, }, ), 
migrations.CreateModel( name=\"Rate\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False,", "editable=False, verbose_name=\"created\", ), ), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), (\"maker\",", "\"created\", model_utils.fields.AutoCreatedField( default=django.utils.timezone.now, editable=False, verbose_name=\"created\", ), ), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\",", "= True dependencies = [] operations = [ migrations.CreateModel( name=\"Car\", fields=[ ( \"id\",", "verbose_name=\"created\", ), ), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), ( \"rate\",", "), (\"maker\", models.CharField(max_length=50)), (\"model\", models.CharField(max_length=50)), ], options={ \"abstract\": False, }, ), migrations.CreateModel( name=\"Rate\",", "from django.db import migrations, models import model_utils.fields class Migration(migrations.Migration): initial = True dependencies", "migrations.CreateModel( name=\"Car\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ), ), (", "import django.core.validators import django.db.models.deletion import django.utils.timezone from django.db import migrations, models import model_utils.fields", "verbose_name=\"modified\", ), ), ( \"rate\", models.PositiveSmallIntegerField( validators=[ django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5), ] ), ), (", "\"rate\", models.PositiveSmallIntegerField( validators=[ django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5), ] ), ), ( \"car\", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name=\"rates\",", "= [ 
migrations.CreateModel( name=\"Car\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ),", "), migrations.CreateModel( name=\"Rate\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ), ),", "model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), ( \"rate\", models.PositiveSmallIntegerField( validators=[ django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5), ]", "import migrations, models import model_utils.fields class Migration(migrations.Migration): initial = True dependencies = []", "migrations, models import model_utils.fields class Migration(migrations.Migration): initial = True dependencies = [] operations", "), ( \"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), ( \"rate\", models.PositiveSmallIntegerField( validators=[", "fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ), ), ( \"created\", model_utils.fields.AutoCreatedField(", "models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\", ), ), ( \"created\", model_utils.fields.AutoCreatedField( default=django.utils.timezone.now, editable=False, verbose_name=\"created\",", "\"modified\", model_utils.fields.AutoLastModifiedField( default=django.utils.timezone.now, editable=False, verbose_name=\"modified\", ), ), (\"maker\", models.CharField(max_length=50)), (\"model\", models.CharField(max_length=50)), ], options={", "dependencies = [] operations = [ migrations.CreateModel( name=\"Car\", fields=[ ( \"id\", models.AutoField( auto_created=True,", "operations = [ migrations.CreateModel( name=\"Car\", fields=[ ( \"id\", models.AutoField( auto_created=True, primary_key=True, serialize=False, 
verbose_name=\"ID\",", "Migration(migrations.Migration): initial = True dependencies = [] operations = [ migrations.CreateModel( name=\"Car\", fields=[" ]
[ "import os import json from .exceptions import DataLoadException class JsonDataLoader: \"\"\" Load base", "self._path = path def load(self): if not os.path.exists(self._path): raise DataLoadException(\"Data file does not", ".exceptions import DataLoadException class JsonDataLoader: \"\"\" Load base calculation data from provided JSON", "not os.path.exists(self._path): raise DataLoadException(\"Data file does not exist.\") with open(self._path, \"r\") as f:", "json from .exceptions import DataLoadException class JsonDataLoader: \"\"\" Load base calculation data from", "os.path.exists(self._path): raise DataLoadException(\"Data file does not exist.\") with open(self._path, \"r\") as f: data", "def __init__(self, path): self._path = path def load(self): if not os.path.exists(self._path): raise DataLoadException(\"Data", "DataLoadException class JsonDataLoader: \"\"\" Load base calculation data from provided JSON file. \"\"\"", "path): self._path = path def load(self): if not os.path.exists(self._path): raise DataLoadException(\"Data file does", "__init__(self, path): self._path = path def load(self): if not os.path.exists(self._path): raise DataLoadException(\"Data file", "def load(self): if not os.path.exists(self._path): raise DataLoadException(\"Data file does not exist.\") with open(self._path,", "= path def load(self): if not os.path.exists(self._path): raise DataLoadException(\"Data file does not exist.\")", "file does not exist.\") with open(self._path, \"r\") as f: data = json.load(f) return", "from provided JSON file. \"\"\" def __init__(self, path): self._path = path def load(self):", "from .exceptions import DataLoadException class JsonDataLoader: \"\"\" Load base calculation data from provided", "\"\"\" def __init__(self, path): self._path = path def load(self): if not os.path.exists(self._path): raise", "file. 
\"\"\" def __init__(self, path): self._path = path def load(self): if not os.path.exists(self._path):", "import json from .exceptions import DataLoadException class JsonDataLoader: \"\"\" Load base calculation data", "calculation data from provided JSON file. \"\"\" def __init__(self, path): self._path = path", "data from provided JSON file. \"\"\" def __init__(self, path): self._path = path def", "DataLoadException(\"Data file does not exist.\") with open(self._path, \"r\") as f: data = json.load(f)", "raise DataLoadException(\"Data file does not exist.\") with open(self._path, \"r\") as f: data =", "if not os.path.exists(self._path): raise DataLoadException(\"Data file does not exist.\") with open(self._path, \"r\") as", "base calculation data from provided JSON file. \"\"\" def __init__(self, path): self._path =", "JSON file. \"\"\" def __init__(self, path): self._path = path def load(self): if not", "import DataLoadException class JsonDataLoader: \"\"\" Load base calculation data from provided JSON file.", "load(self): if not os.path.exists(self._path): raise DataLoadException(\"Data file does not exist.\") with open(self._path, \"r\")", "does not exist.\") with open(self._path, \"r\") as f: data = json.load(f) return data", "JsonDataLoader: \"\"\" Load base calculation data from provided JSON file. \"\"\" def __init__(self,", "os import json from .exceptions import DataLoadException class JsonDataLoader: \"\"\" Load base calculation", "Load base calculation data from provided JSON file. \"\"\" def __init__(self, path): self._path", "\"\"\" Load base calculation data from provided JSON file. \"\"\" def __init__(self, path):", "path def load(self): if not os.path.exists(self._path): raise DataLoadException(\"Data file does not exist.\") with", "class JsonDataLoader: \"\"\" Load base calculation data from provided JSON file. \"\"\" def", "provided JSON file. \"\"\" def __init__(self, path): self._path = path def load(self): if" ]
[ "permissions import ippon.cup_fight.permissions as cfp import ippon.cup_fight.serializers as cfs import ippon.models.cup_fight as cfm", "import ippon.cup_fight.permissions as cfp import ippon.cup_fight.serializers as cfs import ippon.models.cup_fight as cfm class", "rest_framework import viewsets, permissions import ippon.cup_fight.permissions as cfp import ippon.cup_fight.serializers as cfs import", "viewsets, permissions import ippon.cup_fight.permissions as cfp import ippon.cup_fight.serializers as cfs import ippon.models.cup_fight as", "ippon.cup_fight.permissions as cfp import ippon.cup_fight.serializers as cfs import ippon.models.cup_fight as cfm class CupFightViewSet(viewsets.ModelViewSet):", "cfp import ippon.cup_fight.serializers as cfs import ippon.models.cup_fight as cfm class CupFightViewSet(viewsets.ModelViewSet): queryset =", "as cfs import ippon.models.cup_fight as cfm class CupFightViewSet(viewsets.ModelViewSet): queryset = cfm.CupFight.objects.all() serializer_class =", "as cfm class CupFightViewSet(viewsets.ModelViewSet): queryset = cfm.CupFight.objects.all() serializer_class = cfs.CupFightSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly,", "import ippon.models.cup_fight as cfm class CupFightViewSet(viewsets.ModelViewSet): queryset = cfm.CupFight.objects.all() serializer_class = cfs.CupFightSerializer permission_classes", "from rest_framework import viewsets, permissions import ippon.cup_fight.permissions as cfp import ippon.cup_fight.serializers as cfs", "ippon.cup_fight.serializers as cfs import ippon.models.cup_fight as cfm class CupFightViewSet(viewsets.ModelViewSet): queryset = cfm.CupFight.objects.all() serializer_class", "ippon.models.cup_fight as cfm class CupFightViewSet(viewsets.ModelViewSet): queryset = cfm.CupFight.objects.all() serializer_class = cfs.CupFightSerializer permission_classes =", "import viewsets, permissions import ippon.cup_fight.permissions as cfp import ippon.cup_fight.serializers as cfs import 
ippon.models.cup_fight", "as cfp import ippon.cup_fight.serializers as cfs import ippon.models.cup_fight as cfm class CupFightViewSet(viewsets.ModelViewSet): queryset", "import ippon.cup_fight.serializers as cfs import ippon.models.cup_fight as cfm class CupFightViewSet(viewsets.ModelViewSet): queryset = cfm.CupFight.objects.all()", "cfs import ippon.models.cup_fight as cfm class CupFightViewSet(viewsets.ModelViewSet): queryset = cfm.CupFight.objects.all() serializer_class = cfs.CupFightSerializer", "cfm class CupFightViewSet(viewsets.ModelViewSet): queryset = cfm.CupFight.objects.all() serializer_class = cfs.CupFightSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly, cfp.IsCupFightOwnerOrReadOnly)" ]
[ "\"width\" # The key for the height KEY_HEIGHT: str = \"height\" # The", "Xs of the polygon in the meta-data (comma-separated list) KEY_POLY_X: str = \"poly_x\"", "key for the height KEY_HEIGHT: str = \"height\" # The key for the", "list) KEY_POLY_Y: str = \"poly_y\" # The key for the overall count KEY_COUNT:", "The key for the Y location KEY_Y: str = \"y\" # The key", "\"x\" # The key for the Y location KEY_Y: str = \"y\" #", "\"y\" # The key for the width KEY_WIDTH: str = \"width\" # The", "the overall count KEY_COUNT: str = \"count\" # The key for the index", "The key for the height KEY_HEIGHT: str = \"height\" # The key for", "str = \"width\" # The key for the height KEY_HEIGHT: str = \"height\"", "str = \"poly_x\" # The key for the Ys of the polygon in", "The key for the width KEY_WIDTH: str = \"width\" # The key for", "str = \"count\" # The key for the index of a group KEY_INDEX:", "polygon in the meta-data (comma-separated list) KEY_POLY_Y: str = \"poly_y\" # The key", "str = \"y\" # The key for the width KEY_WIDTH: str = \"width\"", "the polygon in the meta-data (comma-separated list) KEY_POLY_X: str = \"poly_x\" # The", "the meta-data (comma-separated list) KEY_POLY_X: str = \"poly_x\" # The key for the", "in the meta-data (comma-separated list) KEY_POLY_X: str = \"poly_x\" # The key for", "\"\"\" Module for constants relating to locating objects. 
\"\"\" # The key for", "for the location KEY_LOCATION: str = \"location\" # The key for the Xs", "the polygon in the meta-data (comma-separated list) KEY_POLY_Y: str = \"poly_y\" # The", "The key for the location KEY_LOCATION: str = \"location\" # The key for", "# The key for the Xs of the polygon in the meta-data (comma-separated", "polygon in the meta-data (comma-separated list) KEY_POLY_X: str = \"poly_x\" # The key", "(comma-separated list) KEY_POLY_Y: str = \"poly_y\" # The key for the overall count", "location KEY_Y: str = \"y\" # The key for the width KEY_WIDTH: str", "location KEY_LOCATION: str = \"location\" # The key for the Xs of the", "key for the X location KEY_X: str = \"x\" # The key for", "= \"poly_y\" # The key for the overall count KEY_COUNT: str = \"count\"", "= \"width\" # The key for the height KEY_HEIGHT: str = \"height\" #", "The key for the Xs of the polygon in the meta-data (comma-separated list)", "relating to locating objects. \"\"\" # The key for the X location KEY_X:", "of the polygon in the meta-data (comma-separated list) KEY_POLY_X: str = \"poly_x\" #", "for the Y location KEY_Y: str = \"y\" # The key for the", "str = \"poly_y\" # The key for the overall count KEY_COUNT: str =", "# The key for the Ys of the polygon in the meta-data (comma-separated", "\"height\" # The key for the location KEY_LOCATION: str = \"location\" # The", "constants relating to locating objects. \"\"\" # The key for the X location", "Ys of the polygon in the meta-data (comma-separated list) KEY_POLY_Y: str = \"poly_y\"", "KEY_HEIGHT: str = \"height\" # The key for the location KEY_LOCATION: str =", "X location KEY_X: str = \"x\" # The key for the Y location", "locating objects. 
\"\"\" # The key for the X location KEY_X: str =", "# The key for the width KEY_WIDTH: str = \"width\" # The key", "\"poly_x\" # The key for the Ys of the polygon in the meta-data", "str = \"location\" # The key for the Xs of the polygon in", "# The key for the index of a group KEY_INDEX: str = \"index\"", "# The key for the overall count KEY_COUNT: str = \"count\" # The", "= \"poly_x\" # The key for the Ys of the polygon in the", "= \"count\" # The key for the index of a group KEY_INDEX: str", "the Y location KEY_Y: str = \"y\" # The key for the width", "The key for the Ys of the polygon in the meta-data (comma-separated list)", "objects. \"\"\" # The key for the X location KEY_X: str = \"x\"", "for the X location KEY_X: str = \"x\" # The key for the", "key for the location KEY_LOCATION: str = \"location\" # The key for the", "for constants relating to locating objects. \"\"\" # The key for the X", "the meta-data (comma-separated list) KEY_POLY_Y: str = \"poly_y\" # The key for the", "# The key for the height KEY_HEIGHT: str = \"height\" # The key", "The key for the X location KEY_X: str = \"x\" # The key", "# The key for the Y location KEY_Y: str = \"y\" # The", "Y location KEY_Y: str = \"y\" # The key for the width KEY_WIDTH:", "# The key for the location KEY_LOCATION: str = \"location\" # The key", "KEY_X: str = \"x\" # The key for the Y location KEY_Y: str", "the Ys of the polygon in the meta-data (comma-separated list) KEY_POLY_Y: str =", "\"count\" # The key for the index of a group KEY_INDEX: str =", "for the width KEY_WIDTH: str = \"width\" # The key for the height", "meta-data (comma-separated list) KEY_POLY_Y: str = \"poly_y\" # The key for the overall", "The key for the overall count KEY_COUNT: str = \"count\" # The key", "\"poly_y\" # The key for the overall count KEY_COUNT: str = \"count\" #", "height KEY_HEIGHT: str = \"height\" # The key for the location KEY_LOCATION: str", "= \"height\" # The key for the location KEY_LOCATION: str = \"location\" 
#", "= \"x\" # The key for the Y location KEY_Y: str = \"y\"", "= \"y\" # The key for the width KEY_WIDTH: str = \"width\" #", "for the overall count KEY_COUNT: str = \"count\" # The key for the", "the X location KEY_X: str = \"x\" # The key for the Y", "Module for constants relating to locating objects. \"\"\" # The key for the", "\"\"\" # The key for the X location KEY_X: str = \"x\" #", "key for the Ys of the polygon in the meta-data (comma-separated list) KEY_POLY_Y:", "KEY_COUNT: str = \"count\" # The key for the index of a group", "KEY_POLY_Y: str = \"poly_y\" # The key for the overall count KEY_COUNT: str", "count KEY_COUNT: str = \"count\" # The key for the index of a", "for the height KEY_HEIGHT: str = \"height\" # The key for the location", "meta-data (comma-separated list) KEY_POLY_X: str = \"poly_x\" # The key for the Ys", "KEY_WIDTH: str = \"width\" # The key for the height KEY_HEIGHT: str =", "KEY_Y: str = \"y\" # The key for the width KEY_WIDTH: str =", "the height KEY_HEIGHT: str = \"height\" # The key for the location KEY_LOCATION:", "to locating objects. 
\"\"\" # The key for the X location KEY_X: str", "the location KEY_LOCATION: str = \"location\" # The key for the Xs of", "for the Ys of the polygon in the meta-data (comma-separated list) KEY_POLY_Y: str", "list) KEY_POLY_X: str = \"poly_x\" # The key for the Ys of the", "KEY_POLY_X: str = \"poly_x\" # The key for the Ys of the polygon", "str = \"x\" # The key for the Y location KEY_Y: str =", "KEY_LOCATION: str = \"location\" # The key for the Xs of the polygon", "# The key for the X location KEY_X: str = \"x\" # The", "= \"location\" # The key for the Xs of the polygon in the", "width KEY_WIDTH: str = \"width\" # The key for the height KEY_HEIGHT: str", "str = \"height\" # The key for the location KEY_LOCATION: str = \"location\"", "the Xs of the polygon in the meta-data (comma-separated list) KEY_POLY_X: str =", "key for the Y location KEY_Y: str = \"y\" # The key for", "(comma-separated list) KEY_POLY_X: str = \"poly_x\" # The key for the Ys of", "of the polygon in the meta-data (comma-separated list) KEY_POLY_Y: str = \"poly_y\" #", "in the meta-data (comma-separated list) KEY_POLY_Y: str = \"poly_y\" # The key for", "\"location\" # The key for the Xs of the polygon in the meta-data", "overall count KEY_COUNT: str = \"count\" # The key for the index of", "location KEY_X: str = \"x\" # The key for the Y location KEY_Y:", "key for the overall count KEY_COUNT: str = \"count\" # The key for", "the width KEY_WIDTH: str = \"width\" # The key for the height KEY_HEIGHT:", "for the Xs of the polygon in the meta-data (comma-separated list) KEY_POLY_X: str", "key for the Xs of the polygon in the meta-data (comma-separated list) KEY_POLY_X:", "key for the width KEY_WIDTH: str = \"width\" # The key for the" ]
[ "= { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-1-id': str(location_id), # test coercing", "u'Some event', 'locations-0-id': location_id, 'locations-0-name': u'Some other location' } self.save(event, data) assert len(event.locations)", "2 description updated', 'locations-1-name': u'Location 3', } self.save(event, data) self.session.commit() location = event.locations[0]", ") event = self.Event( name=u'Some event', locations=[ self.Location( name=u'Location #1' ), location ]", "self.save(event, data) assert len(event.locations) == 1 assert event.locations[0].id == location_id assert event.locations[0].name ==", "form = self.EventForm(MultiDict(data)) else: form = self.EventForm(MultiDict(data), obj=event) form.validate() form.populate_obj(event) self.session.commit() return event", "sa.orm.relationship(Event, backref='locations') def __repr__(self): return 'Location(id=%r, name=%r)' % (self.id, self.name) self.Event = Event", "= self.save() form = self.EventForm(MultiDict(), obj=event) assert form.locations[0].data['id'] def test_single_entry_update(self): event = self.save()", "model = self.Event locations = ModelFieldList(FormField(LocationForm)) self.LocationForm = LocationForm self.EventForm = EventForm def", "} self.save(event, data) assert len(event.locations) == 4 assert event.locations[0].id == location_id assert event.locations[0].name", "'locations-0-description': u'Some description' } if not event: event = self.Event() self.session.add(event) form =", "primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations')", "'Location(id=%r, name=%r)' % (self.id, self.name) self.Event = Event self.Location = Location def create_forms(self):", "else: form = self.EventForm(MultiDict(data), obj=event) form.validate() form.populate_obj(event) self.session.commit() return event class 
TestReplaceStrategy(ModelFieldListTestCase): def", "== u'Some location' data = { 'name': u'Some event' } form = self.EventForm(MultiDict(data))", "event.locations == [] class TestUpdateStrategy(ModelFieldListTestCase): def create_models(self): class Event(self.base): __tablename__ = 'event' id", "u'Location 1', 'locations-0-description': u'Location 1 description', 'locations-1-name': u'Location 2', 'locations-1-description': u'Location 2 description',", "location2 = event.locations[1] assert location.name == u'Location 2 updated' assert location.description == u'Location", "'locations-0-name': u'Location', } self.save(event, data) self.session.refresh(event) assert len(event.locations) == 1 assert event.locations[0] ==", "= 'location' TYPES = (u'', u'football field', u'restaurant') id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)", "EventForm(ModelForm): class Meta: model = self.Event locations = ModelFieldList(FormField(LocationForm)) self.LocationForm = LocationForm self.EventForm", "== u'Location 2 updated' assert location.description == u'Location 2 description updated' assert len(event.locations)", "updated', 'locations-0-description': u'Location 2 description updated', 'locations-1-name': u'Location 3', } self.save(event, data) self.session.commit()", "u'Fourth location' def test_delete_all_field_list_entries(self): event = self.save() data = { 'name': u'Some event'", "== [] class TestUpdateStrategy(ModelFieldListTestCase): def create_models(self): class Event(self.base): __tablename__ = 'event' id =", "Meta: model = self.Location class EventForm(ModelForm): class Meta: model = self.Event locations =", "assert not event.locations def test_update_and_remove(self): location = self.Location( name=u'Location #2' ) event =", "== u'Fourth location' def test_delete_all_field_list_entries(self): event = self.save() data = { 'name': u'Some", "location' def test_delete_all_field_list_entries(self): event = self.save() data = { 'name': u'Some event' }", 
"} self.save(event, data) assert event.locations assert event.locations[0].id != location_id def test_replace_entry(self): data =", "= self.EventForm(MultiDict(), obj=event) assert form.locations[0].data['id'] def test_single_entry_update(self): event = self.save() location_id = event.locations[0].id", "= (u'', u'football field', u'restaurant') id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255),", "primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) description = sa.Column(sa.Unicode(255), default=u'') type = sa.Column( sa.Unicode(255),", "location', 'locations-2-name': u'Third location', 'locations-3-id': 123, 'locations-3-name': u'Fourth location' } self.save(event, data) assert", "} form = self.EventForm(MultiDict(data)) form.validate() form.populate_obj(event) self.session.commit() event = self.session.query(self.Event).first() assert event.locations ==", "assert event.locations[0].id == location_id assert event.locations[0].name == u'Some other location' def test_creates_new_objects_for_entries_with_unknown_identifiers(self): event", "population_strategy='update' ) self.LocationForm = LocationForm self.EventForm = EventForm def test_with_none_as_formdata_for_existing_objects(self): event = self.save()", "= self.save(data=data) location_id = event.locations[0].id self.session.commit() data = { 'name': u'Some event', 'locations-0-name':", "data = { 'name': u'Some event', 'locations-0-id': event.locations[1].id, 'locations-0-name': u'Location 2 updated', 'locations-0-description':", "event=None, data=None): if not data: data = { 'name': u'Some event', 'locations-0-name': u'Some", "other location' def test_creates_new_objects_for_entries_with_unknown_identifiers(self): event = self.save() location_id = event.locations[0].id data = {", "= { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description' } if", "create_forms(self): class LocationForm(ModelForm): 
class Meta: model = self.Location class EventForm(ModelForm): class Meta: model", "description', 'locations-0-type': u'restaurant' } event = self.save(data=data) location_id = event.locations[0].id self.session.commit() data =", "self.Location( name=u'Location #2' ) event = self.Event( name=u'Some event', locations=[ self.Location( name=u'Location #1'", "u'Some location', 'locations-1-id': str(location_id), # test coercing works 'locations-1-name': u'Some other location', 'locations-2-name':", "zip(TYPES, TYPES)}, default=u'' ) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') def", "event = self.Event() self.session.add(event) form = self.EventForm(MultiDict(data)) else: form = self.EventForm(MultiDict(data), obj=event) form.validate()", "str(location_id), # test coercing works 'locations-1-name': u'Some other location', 'locations-2-name': u'Third location', 'locations-3-id':", "== 1 def test_replace_and_update(self): data = { 'name': u'Some event', 'locations-0-name': u'Location 1',", "self.Location only = ['name', 'description', 'type'] id = PassiveHiddenField() class EventForm(ModelForm): class Meta:", "u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description', 'locations-0-type': u'restaurant' } event =", "import PassiveHiddenField from tests import FormRelationsTestCase, MultiDict from wtforms_alchemy import ModelFieldList, ModelForm class", "self.Location = Location def save(self, event=None, data=None): if not data: data = {", "} self.save(event, data) self.session.commit() location = event.locations[0] location2 = event.locations[1] assert location.name ==", "LocationForm self.EventForm = EventForm def test_assigment_and_deletion(self): self.save() event = self.session.query(self.Event).first() assert event.locations[0].name ==", "locations = ModelFieldList( FormField(LocationForm), population_strategy='update' ) self.LocationForm = LocationForm 
self.EventForm = EventForm def", "ModelForm class ModelFieldListTestCase(FormRelationsTestCase): def create_models(self): class Event(self.base): __tablename__ = 'event' id = sa.Column(sa.Integer,", "len(event.locations) == 4 assert event.locations[0].id == location_id assert event.locations[0].name == u'Some other location'", "self.save(event, data) assert not event.locations def test_update_and_remove(self): location = self.Location( name=u'Location #2' )", "'locations-1-description': u'Location 2 description', } event = self.save(data=data) self.session.commit() data = { 'name':", "def save(self, event=None, data=None): if not data: data = { 'name': u'Some event',", "id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) description = sa.Column(sa.Unicode(255), default=u'')", "location' } self.save(event, data) assert event.locations assert event.locations[0].id != location_id def test_replace_entry(self): data", "self.session.commit() data = { 'locations-0-id': location.id, 'locations-0-name': u'Location', } self.save(event, data) self.session.refresh(event) assert", "== u'' def test_multiple_entries(self): event = self.save() location_id = event.locations[0].id data = {", "info={'choices': zip(TYPES, TYPES)}, default=u'' ) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations')", "'name': u'Some event', 'locations-0-name': u'Location 1', 'locations-0-description': u'Location 1 description', 'locations-1-name': u'Location 2',", "self.session.commit() event = self.session.query(self.Event).first() assert event.locations == [] class TestUpdateStrategy(ModelFieldListTestCase): def create_models(self): class", "= sa.Column( sa.Unicode(255), info={'choices': zip(TYPES, TYPES)}, default=u'' ) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event", "obj=event) assert form.locations[0].data['id'] def test_single_entry_update(self): event = 
self.save() location_id = event.locations[0].id data =", "u'Location 2 description updated', 'locations-1-name': u'Location 3', } self.save(event, data) self.session.commit() location =", "obj=event) form.validate() form.populate_obj(event) self.session.commit() return event class TestReplaceStrategy(ModelFieldListTestCase): def create_forms(self): class LocationForm(ModelForm): class", "locations=[ self.Location( name=u'Location #1' ), location ] ) self.session.add(event) self.session.commit() data = {", "Location(self.base): __tablename__ = 'location' TYPES = (u'', u'football field', u'restaurant') id = sa.Column(sa.Integer,", "} event = self.save(data=data) location_id = event.locations[0].id self.session.commit() data = { 'name': u'Some", "{ 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-1-id': str(location_id), # test coercing works", "nullable=False) class Location(self.base): __tablename__ = 'location' TYPES = (u'', u'football field', u'restaurant') id", "event', 'locations-0-id': event.locations[1].id, 'locations-0-name': u'Location 2 updated', 'locations-0-description': u'Location 2 description updated', 'locations-1-name':", "{ 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description' } if not", "description = sa.Column(sa.Unicode(255), default=u'') type = sa.Column( sa.Unicode(255), info={'choices': zip(TYPES, TYPES)}, default=u'' )", "u'Location 1 description', 'locations-1-name': u'Location 2', 'locations-1-description': u'Location 2 description', } event =", "assert event.locations[1].name == u'Some location' assert event.locations[2].name == u'Third location' assert event.locations[3].name ==", "= ModelFieldList( FormField(LocationForm), population_strategy='update' ) self.LocationForm = LocationForm self.EventForm = EventForm def test_with_none_as_formdata_for_existing_objects(self):", "'name': u'Some event' } form = self.EventForm(MultiDict(data)) form.validate() 
form.populate_obj(event) self.session.commit() event = self.session.query(self.Event).first()", "self.Event = Event self.Location = Location def save(self, event=None, data=None): if not data:", "u'Some description', 'locations-0-type': u'restaurant' } event = self.save(data=data) location_id = event.locations[0].id self.session.commit() data", "} self.save(event, data) assert not event.locations def test_update_and_remove(self): location = self.Location( name=u'Location #2'", "assert event.locations[0].id == location_id assert event.locations[0].name == u'Some other location' assert event.locations[1].name ==", "test_creates_new_objects_for_entries_with_unknown_identifiers(self): event = self.save() location_id = event.locations[0].id data = { 'name': u'Some event',", "== 4 assert event.locations[0].id == location_id assert event.locations[0].name == u'Some other location' assert", "event' } form = self.EventForm(MultiDict(data)) form.validate() form.populate_obj(event) self.session.commit() event = self.session.query(self.Event).first() assert event.locations", "123, 'locations-3-name': u'Fourth location' } self.save(event, data) assert len(event.locations) == 4 assert event.locations[0].id", "sa.Unicode(255), info={'choices': zip(TYPES, TYPES)}, default=u'' ) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event,", "class Location(self.base): __tablename__ = 'location' TYPES = (u'', u'football field', u'restaurant') id =", "u'restaurant') id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) description = sa.Column(sa.Unicode(255),", "data = { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description' }", "test_single_entry_update(self): event = self.save() location_id = event.locations[0].id data = { 'name': u'Some event',", "assert len(event.locations) == 1 assert event.locations[0].id == location_id assert 
event.locations[0].name == u'Some other", "event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description', 'locations-0-type': u'restaurant' } event = self.save(data=data)", "assert location2.description == u'' def test_multiple_entries(self): event = self.save() location_id = event.locations[0].id data", "event = self.session.query(self.Event).first() assert event.locations == [] class TestUpdateStrategy(ModelFieldListTestCase): def create_models(self): class Event(self.base):", "name=%r)' % (self.id, self.name) self.Event = Event self.Location = Location def create_forms(self): class", "LocationForm self.EventForm = EventForm def test_with_none_as_formdata_for_existing_objects(self): event = self.save() form = self.EventForm(MultiDict(), obj=event)", "event: event = self.Event() self.session.add(event) form = self.EventForm(MultiDict(data)) else: form = self.EventForm(MultiDict(data), obj=event)", "= self.save() location_id = event.locations[0].id data = { 'name': u'Some event', 'locations-0-id': location_id,", "'locations-0-id': location_id, 'locations-0-name': u'Some other location' } self.save(event, data) assert len(event.locations) == 1", "class LocationForm(ModelForm): class Meta: model = self.Location class EventForm(ModelForm): class Meta: model =", "1', 'locations-0-description': u'Location 1 description', 'locations-1-name': u'Location 2', 'locations-1-description': u'Location 2 description', }", "location', } self.save(event, data) location = event.locations[0] assert location.id != location_id assert location.name", "assert len(event.locations) == 4 assert event.locations[0].id == location_id assert event.locations[0].name == u'Some other", "len(event.locations) == 1 assert event.locations[0].id == location_id assert event.locations[0].name == u'Some other location'", "'locations-0-name': u'Location 2 updated', 'locations-0-description': u'Location 2 description updated', 'locations-1-name': u'Location 3', }", "data = { 
'name': u'Some event' } self.save(event, data) assert not event.locations def", "self.save() location_id = event.locations[0].id data = { 'name': u'Some event', 'locations-0-id': 12, 'locations-0-name':", "assert len(event.locations) == 2 assert location2.name == u'Location 3' assert location2.description == u''", "self.session.commit() location = event.locations[0] location2 = event.locations[1] assert location.name == u'Location 2 updated'", "EventForm(ModelForm): class Meta: model = self.Event locations = ModelFieldList( FormField(LocationForm), population_strategy='update' ) self.LocationForm", "= sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event", "= event.locations[0].id self.session.commit() data = { 'name': u'Some event', 'locations-0-name': u'Some other location',", "location.description == u'' assert location.type == u'' assert len(event.locations) == 1 def test_replace_and_update(self):", "form.populate_obj(event) self.session.commit() return event class TestReplaceStrategy(ModelFieldListTestCase): def create_forms(self): class LocationForm(ModelForm): class Meta: model", "self.session.add(event) self.session.commit() data = { 'locations-0-id': location.id, 'locations-0-name': u'Location', } self.save(event, data) self.session.refresh(event)", "event = self.Event( name=u'Some event', locations=[ self.Location( name=u'Location #1' ), location ] )", "import FormRelationsTestCase, MultiDict from wtforms_alchemy import ModelFieldList, ModelForm class ModelFieldListTestCase(FormRelationsTestCase): def create_models(self): class", "'type'] id = PassiveHiddenField() class EventForm(ModelForm): class Meta: model = self.Event locations =", "= self.Event() self.session.add(event) form = self.EventForm(MultiDict(data)) else: form = self.EventForm(MultiDict(data), obj=event) form.validate() form.populate_obj(event)", "assert 
event.locations[0].id != location_id def test_replace_entry(self): data = { 'name': u'Some event', 'locations-0-name':", "= Location def save(self, event=None, data=None): if not data: data = { 'name':", "= self.EventForm(MultiDict(data), obj=event) form.validate() form.populate_obj(event) self.session.commit() return event class TestReplaceStrategy(ModelFieldListTestCase): def create_forms(self): class", "= { 'name': u'Some event', 'locations-0-name': u'Some other location', } self.save(event, data) location", "event.locations[0].id data = { 'name': u'Some event', 'locations-0-id': location_id, 'locations-0-name': u'Some other location'", "= self.save() data = { 'name': u'Some event' } self.save(event, data) assert not", "def test_creates_new_objects_for_entries_with_unknown_identifiers(self): event = self.save() location_id = event.locations[0].id data = { 'name': u'Some", "Meta: model = self.Event locations = ModelFieldList(FormField(LocationForm)) self.LocationForm = LocationForm self.EventForm = EventForm", "self.save(data=data) location_id = event.locations[0].id self.session.commit() data = { 'name': u'Some event', 'locations-0-name': u'Some", "location.type == u'' assert len(event.locations) == 1 def test_replace_and_update(self): data = { 'name':", "Meta: model = self.Location only = ['name', 'description', 'type'] id = PassiveHiddenField() class", "from wtforms_components import PassiveHiddenField from tests import FormRelationsTestCase, MultiDict from wtforms_alchemy import ModelFieldList,", "u'Some other location' assert location.description == u'' assert location.type == u'' assert len(event.locations)", "u'' def test_multiple_entries(self): event = self.save() location_id = event.locations[0].id data = { 'name':", "self.Event() self.session.add(event) form = self.EventForm(MultiDict(data)) else: form = self.EventForm(MultiDict(data), obj=event) form.validate() form.populate_obj(event) self.session.commit()", "location', 'locations-0-description': 
u'Some description' } if not event: event = self.Event() self.session.add(event) form", "{ 'name': u'Some event', 'locations-0-id': event.locations[1].id, 'locations-0-name': u'Location 2 updated', 'locations-0-description': u'Location 2", "= self.save() location_id = event.locations[0].id data = { 'name': u'Some event', 'locations-0-id': 12,", "{ 'name': u'Some event' } self.save(event, data) assert not event.locations def test_update_and_remove(self): location", "self.save(event, data) self.session.commit() location = event.locations[0] location2 = event.locations[1] assert location.name == u'Location", "def test_multiple_entries(self): event = self.save() location_id = event.locations[0].id data = { 'name': u'Some", "event.locations[2].name == u'Third location' assert event.locations[3].name == u'Fourth location' def test_delete_all_field_list_entries(self): event =", ") self.session.add(event) self.session.commit() data = { 'locations-0-id': location.id, 'locations-0-name': u'Location', } self.save(event, data)", "sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ = 'location' TYPES = (u'', u'football field', u'restaurant')", "def test_replace_entry(self): data = { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some", "= self.Event( name=u'Some event', locations=[ self.Location( name=u'Location #1' ), location ] ) self.session.add(event)", "event.locations[0].name == u'Some location' data = { 'name': u'Some event' } form =", "= sa.orm.relationship(Event, backref='locations') self.Event = Event self.Location = Location def save(self, event=None, data=None):", "Location def save(self, event=None, data=None): if not data: data = { 'name': u'Some", "description' } if not event: event = self.Event() self.session.add(event) form = self.EventForm(MultiDict(data)) else:", "u'Some location' data = { 'name': u'Some event' } form = self.EventForm(MultiDict(data)) form.validate()", "== u'Some other 
location' assert location.description == u'' assert location.type == u'' assert", "= sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ = 'location' id", "form = self.EventForm(MultiDict(data), obj=event) form.validate() form.populate_obj(event) self.session.commit() return event class TestReplaceStrategy(ModelFieldListTestCase): def create_forms(self):", "nullable=True) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') self.Event = Event self.Location", "'locations-0-name': u'Some location', 'locations-1-id': str(location_id), # test coercing works 'locations-1-name': u'Some other location',", "location' assert event.locations[3].name == u'Fourth location' def test_delete_all_field_list_entries(self): event = self.save() data =", "event', 'locations-0-id': 12, 'locations-0-name': u'Some other location' } self.save(event, data) assert event.locations assert", "event.locations[0].id data = { 'name': u'Some event', 'locations-0-id': 12, 'locations-0-name': u'Some other location'", "assert location.description == u'Location 2 description updated' assert len(event.locations) == 2 assert location2.name", "LocationForm(ModelForm): class Meta: model = self.Location only = ['name', 'description', 'type'] id =", "'locations-1-name': u'Some other location', 'locations-2-name': u'Third location', 'locations-3-id': 123, 'locations-3-name': u'Fourth location' }", "not event: event = self.Event() self.session.add(event) form = self.EventForm(MultiDict(data)) else: form = self.EventForm(MultiDict(data),", "only = ['name', 'description', 'type'] id = PassiveHiddenField() class EventForm(ModelForm): class Meta: model", "sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') def __repr__(self): return 'Location(id=%r, name=%r)' % (self.id,", "self.session.commit() data = { 'name': u'Some 
event', 'locations-0-name': u'Some other location', } self.save(event,", "= sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') self.Event = Event self.Location = Location", "test_delete_all_field_list_entries(self): event = self.save() data = { 'name': u'Some event' } self.save(event, data)", "self.EventForm = EventForm def test_with_none_as_formdata_for_existing_objects(self): event = self.save() form = self.EventForm(MultiDict(), obj=event) assert", "event.locations[0].id == location_id assert event.locations[0].name == u'Some other location' assert event.locations[1].name == u'Some", "), location ] ) self.session.add(event) self.session.commit() data = { 'locations-0-id': location.id, 'locations-0-name': u'Location',", "primary_key=True) name = sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ = 'location' TYPES = (u'',", "EventForm def test_with_none_as_formdata_for_existing_objects(self): event = self.save() form = self.EventForm(MultiDict(), obj=event) assert form.locations[0].data['id'] def", "self.save() data = { 'name': u'Some event' } self.save(event, data) assert not event.locations", "'locations-0-name': u'Some other location', } self.save(event, data) location = event.locations[0] assert location.id !=", "default=u'' ) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') def __repr__(self): return", "[] class TestUpdateStrategy(ModelFieldListTestCase): def create_models(self): class Event(self.base): __tablename__ = 'event' id = sa.Column(sa.Integer,", "== u'Some other location' assert event.locations[1].name == u'Some location' assert event.locations[2].name == u'Third", "LocationForm(ModelForm): class Meta: model = self.Location class EventForm(ModelForm): class Meta: model = self.Event", "class Location(self.base): __tablename__ = 'location' id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) 
name = sa.Column(sa.Unicode(255),", "Location def create_forms(self): class LocationForm(ModelForm): class Meta: model = self.Location only = ['name',", "location.description == u'Location 2 description updated' assert len(event.locations) == 2 assert location2.name ==", "u'Location 2 updated', 'locations-0-description': u'Location 2 description updated', 'locations-1-name': u'Location 3', } self.save(event,", "name=u'Location #1' ), location ] ) self.session.add(event) self.session.commit() data = { 'locations-0-id': location.id,", "== u'Some other location' def test_creates_new_objects_for_entries_with_unknown_identifiers(self): event = self.save() location_id = event.locations[0].id data", "sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) description = sa.Column(sa.Unicode(255), default=u'') type =", "other location' assert event.locations[1].name == u'Some location' assert event.locations[2].name == u'Third location' assert", "sa.Column(sa.Unicode(255), nullable=True) description = sa.Column(sa.Unicode(255), default=u'') type = sa.Column( sa.Unicode(255), info={'choices': zip(TYPES, TYPES)},", "= { 'name': u'Some event', 'locations-0-name': u'Location 1', 'locations-0-description': u'Location 1 description', 'locations-1-name':", "TYPES = (u'', u'football field', u'restaurant') id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name =", "test_replace_and_update(self): data = { 'name': u'Some event', 'locations-0-name': u'Location 1', 'locations-0-description': u'Location 1", "= LocationForm self.EventForm = EventForm def test_with_none_as_formdata_for_existing_objects(self): event = self.save() form = self.EventForm(MultiDict(),", "name=u'Some event', locations=[ self.Location( name=u'Location #1' ), location ] ) self.session.add(event) self.session.commit() data", "location ] ) self.session.add(event) self.session.commit() data = { 'locations-0-id': location.id, 'locations-0-name': 
u'Location', }", "= { 'name': u'Some event', 'locations-0-id': event.locations[1].id, 'locations-0-name': u'Location 2 updated', 'locations-0-description': u'Location", "= { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description', 'locations-0-type': u'restaurant'", "location.name == u'Location 2 updated' assert location.description == u'Location 2 description updated' assert", "'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description', 'locations-0-type': u'restaurant' } event", "ModelFieldList(FormField(LocationForm)) self.LocationForm = LocationForm self.EventForm = EventForm def test_assigment_and_deletion(self): self.save() event = self.session.query(self.Event).first()", "self.EventForm(MultiDict(data)) else: form = self.EventForm(MultiDict(data), obj=event) form.validate() form.populate_obj(event) self.session.commit() return event class TestReplaceStrategy(ModelFieldListTestCase):", "= ModelFieldList(FormField(LocationForm)) self.LocationForm = LocationForm self.EventForm = EventForm def test_assigment_and_deletion(self): self.save() event =", "ModelFieldList, ModelForm class ModelFieldListTestCase(FormRelationsTestCase): def create_models(self): class Event(self.base): __tablename__ = 'event' id =", "model = self.Event locations = ModelFieldList( FormField(LocationForm), population_strategy='update' ) self.LocationForm = LocationForm self.EventForm", "'location' TYPES = (u'', u'football field', u'restaurant') id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name", "sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') def __repr__(self): return 'Location(id=%r, name=%r)' % (self.id, self.name)", "location2.name == u'Location 3' assert location2.description == u'' def test_multiple_entries(self): event = self.save()", "return 'Location(id=%r, name=%r)' % (self.id, self.name) self.Event = Event self.Location = 
Location def", "12, 'locations-0-name': u'Some other location' } self.save(event, data) assert event.locations assert event.locations[0].id !=", "event.locations[0] location2 = event.locations[1] assert location.name == u'Location 2 updated' assert location.description ==", "import FormField from wtforms_components import PassiveHiddenField from tests import FormRelationsTestCase, MultiDict from wtforms_alchemy", "'locations-0-description': u'Some description', 'locations-0-type': u'restaurant' } event = self.save(data=data) location_id = event.locations[0].id self.session.commit()", "3', } self.save(event, data) self.session.commit() location = event.locations[0] location2 = event.locations[1] assert location.name", "event.locations[0].name == u'Some other location' def test_creates_new_objects_for_entries_with_unknown_identifiers(self): event = self.save() location_id = event.locations[0].id", "= ['name', 'description', 'type'] id = PassiveHiddenField() class EventForm(ModelForm): class Meta: model =", "= event.locations[0].id data = { 'name': u'Some event', 'locations-0-id': location_id, 'locations-0-name': u'Some other", "location = self.Location( name=u'Location #2' ) event = self.Event( name=u'Some event', locations=[ self.Location(", "'name': u'Some event', 'locations-0-name': u'Some location', 'locations-1-id': str(location_id), # test coercing works 'locations-1-name':", "sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event =", "nullable=True) description = sa.Column(sa.Unicode(255), default=u'') type = sa.Column( sa.Unicode(255), info={'choices': zip(TYPES, TYPES)}, default=u''", "if not event: event = self.Event() self.session.add(event) form = self.EventForm(MultiDict(data)) else: form =", "type = sa.Column( sa.Unicode(255), info={'choices': zip(TYPES, TYPES)}, default=u'' ) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id))", 
"= EventForm def test_assigment_and_deletion(self): self.save() event = self.session.query(self.Event).first() assert event.locations[0].name == u'Some location'", "location', 'locations-0-description': u'Some description', 'locations-0-type': u'restaurant' } event = self.save(data=data) location_id = event.locations[0].id", "works 'locations-1-name': u'Some other location', 'locations-2-name': u'Third location', 'locations-3-id': 123, 'locations-3-name': u'Fourth location'", "= self.session.query(self.Event).first() assert event.locations == [] class TestUpdateStrategy(ModelFieldListTestCase): def create_models(self): class Event(self.base): __tablename__", "] ) self.session.add(event) self.session.commit() data = { 'locations-0-id': location.id, 'locations-0-name': u'Location', } self.save(event,", "other location', } self.save(event, data) location = event.locations[0] assert location.id != location_id assert", "u'Some event', 'locations-0-id': 12, 'locations-0-name': u'Some other location' } self.save(event, data) assert event.locations", "event.locations[1] assert location.name == u'Location 2 updated' assert location.description == u'Location 2 description", "create_models(self): class Event(self.base): __tablename__ = 'event' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.Unicode(255),", "event.locations[0].id self.session.commit() data = { 'name': u'Some event', 'locations-0-name': u'Some other location', }", "4 assert event.locations[0].id == location_id assert event.locations[0].name == u'Some other location' assert event.locations[1].name", "create_forms(self): class LocationForm(ModelForm): class Meta: model = self.Location only = ['name', 'description', 'type']", "test_with_none_as_formdata_for_existing_objects(self): event = self.save() form = self.EventForm(MultiDict(), obj=event) assert form.locations[0].data['id'] def test_single_entry_update(self): event", "'name': u'Some event' } self.save(event, data) assert not event.locations 
def test_update_and_remove(self): location =", "= sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ = 'location' TYPES", "'name': u'Some event', 'locations-0-id': location_id, 'locations-0-name': u'Some other location' } self.save(event, data) assert", "u'Some location' assert event.locations[2].name == u'Third location' assert event.locations[3].name == u'Fourth location' def", "u'Location 2 updated' assert location.description == u'Location 2 description updated' assert len(event.locations) ==", "assert location.name == u'Location 2 updated' assert location.description == u'Location 2 description updated'", "data = { 'name': u'Some event', 'locations-0-id': location_id, 'locations-0-name': u'Some other location' }", "def test_delete_all_field_list_entries(self): event = self.save() data = { 'name': u'Some event' } self.save(event,", "test_replace_entry(self): data = { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description',", "event.locations[0].id == location_id assert event.locations[0].name == u'Some other location' def test_creates_new_objects_for_entries_with_unknown_identifiers(self): event =", "if not data: data = { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description':", "'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description' } if not event:", "'locations-0-id': 12, 'locations-0-name': u'Some other location' } self.save(event, data) assert event.locations assert event.locations[0].id", "'locations-0-name': u'Some location', 'locations-0-description': u'Some description', 'locations-0-type': u'restaurant' } event = self.save(data=data) location_id", "def create_models(self): class Event(self.base): __tablename__ = 'event' id = sa.Column(sa.Integer, primary_key=True) name =", "class TestUpdateStrategy(ModelFieldListTestCase): def 
create_models(self): class Event(self.base): __tablename__ = 'event' id = sa.Column(sa.Integer, primary_key=True)", "form.populate_obj(event) self.session.commit() event = self.session.query(self.Event).first() assert event.locations == [] class TestUpdateStrategy(ModelFieldListTestCase): def create_models(self):", "2', 'locations-1-description': u'Location 2 description', } event = self.save(data=data) self.session.commit() data = {", "data = { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-1-id': str(location_id), # test", "class EventForm(ModelForm): class Meta: model = self.Event locations = ModelFieldList(FormField(LocationForm)) self.LocationForm = LocationForm", "'locations-0-description': u'Location 1 description', 'locations-1-name': u'Location 2', 'locations-1-description': u'Location 2 description', } event", "__repr__(self): return 'Location(id=%r, name=%r)' % (self.id, self.name) self.Event = Event self.Location = Location", "form.validate() form.populate_obj(event) self.session.commit() return event class TestReplaceStrategy(ModelFieldListTestCase): def create_forms(self): class LocationForm(ModelForm): class Meta:", "location' assert event.locations[1].name == u'Some location' assert event.locations[2].name == u'Third location' assert event.locations[3].name", "import sqlalchemy as sa from wtforms.fields import FormField from wtforms_components import PassiveHiddenField from", "event = self.save(data=data) location_id = event.locations[0].id self.session.commit() data = { 'name': u'Some event',", "assert location.id != location_id assert location.name == u'Some other location' assert location.description ==", "= 'location' id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) event_id =", "return event class TestReplaceStrategy(ModelFieldListTestCase): def create_forms(self): class LocationForm(ModelForm): class Meta: model = self.Location", 
"TestUpdateStrategy(ModelFieldListTestCase): def create_models(self): class Event(self.base): __tablename__ = 'event' id = sa.Column(sa.Integer, primary_key=True) name", "'locations-0-description': u'Location 2 description updated', 'locations-1-name': u'Location 3', } self.save(event, data) self.session.commit() location", "data=None): if not data: data = { 'name': u'Some event', 'locations-0-name': u'Some location',", "save(self, event=None, data=None): if not data: data = { 'name': u'Some event', 'locations-0-name':", "data) self.session.commit() location = event.locations[0] location2 = event.locations[1] assert location.name == u'Location 2", "2 updated' assert location.description == u'Location 2 description updated' assert len(event.locations) == 2", "<gh_stars>100-1000 import sqlalchemy as sa from wtforms.fields import FormField from wtforms_components import PassiveHiddenField", "data: data = { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description'", "assert event.locations[0].name == u'Some location' data = { 'name': u'Some event' } form", "form = self.EventForm(MultiDict(data)) form.validate() form.populate_obj(event) self.session.commit() event = self.session.query(self.Event).first() assert event.locations == []", "event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description' } if not event: event =", "!= location_id assert location.name == u'Some other location' assert location.description == u'' assert", "def test_replace_and_update(self): data = { 'name': u'Some event', 'locations-0-name': u'Location 1', 'locations-0-description': u'Location", "autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event,", "== u'Location 2 description updated' assert len(event.locations) == 2 assert location2.name == u'Location", "updated' assert location.description 
== u'Location 2 description updated' assert len(event.locations) == 2 assert", "self.Event = Event self.Location = Location def create_forms(self): class LocationForm(ModelForm): class Meta: model", "{ 'name': u'Some event', 'locations-0-name': u'Location 1', 'locations-0-description': u'Location 1 description', 'locations-1-name': u'Location", "u'Some event' } self.save(event, data) assert not event.locations def test_update_and_remove(self): location = self.Location(", "test_assigment_and_deletion(self): self.save() event = self.session.query(self.Event).first() assert event.locations[0].name == u'Some location' data = {", "def __repr__(self): return 'Location(id=%r, name=%r)' % (self.id, self.name) self.Event = Event self.Location =", "assert form.locations[0].data['id'] def test_single_entry_update(self): event = self.save() location_id = event.locations[0].id data = {", "1 description', 'locations-1-name': u'Location 2', 'locations-1-description': u'Location 2 description', } event = self.save(data=data)", "event = self.save(data=data) self.session.commit() data = { 'name': u'Some event', 'locations-0-id': event.locations[1].id, 'locations-0-name':", "class Meta: model = self.Event locations = ModelFieldList( FormField(LocationForm), population_strategy='update' ) self.LocationForm =", "1 def test_replace_and_update(self): data = { 'name': u'Some event', 'locations-0-name': u'Location 1', 'locations-0-description':", "self.LocationForm = LocationForm self.EventForm = EventForm def test_assigment_and_deletion(self): self.save() event = self.session.query(self.Event).first() assert", "event = self.save() location_id = event.locations[0].id data = { 'name': u'Some event', 'locations-0-name':", "event = self.save() form = self.EventForm(MultiDict(), obj=event) assert form.locations[0].data['id'] def test_single_entry_update(self): event =", "location.name == u'Some other location' assert location.description == u'' assert location.type == u''", "tests import 
FormRelationsTestCase, MultiDict from wtforms_alchemy import ModelFieldList, ModelForm class ModelFieldListTestCase(FormRelationsTestCase): def create_models(self):", "location' } self.save(event, data) assert len(event.locations) == 4 assert event.locations[0].id == location_id assert", "u'Some other location', } self.save(event, data) location = event.locations[0] assert location.id != location_id", "location' def test_creates_new_objects_for_entries_with_unknown_identifiers(self): event = self.save() location_id = event.locations[0].id data = { 'name':", "sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ = 'location' TYPES =", "event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') def __repr__(self): return 'Location(id=%r, name=%r)'", "{ 'name': u'Some event', 'locations-0-id': location_id, 'locations-0-name': u'Some other location' } self.save(event, data)", "u'' assert location.type == u'' assert len(event.locations) == 1 def test_replace_and_update(self): data =", "event.locations[0].name == u'Some other location' assert event.locations[1].name == u'Some location' assert event.locations[2].name ==", "def test_update_and_remove(self): location = self.Location( name=u'Location #2' ) event = self.Event( name=u'Some event',", "assert event.locations[0].name == u'Some other location' def test_creates_new_objects_for_entries_with_unknown_identifiers(self): event = self.save() location_id =", "location_id assert location.name == u'Some other location' assert location.description == u'' assert location.type", "sa.orm.relationship(Event, backref='locations') self.Event = Event self.Location = Location def save(self, event=None, data=None): if", "assert location.type == u'' assert len(event.locations) == 1 def test_replace_and_update(self): data = {", "PassiveHiddenField from tests import FormRelationsTestCase, MultiDict from 
wtforms_alchemy import ModelFieldList, ModelForm class ModelFieldListTestCase(FormRelationsTestCase):", "event.locations[0].id != location_id def test_replace_entry(self): data = { 'name': u'Some event', 'locations-0-name': u'Some", "Event self.Location = Location def create_forms(self): class LocationForm(ModelForm): class Meta: model = self.Location", "u'' assert len(event.locations) == 1 def test_replace_and_update(self): data = { 'name': u'Some event',", "self.save() event = self.session.query(self.Event).first() assert event.locations[0].name == u'Some location' data = { 'name':", "other location' assert location.description == u'' assert location.type == u'' assert len(event.locations) ==", "== 2 assert location2.name == u'Location 3' assert location2.description == u'' def test_multiple_entries(self):", "= 'event' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__", "== location_id assert event.locations[0].name == u'Some other location' assert event.locations[1].name == u'Some location'", "'locations-0-name': u'Some other location' } self.save(event, data) assert len(event.locations) == 1 assert event.locations[0].id", "location.id != location_id assert location.name == u'Some other location' assert location.description == u''", "u'Some description' } if not event: event = self.Event() self.session.add(event) form = self.EventForm(MultiDict(data))", "self.EventForm(MultiDict(data)) form.validate() form.populate_obj(event) self.session.commit() event = self.session.query(self.Event).first() assert event.locations == [] class TestUpdateStrategy(ModelFieldListTestCase):", "2 assert location2.name == u'Location 3' assert location2.description == u'' def test_multiple_entries(self): event", "EventForm def test_assigment_and_deletion(self): self.save() event = self.session.query(self.Event).first() assert event.locations[0].name == u'Some location' data", "location_id, 
'locations-0-name': u'Some other location' } self.save(event, data) assert len(event.locations) == 1 assert", "self.save(data=data) self.session.commit() data = { 'name': u'Some event', 'locations-0-id': event.locations[1].id, 'locations-0-name': u'Location 2", "== 1 assert event.locations[0].id == location_id assert event.locations[0].name == u'Some other location' def", "self.Event locations = ModelFieldList( FormField(LocationForm), population_strategy='update' ) self.LocationForm = LocationForm self.EventForm = EventForm", "form.locations[0].data['id'] def test_single_entry_update(self): event = self.save() location_id = event.locations[0].id data = { 'name':", "sqlalchemy as sa from wtforms.fields import FormField from wtforms_components import PassiveHiddenField from tests", "2 updated', 'locations-0-description': u'Location 2 description updated', 'locations-1-name': u'Location 3', } self.save(event, data)", "description', 'locations-1-name': u'Location 2', 'locations-1-description': u'Location 2 description', } event = self.save(data=data) self.session.commit()", "id = PassiveHiddenField() class EventForm(ModelForm): class Meta: model = self.Event locations = ModelFieldList(", "model = self.Location only = ['name', 'description', 'type'] id = PassiveHiddenField() class EventForm(ModelForm):", "event = sa.orm.relationship(Event, backref='locations') self.Event = Event self.Location = Location def save(self, event=None,", "TestReplaceStrategy(ModelFieldListTestCase): def create_forms(self): class LocationForm(ModelForm): class Meta: model = self.Location class EventForm(ModelForm): class", "def test_single_entry_update(self): event = self.save() location_id = event.locations[0].id data = { 'name': u'Some", "'locations-2-name': u'Third location', 'locations-3-id': 123, 'locations-3-name': u'Fourth location' } self.save(event, data) assert len(event.locations)", "id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=False) 
class Location(self.base): __tablename__ = 'location'", "'locations-0-name': u'Some other location' } self.save(event, data) assert event.locations assert event.locations[0].id != location_id", "= Event self.Location = Location def save(self, event=None, data=None): if not data: data", "} if not event: event = self.Event() self.session.add(event) form = self.EventForm(MultiDict(data)) else: form", "} self.save(event, data) assert len(event.locations) == 1 assert event.locations[0].id == location_id assert event.locations[0].name", "class Meta: model = self.Location only = ['name', 'description', 'type'] id = PassiveHiddenField()", "= event.locations[0].id data = { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-1-id': str(location_id),", "nullable=False) class Location(self.base): __tablename__ = 'location' id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name =", "field', u'restaurant') id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) description =", "self.Location( name=u'Location #1' ), location ] ) self.session.add(event) self.session.commit() data = { 'locations-0-id':", "'locations-0-id': location.id, 'locations-0-name': u'Location', } self.save(event, data) self.session.refresh(event) assert len(event.locations) == 1 assert", "== u'Third location' assert event.locations[3].name == u'Fourth location' def test_delete_all_field_list_entries(self): event = self.save()", "u'Some other location' def test_creates_new_objects_for_entries_with_unknown_identifiers(self): event = self.save() location_id = event.locations[0].id data =", "FormField(LocationForm), population_strategy='update' ) self.LocationForm = LocationForm self.EventForm = EventForm def test_with_none_as_formdata_for_existing_objects(self): event =", "location2.description == u'' def test_multiple_entries(self): event = self.save() location_id = event.locations[0].id data =", "= self.Event 
locations = ModelFieldList( FormField(LocationForm), population_strategy='update' ) self.LocationForm = LocationForm self.EventForm =", "= PassiveHiddenField() class EventForm(ModelForm): class Meta: model = self.Event locations = ModelFieldList( FormField(LocationForm),", "location_id assert event.locations[0].name == u'Some other location' assert event.locations[1].name == u'Some location' assert", "event', 'locations-0-name': u'Some other location', } self.save(event, data) location = event.locations[0] assert location.id", "sa.Column(sa.Unicode(255), default=u'') type = sa.Column( sa.Unicode(255), info={'choices': zip(TYPES, TYPES)}, default=u'' ) event_id =", "u'Location 3' assert location2.description == u'' def test_multiple_entries(self): event = self.save() location_id =", "= sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') def __repr__(self): return 'Location(id=%r, name=%r)' %", "{ 'name': u'Some event', 'locations-0-id': 12, 'locations-0-name': u'Some other location' } self.save(event, data)", "event', 'locations-0-name': u'Location 1', 'locations-0-description': u'Location 1 description', 'locations-1-name': u'Location 2', 'locations-1-description': u'Location", "data = { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description', 'locations-0-type':", "self.save(event, data) location = event.locations[0] assert location.id != location_id assert location.name == u'Some", "self.Location class EventForm(ModelForm): class Meta: model = self.Event locations = ModelFieldList(FormField(LocationForm)) self.LocationForm =", "data = { 'name': u'Some event', 'locations-0-name': u'Some other location', } self.save(event, data)", "% (self.id, self.name) self.Event = Event self.Location = Location def create_forms(self): class LocationForm(ModelForm):", "assert event.locations[2].name == u'Third location' assert event.locations[3].name == u'Fourth location' def 
test_delete_all_field_list_entries(self): event", "wtforms_alchemy import ModelFieldList, ModelForm class ModelFieldListTestCase(FormRelationsTestCase): def create_models(self): class Event(self.base): __tablename__ = 'event'", "form = self.EventForm(MultiDict(), obj=event) assert form.locations[0].data['id'] def test_single_entry_update(self): event = self.save() location_id =", "event.locations[1].name == u'Some location' assert event.locations[2].name == u'Third location' assert event.locations[3].name == u'Fourth", "== u'' assert len(event.locations) == 1 def test_replace_and_update(self): data = { 'name': u'Some", "self.session.query(self.Event).first() assert event.locations == [] class TestUpdateStrategy(ModelFieldListTestCase): def create_models(self): class Event(self.base): __tablename__ =", "u'Some other location', 'locations-2-name': u'Third location', 'locations-3-id': 123, 'locations-3-name': u'Fourth location' } self.save(event,", "ModelFieldList( FormField(LocationForm), population_strategy='update' ) self.LocationForm = LocationForm self.EventForm = EventForm def test_with_none_as_formdata_for_existing_objects(self): event", "u'Some other location' assert event.locations[1].name == u'Some location' assert event.locations[2].name == u'Third location'", "'locations-3-name': u'Fourth location' } self.save(event, data) assert len(event.locations) == 4 assert event.locations[0].id ==", "class Event(self.base): __tablename__ = 'event' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=False)", "location_id = event.locations[0].id self.session.commit() data = { 'name': u'Some event', 'locations-0-name': u'Some other", "coercing works 'locations-1-name': u'Some other location', 'locations-2-name': u'Third location', 'locations-3-id': 123, 'locations-3-name': u'Fourth", "wtforms_components import PassiveHiddenField from tests import FormRelationsTestCase, MultiDict from wtforms_alchemy import ModelFieldList, ModelForm", 
"u'Third location' assert event.locations[3].name == u'Fourth location' def test_delete_all_field_list_entries(self): event = self.save() data", "data) location = event.locations[0] assert location.id != location_id assert location.name == u'Some other", "description updated' assert len(event.locations) == 2 assert location2.name == u'Location 3' assert location2.description", "u'Some event', 'locations-0-name': u'Location 1', 'locations-0-description': u'Location 1 description', 'locations-1-name': u'Location 2', 'locations-1-description':", "u'Some other location' } self.save(event, data) assert event.locations assert event.locations[0].id != location_id def", "u'Some location', 'locations-0-description': u'Some description', 'locations-0-type': u'restaurant' } event = self.save(data=data) location_id =", "u'Some event', 'locations-0-name': u'Some location', 'locations-1-id': str(location_id), # test coercing works 'locations-1-name': u'Some", "data) assert not event.locations def test_update_and_remove(self): location = self.Location( name=u'Location #2' ) event", "data) assert len(event.locations) == 1 assert event.locations[0].id == location_id assert event.locations[0].name == u'Some", "'locations-3-id': 123, 'locations-3-name': u'Fourth location' } self.save(event, data) assert len(event.locations) == 4 assert", "== u'Location 3' assert location2.description == u'' def test_multiple_entries(self): event = self.save() location_id", "self.Event( name=u'Some event', locations=[ self.Location( name=u'Location #1' ), location ] ) self.session.add(event) self.session.commit()", "= self.Event locations = ModelFieldList(FormField(LocationForm)) self.LocationForm = LocationForm self.EventForm = EventForm def test_assigment_and_deletion(self):", "= event.locations[0] assert location.id != location_id assert location.name == u'Some other location' assert", "= EventForm def test_with_none_as_formdata_for_existing_objects(self): event = self.save() form = 
self.EventForm(MultiDict(), obj=event) assert form.locations[0].data['id']", "data = { 'name': u'Some event', 'locations-0-id': 12, 'locations-0-name': u'Some other location' }", "event.locations assert event.locations[0].id != location_id def test_replace_entry(self): data = { 'name': u'Some event',", "assert event.locations[0].name == u'Some other location' assert event.locations[1].name == u'Some location' assert event.locations[2].name", "self.save(event, data) assert event.locations assert event.locations[0].id != location_id def test_replace_entry(self): data = {", "= { 'name': u'Some event', 'locations-0-id': location_id, 'locations-0-name': u'Some other location' } self.save(event,", "'name': u'Some event', 'locations-0-name': u'Some other location', } self.save(event, data) location = event.locations[0]", "u'Location 3', } self.save(event, data) self.session.commit() location = event.locations[0] location2 = event.locations[1] assert", "len(event.locations) == 2 assert location2.name == u'Location 3' assert location2.description == u'' def", "id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id))", "self.save() form = self.EventForm(MultiDict(), obj=event) assert form.locations[0].data['id'] def test_single_entry_update(self): event = self.save() location_id", "event.locations[3].name == u'Fourth location' def test_delete_all_field_list_entries(self): event = self.save() data = { 'name':", "class EventForm(ModelForm): class Meta: model = self.Event locations = ModelFieldList( FormField(LocationForm), population_strategy='update' )", "event', 'locations-0-name': u'Some location', 'locations-1-id': str(location_id), # test coercing works 'locations-1-name': u'Some other", "data) assert event.locations assert event.locations[0].id != location_id def test_replace_entry(self): data = { 'name':", "== u'' assert location.type == u'' assert 
len(event.locations) == 1 def test_replace_and_update(self): data", "u'Location 2 description', } event = self.save(data=data) self.session.commit() data = { 'name': u'Some", "= self.Location only = ['name', 'description', 'type'] id = PassiveHiddenField() class EventForm(ModelForm): class", "location', 'locations-1-id': str(location_id), # test coercing works 'locations-1-name': u'Some other location', 'locations-2-name': u'Third", "2 description updated' assert len(event.locations) == 2 assert location2.name == u'Location 3' assert", "= self.Location( name=u'Location #2' ) event = self.Event( name=u'Some event', locations=[ self.Location( name=u'Location", "location = event.locations[0] assert location.id != location_id assert location.name == u'Some other location'", "location' assert event.locations[2].name == u'Third location' assert event.locations[3].name == u'Fourth location' def test_delete_all_field_list_entries(self):", "name=u'Location #2' ) event = self.Event( name=u'Some event', locations=[ self.Location( name=u'Location #1' ),", "= sa.Column(sa.Unicode(255), default=u'') type = sa.Column( sa.Unicode(255), info={'choices': zip(TYPES, TYPES)}, default=u'' ) event_id", "default=u'') type = sa.Column( sa.Unicode(255), info={'choices': zip(TYPES, TYPES)}, default=u'' ) event_id = sa.Column(sa.Integer,", "Meta: model = self.Event locations = ModelFieldList( FormField(LocationForm), population_strategy='update' ) self.LocationForm = LocationForm", "u'Some other location' } self.save(event, data) assert len(event.locations) == 1 assert event.locations[0].id ==", "Event(self.base): __tablename__ = 'event' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=False) class", "self.EventForm(MultiDict(), obj=event) assert form.locations[0].data['id'] def test_single_entry_update(self): event = self.save() location_id = event.locations[0].id data", "self.save() location_id = event.locations[0].id data = { 'name': u'Some event', 
'locations-0-id': location_id, 'locations-0-name':", "'locations-0-type': u'restaurant' } event = self.save(data=data) location_id = event.locations[0].id self.session.commit() data = {", "FormField from wtforms_components import PassiveHiddenField from tests import FormRelationsTestCase, MultiDict from wtforms_alchemy import", "model = self.Location class EventForm(ModelForm): class Meta: model = self.Event locations = ModelFieldList(FormField(LocationForm))", "= sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ = 'location' TYPES = (u'', u'football field',", "event.locations[1].id, 'locations-0-name': u'Location 2 updated', 'locations-0-description': u'Location 2 description updated', 'locations-1-name': u'Location 3',", "event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') self.Event = Event self.Location =", "'locations-1-id': str(location_id), # test coercing works 'locations-1-name': u'Some other location', 'locations-2-name': u'Third location',", ") self.LocationForm = LocationForm self.EventForm = EventForm def test_with_none_as_formdata_for_existing_objects(self): event = self.save() form", "self.save() location_id = event.locations[0].id data = { 'name': u'Some event', 'locations-0-name': u'Some location',", "= self.save(data=data) self.session.commit() data = { 'name': u'Some event', 'locations-0-id': event.locations[1].id, 'locations-0-name': u'Location", "data = { 'name': u'Some event' } form = self.EventForm(MultiDict(data)) form.validate() form.populate_obj(event) self.session.commit()", "test coercing works 'locations-1-name': u'Some other location', 'locations-2-name': u'Third location', 'locations-3-id': 123, 'locations-3-name':", "class LocationForm(ModelForm): class Meta: model = self.Location only = ['name', 'description', 'type'] id", "u'Location', } self.save(event, data) self.session.refresh(event) assert len(event.locations) == 1 assert 
event.locations[0] == location", "other location', 'locations-2-name': u'Third location', 'locations-3-id': 123, 'locations-3-name': u'Fourth location' } self.save(event, data)", "'name': u'Some event', 'locations-0-id': event.locations[1].id, 'locations-0-name': u'Location 2 updated', 'locations-0-description': u'Location 2 description", "def test_assigment_and_deletion(self): self.save() event = self.session.query(self.Event).first() assert event.locations[0].name == u'Some location' data =", "u'Third location', 'locations-3-id': 123, 'locations-3-name': u'Fourth location' } self.save(event, data) assert len(event.locations) ==", "event', locations=[ self.Location( name=u'Location #1' ), location ] ) self.session.add(event) self.session.commit() data =", "event' } self.save(event, data) assert not event.locations def test_update_and_remove(self): location = self.Location( name=u'Location", "updated', 'locations-1-name': u'Location 3', } self.save(event, data) self.session.commit() location = event.locations[0] location2 =", "test_update_and_remove(self): location = self.Location( name=u'Location #2' ) event = self.Event( name=u'Some event', locations=[", "assert location2.name == u'Location 3' assert location2.description == u'' def test_multiple_entries(self): event =", "#2' ) event = self.Event( name=u'Some event', locations=[ self.Location( name=u'Location #1' ), location", "= Location def create_forms(self): class LocationForm(ModelForm): class Meta: model = self.Location only =", "event = self.save() location_id = event.locations[0].id data = { 'name': u'Some event', 'locations-0-id':", "'location' id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) event_id = sa.Column(sa.Integer,", "__tablename__ = 'location' TYPES = (u'', u'football field', u'restaurant') id = sa.Column(sa.Integer, autoincrement=True,", "wtforms.fields import FormField from wtforms_components import PassiveHiddenField from 
tests import FormRelationsTestCase, MultiDict from", "location_id assert event.locations[0].name == u'Some other location' def test_creates_new_objects_for_entries_with_unknown_identifiers(self): event = self.save() location_id", "Event self.Location = Location def save(self, event=None, data=None): if not data: data =", "backref='locations') self.Event = Event self.Location = Location def save(self, event=None, data=None): if not", "= event.locations[1] assert location.name == u'Location 2 updated' assert location.description == u'Location 2", "sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') self.Event = Event self.Location = Location def save(self,", "updated' assert len(event.locations) == 2 assert location2.name == u'Location 3' assert location2.description ==", "['name', 'description', 'type'] id = PassiveHiddenField() class EventForm(ModelForm): class Meta: model = self.Event", "from tests import FormRelationsTestCase, MultiDict from wtforms_alchemy import ModelFieldList, ModelForm class ModelFieldListTestCase(FormRelationsTestCase): def", "assert location.name == u'Some other location' assert location.description == u'' assert location.type ==", "u'Some event', 'locations-0-name': u'Some other location', } self.save(event, data) location = event.locations[0] assert", "= self.Location class EventForm(ModelForm): class Meta: model = self.Event locations = ModelFieldList(FormField(LocationForm)) self.LocationForm", "self.session.commit() data = { 'name': u'Some event', 'locations-0-id': event.locations[1].id, 'locations-0-name': u'Location 2 updated',", "def test_with_none_as_formdata_for_existing_objects(self): event = self.save() form = self.EventForm(MultiDict(), obj=event) assert form.locations[0].data['id'] def test_single_entry_update(self):", "} self.save(event, data) location = event.locations[0] assert location.id != location_id assert location.name ==", "!= location_id def test_replace_entry(self): data = { 'name': 
u'Some event', 'locations-0-name': u'Some location',", "'locations-0-name': u'Some location', 'locations-0-description': u'Some description' } if not event: event = self.Event()", "sa from wtforms.fields import FormField from wtforms_components import PassiveHiddenField from tests import FormRelationsTestCase,", "__tablename__ = 'event' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=False) class Location(self.base):", "name = sa.Column(sa.Unicode(255), nullable=True) description = sa.Column(sa.Unicode(255), default=u'') type = sa.Column( sa.Unicode(255), info={'choices':", "def create_forms(self): class LocationForm(ModelForm): class Meta: model = self.Location only = ['name', 'description',", "assert location.description == u'' assert location.type == u'' assert len(event.locations) == 1 def", "event = sa.orm.relationship(Event, backref='locations') def __repr__(self): return 'Location(id=%r, name=%r)' % (self.id, self.name) self.Event", "other location' } self.save(event, data) assert len(event.locations) == 1 assert event.locations[0].id == location_id", "'locations-0-name': u'Location 1', 'locations-0-description': u'Location 1 description', 'locations-1-name': u'Location 2', 'locations-1-description': u'Location 2", "= { 'name': u'Some event', 'locations-0-id': 12, 'locations-0-name': u'Some other location' } self.save(event,", "= self.EventForm(MultiDict(data)) else: form = self.EventForm(MultiDict(data), obj=event) form.validate() form.populate_obj(event) self.session.commit() return event class", "self.name) self.Event = Event self.Location = Location def create_forms(self): class LocationForm(ModelForm): class Meta:", "class Meta: model = self.Location class EventForm(ModelForm): class Meta: model = self.Event locations", "test_multiple_entries(self): event = self.save() location_id = event.locations[0].id data = { 'name': u'Some event',", "event.locations[0] assert location.id != location_id assert location.name == 
u'Some other location' assert location.description", "self.session.commit() return event class TestReplaceStrategy(ModelFieldListTestCase): def create_forms(self): class LocationForm(ModelForm): class Meta: model =", "= self.EventForm(MultiDict(data)) form.validate() form.populate_obj(event) self.session.commit() event = self.session.query(self.Event).first() assert event.locations == [] class", "as sa from wtforms.fields import FormField from wtforms_components import PassiveHiddenField from tests import", "location_id def test_replace_entry(self): data = { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description':", "u'Fourth location' } self.save(event, data) assert len(event.locations) == 4 assert event.locations[0].id == location_id", "PassiveHiddenField() class EventForm(ModelForm): class Meta: model = self.Event locations = ModelFieldList( FormField(LocationForm), population_strategy='update'", "1 assert event.locations[0].id == location_id assert event.locations[0].name == u'Some other location' def test_creates_new_objects_for_entries_with_unknown_identifiers(self):", "assert len(event.locations) == 1 def test_replace_and_update(self): data = { 'name': u'Some event', 'locations-0-name':", "class TestReplaceStrategy(ModelFieldListTestCase): def create_forms(self): class LocationForm(ModelForm): class Meta: model = self.Location class EventForm(ModelForm):", "self.save(event, data) assert len(event.locations) == 4 assert event.locations[0].id == location_id assert event.locations[0].name ==", "form.validate() form.populate_obj(event) self.session.commit() event = self.session.query(self.Event).first() assert event.locations == [] class TestUpdateStrategy(ModelFieldListTestCase): def", "self.EventForm(MultiDict(data), obj=event) form.validate() form.populate_obj(event) self.session.commit() return event class TestReplaceStrategy(ModelFieldListTestCase): def create_forms(self): class LocationForm(ModelForm):", "name = 
sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ = 'location' id = sa.Column(sa.Integer, autoincrement=True,", "location.id, 'locations-0-name': u'Location', } self.save(event, data) self.session.refresh(event) assert len(event.locations) == 1 assert event.locations[0]", "Location(self.base): __tablename__ = 'location' id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True)", "u'Location 2', 'locations-1-description': u'Location 2 description', } event = self.save(data=data) self.session.commit() data =", "data) assert len(event.locations) == 4 assert event.locations[0].id == location_id assert event.locations[0].name == u'Some", "description updated', 'locations-1-name': u'Location 3', } self.save(event, data) self.session.commit() location = event.locations[0] location2", "backref='locations') def __repr__(self): return 'Location(id=%r, name=%r)' % (self.id, self.name) self.Event = Event self.Location", "MultiDict from wtforms_alchemy import ModelFieldList, ModelForm class ModelFieldListTestCase(FormRelationsTestCase): def create_models(self): class Event(self.base): __tablename__", "description', } event = self.save(data=data) self.session.commit() data = { 'name': u'Some event', 'locations-0-id':", "== u'Some location' assert event.locations[2].name == u'Third location' assert event.locations[3].name == u'Fourth location'", "class ModelFieldListTestCase(FormRelationsTestCase): def create_models(self): class Event(self.base): __tablename__ = 'event' id = sa.Column(sa.Integer, primary_key=True)", "u'restaurant' } event = self.save(data=data) location_id = event.locations[0].id self.session.commit() data = { 'name':", "assert event.locations == [] class TestUpdateStrategy(ModelFieldListTestCase): def create_models(self): class Event(self.base): __tablename__ = 'event'", "= { 'name': u'Some event' } self.save(event, data) assert not event.locations def 
test_update_and_remove(self):", "= { 'name': u'Some event' } form = self.EventForm(MultiDict(data)) form.validate() form.populate_obj(event) self.session.commit() event", "u'Some event', 'locations-0-id': event.locations[1].id, 'locations-0-name': u'Location 2 updated', 'locations-0-description': u'Location 2 description updated',", "= Event self.Location = Location def create_forms(self): class LocationForm(ModelForm): class Meta: model =", "self.session.add(event) form = self.EventForm(MultiDict(data)) else: form = self.EventForm(MultiDict(data), obj=event) form.validate() form.populate_obj(event) self.session.commit() return", "(self.id, self.name) self.Event = Event self.Location = Location def create_forms(self): class LocationForm(ModelForm): class", "ModelFieldListTestCase(FormRelationsTestCase): def create_models(self): class Event(self.base): __tablename__ = 'event' id = sa.Column(sa.Integer, primary_key=True) name", "event class TestReplaceStrategy(ModelFieldListTestCase): def create_forms(self): class LocationForm(ModelForm): class Meta: model = self.Location class", "event', 'locations-0-id': location_id, 'locations-0-name': u'Some other location' } self.save(event, data) assert len(event.locations) ==", "= { 'locations-0-id': location.id, 'locations-0-name': u'Location', } self.save(event, data) self.session.refresh(event) assert len(event.locations) ==", "u'football field', u'restaurant') id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) description", "'locations-1-name': u'Location 2', 'locations-1-description': u'Location 2 description', } event = self.save(data=data) self.session.commit() data", "= sa.Column(sa.Unicode(255), nullable=True) description = sa.Column(sa.Unicode(255), default=u'') type = sa.Column( sa.Unicode(255), info={'choices': zip(TYPES,", "sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') self.Event = Event 
self.Location = Location def", "def create_forms(self): class LocationForm(ModelForm): class Meta: model = self.Location class EventForm(ModelForm): class Meta:", "__tablename__ = 'location' id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) event_id", "#1' ), location ] ) self.session.add(event) self.session.commit() data = { 'locations-0-id': location.id, 'locations-0-name':", "sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ = 'location' id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name", "'event' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ =", "location_id = event.locations[0].id data = { 'name': u'Some event', 'locations-0-id': 12, 'locations-0-name': u'Some", "autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) description = sa.Column(sa.Unicode(255), default=u'') type = sa.Column(", "{ 'locations-0-id': location.id, 'locations-0-name': u'Location', } self.save(event, data) self.session.refresh(event) assert len(event.locations) == 1", "FormRelationsTestCase, MultiDict from wtforms_alchemy import ModelFieldList, ModelForm class ModelFieldListTestCase(FormRelationsTestCase): def create_models(self): class Event(self.base):", "location', 'locations-3-id': 123, 'locations-3-name': u'Fourth location' } self.save(event, data) assert len(event.locations) == 4", "class Meta: model = self.Event locations = ModelFieldList(FormField(LocationForm)) self.LocationForm = LocationForm self.EventForm =", "assert event.locations[3].name == u'Fourth location' def test_delete_all_field_list_entries(self): event = self.save() data = {", "sa.Column( sa.Unicode(255), info={'choices': zip(TYPES, TYPES)}, default=u'' ) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event =", "location' assert location.description == u'' assert 
location.type == u'' assert len(event.locations) == 1", "data = { 'locations-0-id': location.id, 'locations-0-name': u'Location', } self.save(event, data) self.session.refresh(event) assert len(event.locations)", "{ 'name': u'Some event', 'locations-0-name': u'Some other location', } self.save(event, data) location =", "'description', 'type'] id = PassiveHiddenField() class EventForm(ModelForm): class Meta: model = self.Event locations", "= event.locations[0] location2 = event.locations[1] assert location.name == u'Location 2 updated' assert location.description", "2 description', } event = self.save(data=data) self.session.commit() data = { 'name': u'Some event',", "# test coercing works 'locations-1-name': u'Some other location', 'locations-2-name': u'Third location', 'locations-3-id': 123,", "3' assert location2.description == u'' def test_multiple_entries(self): event = self.save() location_id = event.locations[0].id", "assert event.locations assert event.locations[0].id != location_id def test_replace_entry(self): data = { 'name': u'Some", "name = sa.Column(sa.Unicode(255), nullable=True) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') self.Event", "self.session.query(self.Event).first() assert event.locations[0].name == u'Some location' data = { 'name': u'Some event' }", "other location' } self.save(event, data) assert event.locations assert event.locations[0].id != location_id def test_replace_entry(self):", "= self.session.query(self.Event).first() assert event.locations[0].name == u'Some location' data = { 'name': u'Some event'", "self.EventForm = EventForm def test_assigment_and_deletion(self): self.save() event = self.session.query(self.Event).first() assert event.locations[0].name == u'Some", "(u'', u'football field', u'restaurant') id = sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True)", "location_id = event.locations[0].id data 
= { 'name': u'Some event', 'locations-0-id': location_id, 'locations-0-name': u'Some", "} event = self.save(data=data) self.session.commit() data = { 'name': u'Some event', 'locations-0-id': event.locations[1].id,", "not event.locations def test_update_and_remove(self): location = self.Location( name=u'Location #2' ) event = self.Event(", "self.Event locations = ModelFieldList(FormField(LocationForm)) self.LocationForm = LocationForm self.EventForm = EventForm def test_assigment_and_deletion(self): self.save()", "= sa.Column(sa.Unicode(255), nullable=True) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') self.Event =", "{ 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description', 'locations-0-type': u'restaurant' }", "'locations-1-name': u'Location 3', } self.save(event, data) self.session.commit() location = event.locations[0] location2 = event.locations[1]", "from wtforms.fields import FormField from wtforms_components import PassiveHiddenField from tests import FormRelationsTestCase, MultiDict", "== location_id assert event.locations[0].name == u'Some other location' def test_creates_new_objects_for_entries_with_unknown_identifiers(self): event = self.save()", "= sa.Column(sa.Integer, autoincrement=True, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=True) description = sa.Column(sa.Unicode(255), default=u'') type", "event = self.save() data = { 'name': u'Some event' } self.save(event, data) assert", "= self.save() location_id = event.locations[0].id data = { 'name': u'Some event', 'locations-0-name': u'Some", "sa.Column(sa.Unicode(255), nullable=True) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') self.Event = Event", "len(event.locations) == 1 def test_replace_and_update(self): data = { 'name': u'Some event', 'locations-0-name': u'Location", "= 
sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ = 'location' id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)", "location' } self.save(event, data) assert len(event.locations) == 1 assert event.locations[0].id == location_id assert", "data = { 'name': u'Some event', 'locations-0-name': u'Location 1', 'locations-0-description': u'Location 1 description',", "u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some description' } if not event: event", "from wtforms_alchemy import ModelFieldList, ModelForm class ModelFieldListTestCase(FormRelationsTestCase): def create_models(self): class Event(self.base): __tablename__ =", "event.locations[0].id data = { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-1-id': str(location_id), #", "import ModelFieldList, ModelForm class ModelFieldListTestCase(FormRelationsTestCase): def create_models(self): class Event(self.base): __tablename__ = 'event' id", "event.locations def test_update_and_remove(self): location = self.Location( name=u'Location #2' ) event = self.Event( name=u'Some", "'locations-0-id': event.locations[1].id, 'locations-0-name': u'Location 2 updated', 'locations-0-description': u'Location 2 description updated', 'locations-1-name': u'Location", "= event.locations[0].id data = { 'name': u'Some event', 'locations-0-id': 12, 'locations-0-name': u'Some other", "{ 'name': u'Some event' } form = self.EventForm(MultiDict(data)) form.validate() form.populate_obj(event) self.session.commit() event =", "TYPES)}, default=u'' ) event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') def __repr__(self):", "event = self.session.query(self.Event).first() assert event.locations[0].name == u'Some location' data = { 'name': u'Some", "'name': u'Some event', 'locations-0-id': 12, 'locations-0-name': u'Some other location' } self.save(event, data) assert", 
"sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ = 'location' id =", "not data: data = { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-0-description': u'Some", "u'Location 2 description updated' assert len(event.locations) == 2 assert location2.name == u'Location 3'", "name = sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ = 'location' TYPES = (u'', u'football", ") event_id = sa.Column(sa.Integer, sa.ForeignKey(Event.id)) event = sa.orm.relationship(Event, backref='locations') def __repr__(self): return 'Location(id=%r,", "= sa.orm.relationship(Event, backref='locations') def __repr__(self): return 'Location(id=%r, name=%r)' % (self.id, self.name) self.Event =", "location_id = event.locations[0].id data = { 'name': u'Some event', 'locations-0-name': u'Some location', 'locations-1-id':", "locations = ModelFieldList(FormField(LocationForm)) self.LocationForm = LocationForm self.EventForm = EventForm def test_assigment_and_deletion(self): self.save() event", "u'Some location', 'locations-0-description': u'Some description' } if not event: event = self.Event() self.session.add(event)", "self.Location = Location def create_forms(self): class LocationForm(ModelForm): class Meta: model = self.Location only", "location = event.locations[0] location2 = event.locations[1] assert location.name == u'Location 2 updated' assert", "= LocationForm self.EventForm = EventForm def test_assigment_and_deletion(self): self.save() event = self.session.query(self.Event).first() assert event.locations[0].name", "u'Some event' } form = self.EventForm(MultiDict(data)) form.validate() form.populate_obj(event) self.session.commit() event = self.session.query(self.Event).first() assert", "self.LocationForm = LocationForm self.EventForm = EventForm def test_with_none_as_formdata_for_existing_objects(self): event = self.save() form =", "location' data = 
{ 'name': u'Some event' } form = self.EventForm(MultiDict(data)) form.validate() form.populate_obj(event)", "primary_key=True) name = sa.Column(sa.Unicode(255), nullable=False) class Location(self.base): __tablename__ = 'location' id = sa.Column(sa.Integer," ]
[]
[ "lc_read_write_Mat import read_mat import sys sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils') sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication') # X fileName = r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709' dataset_name", "# predict and test comp = [prd - y][0] Acc = np.sum(comp ==", "as np import pandas as pd from lc_read_write_Mat import read_mat import sys sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils')", "= [int(d) for d in y] # predict and test comp = [prd", "s['诊断'].values # comb xandy = pd.concat([pd.DataFrame(dgns), X], axis=1) # NaN xandy = xandy.dropna()", "d in y] # predict and test comp = [prd - y][0] Acc", "test comp = [prd - y][0] Acc = np.sum(comp == 0) / len(comp)", "numpy as np import pandas as pd from lc_read_write_Mat import read_mat import sys", "X fileName = r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709' dataset_name = 'coef' dataset_struct, dataset = read_mat(fileName, dataset_name) X", "= read_mat(fileName, dataset_name) X = dataset X = pd.DataFrame(X) # y s =", "0].values X = np.reshape(X, [len(X), X.shape[1]]) y = [int(d) for d in y]", "dgns = s['诊断'].values # comb xandy = pd.concat([pd.DataFrame(dgns), X], axis=1) # NaN xandy", "= xandy.dropna() # X = xandy.iloc[:, 1:].values y = xandy.iloc[:, 0].values X =", "axis=1) # NaN xandy = xandy.dropna() # X = xandy.iloc[:, 1:].values y =", "= pd.DataFrame(X) # y s = pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx') dgns = s['诊断'].values # comb xandy", "-*- coding: utf-8 -*- \"\"\" Created on Tue Aug 28 15:07:49 2018 @author:", "coding: utf-8 -*- \"\"\" Created on Tue Aug 28 15:07:49 2018 @author: lenovo", "import oneVsRest import numpy as np import pandas as pd from lc_read_write_Mat import", "np.reshape(X, [len(X), X.shape[1]]) y = [int(d) for d in y] # predict and", "import pandas as pd from lc_read_write_Mat import read_mat import sys sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils') 
sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication') #", "[int(d) for d in y] # predict and test comp = [prd -", "dataset_name = 'coef' dataset_struct, dataset = read_mat(fileName, dataset_name) X = dataset X =", "dataset = read_mat(fileName, dataset_name) X = dataset X = pd.DataFrame(X) # y s", "# -*- coding: utf-8 -*- \"\"\" Created on Tue Aug 28 15:07:49 2018", "xandy.dropna() # X = xandy.iloc[:, 1:].values y = xandy.iloc[:, 0].values X = np.reshape(X,", "X = np.reshape(X, [len(X), X.shape[1]]) y = [int(d) for d in y] #", "sys sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils') sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication') # X fileName = r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709' dataset_name = 'coef' dataset_struct, dataset", "s = pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx') dgns = s['诊断'].values # comb xandy = pd.concat([pd.DataFrame(dgns), X], axis=1)", "xandy = xandy.dropna() # X = xandy.iloc[:, 1:].values y = xandy.iloc[:, 0].values X", "Tue Aug 28 15:07:49 2018 @author: lenovo \"\"\" from lc_svc_oneVsRest import oneVsRest import", "as pd from lc_read_write_Mat import read_mat import sys sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils') sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication') # X fileName", "pandas as pd from lc_read_write_Mat import read_mat import sys sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils') sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication') # X", "xandy.iloc[:, 1:].values y = xandy.iloc[:, 0].values X = np.reshape(X, [len(X), X.shape[1]]) y =", "= s['诊断'].values # comb xandy = pd.concat([pd.DataFrame(dgns), X], axis=1) # NaN xandy =", "Created on Tue Aug 28 15:07:49 2018 @author: lenovo \"\"\" from lc_svc_oneVsRest import", "oneVsRest import numpy as np import pandas as pd from lc_read_write_Mat import read_mat", "and test comp = [prd - y][0] Acc = np.sum(comp == 0) /", 
"y] # predict and test comp = [prd - y][0] Acc = np.sum(comp", "NaN xandy = xandy.dropna() # X = xandy.iloc[:, 1:].values y = xandy.iloc[:, 0].values", "from lc_read_write_Mat import read_mat import sys sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils') sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication') # X fileName = r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709'", "r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709' dataset_name = 'coef' dataset_struct, dataset = read_mat(fileName, dataset_name) X = dataset X", "pd.DataFrame(X) # y s = pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx') dgns = s['诊断'].values # comb xandy =", "1:].values y = xandy.iloc[:, 0].values X = np.reshape(X, [len(X), X.shape[1]]) y = [int(d)", "= xandy.iloc[:, 1:].values y = xandy.iloc[:, 0].values X = np.reshape(X, [len(X), X.shape[1]]) y", "X = dataset X = pd.DataFrame(X) # y s = pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx') dgns =", "read_mat(fileName, dataset_name) X = dataset X = pd.DataFrame(X) # y s = pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx')", "@author: lenovo \"\"\" from lc_svc_oneVsRest import oneVsRest import numpy as np import pandas", "<reponame>dongmengshi/easylearn<filename>eslearn/utils/test.py # -*- coding: utf-8 -*- \"\"\" Created on Tue Aug 28 15:07:49", "fileName = r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709' dataset_name = 'coef' dataset_struct, dataset = read_mat(fileName, dataset_name) X =", "for d in y] # predict and test comp = [prd - y][0]", "dataset X = pd.DataFrame(X) # y s = pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx') dgns = s['诊断'].values #", "xandy = pd.concat([pd.DataFrame(dgns), X], axis=1) # NaN xandy = xandy.dropna() # X =", "# y s = pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx') dgns = s['诊断'].values # comb xandy = pd.concat([pd.DataFrame(dgns),", "X = xandy.iloc[:, 1:].values y = xandy.iloc[:, 0].values X = np.reshape(X, [len(X), X.shape[1]])", "\"\"\" Created on Tue Aug 28 15:07:49 2018 @author: lenovo \"\"\" 
from lc_svc_oneVsRest", "15:07:49 2018 @author: lenovo \"\"\" from lc_svc_oneVsRest import oneVsRest import numpy as np", "read_mat import sys sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils') sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication') # X fileName = r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709' dataset_name = 'coef'", "import read_mat import sys sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils') sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication') # X fileName = r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709' dataset_name =", "lc_svc_oneVsRest import oneVsRest import numpy as np import pandas as pd from lc_read_write_Mat", "sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication') # X fileName = r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709' dataset_name = 'coef' dataset_struct, dataset = read_mat(fileName,", "2018 @author: lenovo \"\"\" from lc_svc_oneVsRest import oneVsRest import numpy as np import", "# comb xandy = pd.concat([pd.DataFrame(dgns), X], axis=1) # NaN xandy = xandy.dropna() #", "= 'coef' dataset_struct, dataset = read_mat(fileName, dataset_name) X = dataset X = pd.DataFrame(X)", "X], axis=1) # NaN xandy = xandy.dropna() # X = xandy.iloc[:, 1:].values y", "# X fileName = r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709' dataset_name = 'coef' dataset_struct, dataset = read_mat(fileName, dataset_name)", "\"\"\" from lc_svc_oneVsRest import oneVsRest import numpy as np import pandas as pd", "pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx') dgns = s['诊断'].values # comb xandy = pd.concat([pd.DataFrame(dgns), X], axis=1) # NaN", "pd.concat([pd.DataFrame(dgns), X], axis=1) # NaN xandy = xandy.dropna() # X = xandy.iloc[:, 1:].values", "dataset_name) X = dataset X = pd.DataFrame(X) # y s = pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx') dgns", "28 15:07:49 2018 @author: lenovo \"\"\" from lc_svc_oneVsRest import oneVsRest import numpy as", "xandy.iloc[:, 0].values X = 
np.reshape(X, [len(X), X.shape[1]]) y = [int(d) for d in", "X = pd.DataFrame(X) # y s = pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx') dgns = s['诊断'].values # comb", "predict and test comp = [prd - y][0] Acc = np.sum(comp == 0)", "pd from lc_read_write_Mat import read_mat import sys sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils') sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication') # X fileName =", "# NaN xandy = xandy.dropna() # X = xandy.iloc[:, 1:].values y = xandy.iloc[:,", "X.shape[1]]) y = [int(d) for d in y] # predict and test comp", "= xandy.iloc[:, 0].values X = np.reshape(X, [len(X), X.shape[1]]) y = [int(d) for d", "utf-8 -*- \"\"\" Created on Tue Aug 28 15:07:49 2018 @author: lenovo \"\"\"", "y s = pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx') dgns = s['诊断'].values # comb xandy = pd.concat([pd.DataFrame(dgns), X],", "= np.reshape(X, [len(X), X.shape[1]]) y = [int(d) for d in y] # predict", "lenovo \"\"\" from lc_svc_oneVsRest import oneVsRest import numpy as np import pandas as", "np import pandas as pd from lc_read_write_Mat import read_mat import sys sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils') sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication')", "from lc_svc_oneVsRest import oneVsRest import numpy as np import pandas as pd from", "comb xandy = pd.concat([pd.DataFrame(dgns), X], axis=1) # NaN xandy = xandy.dropna() # X", "= pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx') dgns = s['诊断'].values # comb xandy = pd.concat([pd.DataFrame(dgns), X], axis=1) #", "Aug 28 15:07:49 2018 @author: lenovo \"\"\" from lc_svc_oneVsRest import oneVsRest import numpy", "y = [int(d) for d in y] # predict and test comp =", "in y] # predict and test comp = [prd - y][0] Acc =", "'coef' dataset_struct, dataset = read_mat(fileName, dataset_name) X = dataset X = pd.DataFrame(X) #", "= r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709' dataset_name = 'coef' dataset_struct, dataset = 
read_mat(fileName, dataset_name) X = dataset", "dataset_struct, dataset = read_mat(fileName, dataset_name) X = dataset X = pd.DataFrame(X) # y", "[len(X), X.shape[1]]) y = [int(d) for d in y] # predict and test", "= dataset X = pd.DataFrame(X) # y s = pd.read_excel(r'J:\\分类测试_20180828\\机器学习-ID.xlsx') dgns = s['诊断'].values", "sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils') sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication') # X fileName = r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709' dataset_name = 'coef' dataset_struct, dataset =", "y = xandy.iloc[:, 0].values X = np.reshape(X, [len(X), X.shape[1]]) y = [int(d) for", "# X = xandy.iloc[:, 1:].values y = xandy.iloc[:, 0].values X = np.reshape(X, [len(X),", "-*- \"\"\" Created on Tue Aug 28 15:07:49 2018 @author: lenovo \"\"\" from", "on Tue Aug 28 15:07:49 2018 @author: lenovo \"\"\" from lc_svc_oneVsRest import oneVsRest", "= pd.concat([pd.DataFrame(dgns), X], axis=1) # NaN xandy = xandy.dropna() # X = xandy.iloc[:,", "import numpy as np import pandas as pd from lc_read_write_Mat import read_mat import", "import sys sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\utils') sys.path.append(r'D:\\myCodes\\LC_MVPA\\Python\\MVPA_Python\\classfication') # X fileName = r'J:\\分类测试_20180828\\Ne-L_VS_Ne-R_n=709' dataset_name = 'coef' dataset_struct," ]
[ "import views app_name = 'datamgt' urlpatterns = [ #path('', views.DataMgtPageView.as_view(), name='index'), path('', views.create,", "<filename>datamgt/urls.py from django.urls import path from . import views app_name = 'datamgt' urlpatterns", "django.urls import path from . import views app_name = 'datamgt' urlpatterns = [", "views app_name = 'datamgt' urlpatterns = [ #path('', views.DataMgtPageView.as_view(), name='index'), path('', views.create, name='index'),", ". import views app_name = 'datamgt' urlpatterns = [ #path('', views.DataMgtPageView.as_view(), name='index'), path('',", "from . import views app_name = 'datamgt' urlpatterns = [ #path('', views.DataMgtPageView.as_view(), name='index'),", "from django.urls import path from . import views app_name = 'datamgt' urlpatterns =", "app_name = 'datamgt' urlpatterns = [ #path('', views.DataMgtPageView.as_view(), name='index'), path('', views.create, name='index'), ]", "import path from . import views app_name = 'datamgt' urlpatterns = [ #path('',", "path from . import views app_name = 'datamgt' urlpatterns = [ #path('', views.DataMgtPageView.as_view()," ]
[ "Optional class SemselException(Exception): \"\"\"Module-wide exception namespace.\"\"\" def __init__(self, message: str): \"\"\"Initialize the exception", "during BNF-grammar parsing / transformation failures.\"\"\" pass class InvalidExpression(SemselException): \"\"\"Raised when evaluation of", "message \"\"\" super().__init__(message) self.message = message class ParseFailure(SemselException): \"\"\"Raised during BNF-grammar parsing /", "BNF-grammar parsing / transformation failures.\"\"\" pass class InvalidExpression(SemselException): \"\"\"Raised when evaluation of an", "<https://choosealicense.com/licenses/isc> \"\"\"Contains custom exceptions and errors.\"\"\" from typing import Optional class SemselException(Exception): \"\"\"Module-wide", "typing import Optional class SemselException(Exception): \"\"\"Module-wide exception namespace.\"\"\" def __init__(self, message: str): \"\"\"Initialize", "(c) 2019 <NAME> <<EMAIL>> # ISC License <https://choosealicense.com/licenses/isc> \"\"\"Contains custom exceptions and errors.\"\"\"", "the exception instance. :param str message: The user-intended exception message \"\"\" super().__init__(message) self.message", "exception instance. :param str message: The user-intended exception message \"\"\" super().__init__(message) self.message =", "user-intended exception message \"\"\" super().__init__(message) self.message = message class ParseFailure(SemselException): \"\"\"Raised during BNF-grammar", "\"\"\"Contains custom exceptions and errors.\"\"\" from typing import Optional class SemselException(Exception): \"\"\"Module-wide exception", "SemselException(Exception): \"\"\"Module-wide exception namespace.\"\"\" def __init__(self, message: str): \"\"\"Initialize the exception instance. 
:param", "The user-intended exception message \"\"\" super().__init__(message) self.message = message class ParseFailure(SemselException): \"\"\"Raised during", "2019 <NAME> <<EMAIL>> # ISC License <https://choosealicense.com/licenses/isc> \"\"\"Contains custom exceptions and errors.\"\"\" from", "import Optional class SemselException(Exception): \"\"\"Module-wide exception namespace.\"\"\" def __init__(self, message: str): \"\"\"Initialize the", "message class ParseFailure(SemselException): \"\"\"Raised during BNF-grammar parsing / transformation failures.\"\"\" pass class InvalidExpression(SemselException):", "/ transformation failures.\"\"\" pass class InvalidExpression(SemselException): \"\"\"Raised when evaluation of an expression is", "from typing import Optional class SemselException(Exception): \"\"\"Module-wide exception namespace.\"\"\" def __init__(self, message: str):", "__init__(self, message: str): \"\"\"Initialize the exception instance. :param str message: The user-intended exception", "# Copyright (c) 2019 <NAME> <<EMAIL>> # ISC License <https://choosealicense.com/licenses/isc> \"\"\"Contains custom exceptions", "encoding: utf-8 -*- # Copyright (c) 2019 <NAME> <<EMAIL>> # ISC License <https://choosealicense.com/licenses/isc>", "super().__init__(message) self.message = message class ParseFailure(SemselException): \"\"\"Raised during BNF-grammar parsing / transformation failures.\"\"\"", "and errors.\"\"\" from typing import Optional class SemselException(Exception): \"\"\"Module-wide exception namespace.\"\"\" def __init__(self,", "custom exceptions and errors.\"\"\" from typing import Optional class SemselException(Exception): \"\"\"Module-wide exception namespace.\"\"\"", "class InvalidExpression(SemselException): \"\"\"Raised when evaluation of an expression is shown to have conflicts.\"\"\"", "self.message = message class ParseFailure(SemselException): \"\"\"Raised during BNF-grammar parsing / transformation failures.\"\"\" pass", "<<EMAIL>> # 
ISC License <https://choosealicense.com/licenses/isc> \"\"\"Contains custom exceptions and errors.\"\"\" from typing import", "= message class ParseFailure(SemselException): \"\"\"Raised during BNF-grammar parsing / transformation failures.\"\"\" pass class", "message: str): \"\"\"Initialize the exception instance. :param str message: The user-intended exception message", "\"\"\"Module-wide exception namespace.\"\"\" def __init__(self, message: str): \"\"\"Initialize the exception instance. :param str", "\"\"\"Initialize the exception instance. :param str message: The user-intended exception message \"\"\" super().__init__(message)", "str): \"\"\"Initialize the exception instance. :param str message: The user-intended exception message \"\"\"", "# -*- encoding: utf-8 -*- # Copyright (c) 2019 <NAME> <<EMAIL>> # ISC", "-*- # Copyright (c) 2019 <NAME> <<EMAIL>> # ISC License <https://choosealicense.com/licenses/isc> \"\"\"Contains custom", "-*- encoding: utf-8 -*- # Copyright (c) 2019 <NAME> <<EMAIL>> # ISC License", "pass class InvalidExpression(SemselException): \"\"\"Raised when evaluation of an expression is shown to have", "\"\"\" super().__init__(message) self.message = message class ParseFailure(SemselException): \"\"\"Raised during BNF-grammar parsing / transformation", "Copyright (c) 2019 <NAME> <<EMAIL>> # ISC License <https://choosealicense.com/licenses/isc> \"\"\"Contains custom exceptions and", ":param str message: The user-intended exception message \"\"\" super().__init__(message) self.message = message class", "def __init__(self, message: str): \"\"\"Initialize the exception instance. :param str message: The user-intended", "exception namespace.\"\"\" def __init__(self, message: str): \"\"\"Initialize the exception instance. 
:param str message:", "str message: The user-intended exception message \"\"\" super().__init__(message) self.message = message class ParseFailure(SemselException):", "<NAME> <<EMAIL>> # ISC License <https://choosealicense.com/licenses/isc> \"\"\"Contains custom exceptions and errors.\"\"\" from typing", "namespace.\"\"\" def __init__(self, message: str): \"\"\"Initialize the exception instance. :param str message: The", "failures.\"\"\" pass class InvalidExpression(SemselException): \"\"\"Raised when evaluation of an expression is shown to", "InvalidExpression(SemselException): \"\"\"Raised when evaluation of an expression is shown to have conflicts.\"\"\" pass", "instance. :param str message: The user-intended exception message \"\"\" super().__init__(message) self.message = message", "exception message \"\"\" super().__init__(message) self.message = message class ParseFailure(SemselException): \"\"\"Raised during BNF-grammar parsing", "\"\"\"Raised during BNF-grammar parsing / transformation failures.\"\"\" pass class InvalidExpression(SemselException): \"\"\"Raised when evaluation", "License <https://choosealicense.com/licenses/isc> \"\"\"Contains custom exceptions and errors.\"\"\" from typing import Optional class SemselException(Exception):", "exceptions and errors.\"\"\" from typing import Optional class SemselException(Exception): \"\"\"Module-wide exception namespace.\"\"\" def", "class ParseFailure(SemselException): \"\"\"Raised during BNF-grammar parsing / transformation failures.\"\"\" pass class InvalidExpression(SemselException): \"\"\"Raised", "class SemselException(Exception): \"\"\"Module-wide exception namespace.\"\"\" def __init__(self, message: str): \"\"\"Initialize the exception instance.", "parsing / transformation failures.\"\"\" pass class InvalidExpression(SemselException): \"\"\"Raised when evaluation of an expression", "ParseFailure(SemselException): \"\"\"Raised during BNF-grammar parsing / transformation failures.\"\"\" pass class 
InvalidExpression(SemselException): \"\"\"Raised when", "transformation failures.\"\"\" pass class InvalidExpression(SemselException): \"\"\"Raised when evaluation of an expression is shown", "errors.\"\"\" from typing import Optional class SemselException(Exception): \"\"\"Module-wide exception namespace.\"\"\" def __init__(self, message:", "message: The user-intended exception message \"\"\" super().__init__(message) self.message = message class ParseFailure(SemselException): \"\"\"Raised", "# ISC License <https://choosealicense.com/licenses/isc> \"\"\"Contains custom exceptions and errors.\"\"\" from typing import Optional", "ISC License <https://choosealicense.com/licenses/isc> \"\"\"Contains custom exceptions and errors.\"\"\" from typing import Optional class", "utf-8 -*- # Copyright (c) 2019 <NAME> <<EMAIL>> # ISC License <https://choosealicense.com/licenses/isc> \"\"\"Contains" ]
[]
[ "__init__(self,data = None): self.data = data self.reference = None #EXECUTION objNode1 = Node(1)", "= Node(3) objNode4 = Node(4) objNode1.reference = objNode2 objNode2.reference = objNode3 objNode3.reference =", "objNode2 objNode2.reference = objNode3 objNode3.reference = objNode4 objNode4.reference = None presentNode = objNode1", "objNode3 = Node(3) objNode4 = Node(4) objNode1.reference = objNode2 objNode2.reference = objNode3 objNode3.reference", "= objNode2 objNode2.reference = objNode3 objNode3.reference = objNode4 objNode4.reference = None presentNode =", "objNode1.reference = objNode2 objNode2.reference = objNode3 objNode3.reference = objNode4 objNode4.reference = None presentNode", "objNode4 objNode4.reference = None presentNode = objNode1 while presentNode: print(\"DATA VALUE = \",presentNode.data)", "objNode2 = Node(2) objNode3 = Node(3) objNode4 = Node(4) objNode1.reference = objNode2 objNode2.reference", "= data self.reference = None #EXECUTION objNode1 = Node(1) objNode2 = Node(2) objNode3", "Node(3) objNode4 = Node(4) objNode1.reference = objNode2 objNode2.reference = objNode3 objNode3.reference = objNode4", "Node(2) objNode3 = Node(3) objNode4 = Node(4) objNode1.reference = objNode2 objNode2.reference = objNode3", "objNode4.reference = None presentNode = objNode1 while presentNode: print(\"DATA VALUE = \",presentNode.data) presentNode", "= Node(2) objNode3 = Node(3) objNode4 = Node(4) objNode1.reference = objNode2 objNode2.reference =", "= None): self.data = data self.reference = None #EXECUTION objNode1 = Node(1) objNode2", "= Node(1) objNode2 = Node(2) objNode3 = Node(3) objNode4 = Node(4) objNode1.reference =", "<gh_stars>0 class Node: def __init__(self,data = None): self.data = data self.reference = None", "data self.reference = None #EXECUTION objNode1 = Node(1) objNode2 = Node(2) objNode3 =", "objNode1 = Node(1) objNode2 = Node(2) objNode3 = Node(3) objNode4 = Node(4) objNode1.reference", "= objNode3 objNode3.reference = objNode4 
objNode4.reference = None presentNode = objNode1 while presentNode:", "def __init__(self,data = None): self.data = data self.reference = None #EXECUTION objNode1 =", "None #EXECUTION objNode1 = Node(1) objNode2 = Node(2) objNode3 = Node(3) objNode4 =", "objNode3 objNode3.reference = objNode4 objNode4.reference = None presentNode = objNode1 while presentNode: print(\"DATA", "objNode2.reference = objNode3 objNode3.reference = objNode4 objNode4.reference = None presentNode = objNode1 while", "self.data = data self.reference = None #EXECUTION objNode1 = Node(1) objNode2 = Node(2)", "objNode4 = Node(4) objNode1.reference = objNode2 objNode2.reference = objNode3 objNode3.reference = objNode4 objNode4.reference", "Node: def __init__(self,data = None): self.data = data self.reference = None #EXECUTION objNode1", "self.reference = None #EXECUTION objNode1 = Node(1) objNode2 = Node(2) objNode3 = Node(3)", "None): self.data = data self.reference = None #EXECUTION objNode1 = Node(1) objNode2 =", "None presentNode = objNode1 while presentNode: print(\"DATA VALUE = \",presentNode.data) presentNode = presentNode.reference", "= None presentNode = objNode1 while presentNode: print(\"DATA VALUE = \",presentNode.data) presentNode =", "Node(1) objNode2 = Node(2) objNode3 = Node(3) objNode4 = Node(4) objNode1.reference = objNode2", "#EXECUTION objNode1 = Node(1) objNode2 = Node(2) objNode3 = Node(3) objNode4 = Node(4)", "= Node(4) objNode1.reference = objNode2 objNode2.reference = objNode3 objNode3.reference = objNode4 objNode4.reference =", "Node(4) objNode1.reference = objNode2 objNode2.reference = objNode3 objNode3.reference = objNode4 objNode4.reference = None", "= objNode4 objNode4.reference = None presentNode = objNode1 while presentNode: print(\"DATA VALUE =", "objNode3.reference = objNode4 objNode4.reference = None presentNode = objNode1 while presentNode: print(\"DATA VALUE", "= None #EXECUTION objNode1 = Node(1) objNode2 = Node(2) objNode3 = Node(3) objNode4", "class Node: 
def __init__(self,data = None): self.data = data self.reference = None #EXECUTION" ]
[ "embeddings are non-static.\"\"\" # if only one sentence is passed, convert to list", "Dict: params = dict() params['name'] = self.key_name params['append'] = True params.update(self.results) return params", "a list of sentences. If embeddings are already added, updates only if embeddings", "cls in EvaluatorBaseClass.__subclasses__(): if cls.__name__ == name: return cls(*args, **kwargs) raise ValueError('No evalutor", "params['append'] = True params.update(self.results) return params class PredictionAccuracyBySentence(EvaluatorBaseClass): def __init__(self): super().__init__() self.results =", "embeddings to all words in a list of sentences. If embeddings are already", "all evaluator classes! .. moduleauthor:: <NAME> \"\"\" from typing import List, Union, Dict", "axis=0) # y_test = np.take(exp_data['y'], exp_data['idx_dev'], axis=0) # y_pred_test = np.take(exp_data['y_pred'], exp_data['idx_dev'], axis=0)", "@property def key_name(self): \"\"\"Name must be unique!\"\"\" return self.__class__.__name__ def evaluate(self, *args, **kwargs)", "list of sentences.\"\"\" pass @abstractmethod def get_params(self) -> Dict: pass class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass): def", "= dict() @property def key_name(self): \"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self,", "**kwargs): # y_train = np.take(exp_data['y'], exp_data['idx_train'], axis=0) # y_pred_train = np.take(exp_data['y_pred'], exp_data['idx_train'], axis=0)", "\"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, test_index, rsa_a_eval, rsa_b_eval):", "Sentence('Mein Name ist Paul', preprocessor), # ] # # clf = linear_model.Lasso(alpha=0.1) #", "return cls(*args, **kwargs) raise ValueError('No evalutor named %s' % name) class EvaluatorBaseClass(ABC): \"\"\"", "evaluator :synopsis: Holding all evaluator classes! .. 
moduleauthor:: <NAME> \"\"\" from typing import", "{'ground_truth':2}) # ] # sentence_b = [ # Sentence('Hi du, wie geht\\'s?', preprocessor),", "self.results['pearson'][0]) print('PCC: %f' % self.results['pearson'][0]) def get_params(self) -> Dict: params = dict() params['name']", "# ] # # clf = linear_model.Lasso(alpha=0.1) # classifier = Classifier('SelectiveClassifier', clf=clf, classifier_methods=[{'method':", "# sentence_a = [ # Sentence('Hallo du, wie geht es dir?', preprocessor, {'ground_truth':", "method for adding embeddings to all words in a list of sentences.\"\"\" pass", "linear_model, ensemble # # preprocessor = Preprocessor('DefaultPreprocessor') # # sentence_a = [ #", "= [ # Sentence('Hallo du, wie geht es dir?', preprocessor, {'ground_truth': 3}), #", "dir?', preprocessor, {'ground_truth': 3}), # Sentence('Mein Name ist Tina.', preprocessor, {'ground_truth':2}) # ]", "already added, updates only if embeddings are non-static.\"\"\" # if only one sentence", "geht es dir?', preprocessor, {'ground_truth': 3}), # Sentence('Mein Name ist Tina.', preprocessor, {'ground_truth':2})", "# from sklearn import linear_model, ensemble # # preprocessor = Preprocessor('DefaultPreprocessor') # #", "] # # clf = linear_model.Lasso(alpha=0.1) # classifier = Classifier('SelectiveClassifier', clf=clf, classifier_methods=[{'method': 'sequence_matcher_similarity'}])", "List, Union, Dict from abc import ABC, abstractmethod import numpy as np import", "import List, Union, Dict from abc import ABC, abstractmethod import numpy as np", "# y_pred_test = np.take(exp_data['y_pred'], exp_data['idx_dev'], axis=0) self.results['pearson'] = [pearsonr(y_eval_predicted, y_eval)[0]] # self.results['pearson_test_set'] =", "mtc.core.preprocessor import Preprocessor # from mtc.core.sentence import Sentence # from sklearn import linear_model,", "scipy.stats import pearsonr from sklearn.metrics import classification_report from mtc.core.sentence import Sentence def Evaluator(name,", "return 
self.__class__.__name__ def evaluate(self, *args, **kwargs) -> List[Sentence]: \"\"\"Add embeddings to all words", "list of sentences. If embeddings are already added, updates only if embeddings are", "Dict: params = dict() params['name'] = self.key_name params['append'] = False params['diff_dict'] = self.diff_dict", "{ 'diff': list(abs(y_eval-y_eval_predicted)), 'sen_idx': test_index, 'gold_standard': y_eval, 'pred': y_eval_predicted, 'raw_sentences_a': rsa_a_eval, 'raw_sentences_b': rsa_b_eval", "in a list of sentences. If embeddings are already added, updates only if", "np.take(exp_data['y'], exp_data['idx_train'], axis=0) # y_pred_train = np.take(exp_data['y_pred'], exp_data['idx_train'], axis=0) # y_test = np.take(exp_data['y'],", "numpy as np import pandas as pd from scipy.stats import pearsonr from sklearn.metrics", "params['name'] = self.key_name params['append'] = True params.update(self.results) return params class PredictionAccuracyBySentence(EvaluatorBaseClass): def __init__(self):", "class must inherit from this class \"\"\" @property def key_name(self): \"\"\"Name must be", "import classification_report from mtc.core.sentence import Sentence def Evaluator(name, *args, **kwargs): \"\"\" All evaluator", "= None self.diff_dict = {} @property def key_name(self): \"\"\"Name must be unique!\"\"\" return", "params['name'] = self.key_name params['append'] = False params['diff_dict'] = self.diff_dict return params if __name__", "__init__(self): super().__init__() self.results = dict() @property def key_name(self): \"\"\"Name must be unique!\"\"\" return", "from abc import ABC, abstractmethod import numpy as np import pandas as pd", "via this method \"\"\" for cls in EvaluatorBaseClass.__subclasses__(): if cls.__name__ == name: return", "%f' % self.results['pearson'][0]) def get_params(self) -> Dict: params = dict() params['name'] = self.key_name", "moduleauthor:: <NAME> \"\"\" from typing import List, Union, Dict from abc import ABC,", "List[Sentence]: 
\"\"\"Private method for adding embeddings to all words in a list of", "'pred': y_eval_predicted, 'raw_sentences_a': rsa_a_eval, 'raw_sentences_b': rsa_b_eval } def get_params(self) -> Dict: params =", "return params if __name__ == '__main__': from mtc.core.preprocessor import Preprocessor # from mtc.core.sentence", "import ABC, abstractmethod import numpy as np import pandas as pd from scipy.stats", "_evaluate_internal(self, y_eval, y_eval_predicted, *args, **kwargs): # y_train = np.take(exp_data['y'], exp_data['idx_train'], axis=0) # y_pred_train", "Preprocessor('DefaultPreprocessor') # # sentence_a = [ # Sentence('Hallo du, wie geht es dir?',", "must be unique!\"\"\" return self.__class__.__name__ def evaluate(self, *args, **kwargs) -> List[Sentence]: \"\"\"Add embeddings", "True params.update(self.results) return params class PredictionAccuracyBySentence(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = None self.diff_dict", ".. moduleauthor:: <NAME> \"\"\" from typing import List, Union, Dict from abc import", "{'ground_truth': 3}), # Sentence('Mein Name ist Tina.', preprocessor, {'ground_truth':2}) # ] # sentence_b", "clf = linear_model.Lasso(alpha=0.1) # classifier = Classifier('SelectiveClassifier', clf=clf, classifier_methods=[{'method': 'sequence_matcher_similarity'}]) # evaluator =", "def evaluate(self, *args, **kwargs) -> List[Sentence]: \"\"\"Add embeddings to all words in a", "# from mtc.core.sentence import Sentence # from sklearn import linear_model, ensemble # #", "\"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, *args, **kwargs): #", "__name__ == '__main__': from mtc.core.preprocessor import Preprocessor # from mtc.core.sentence import Sentence #", "dict() params['name'] = self.key_name params['append'] = True params.update(self.results) return params class PredictionAccuracyBySentence(EvaluatorBaseClass): def", "np.take(exp_data['y_pred'], 
exp_data['idx_train'], axis=0) # y_test = np.take(exp_data['y'], exp_data['idx_dev'], axis=0) # y_pred_test = np.take(exp_data['y_pred'],", "y_test)[0]] # print('on training set with pcc: %f' % self.results['pearson'][0]) print('PCC: %f' %", "Sentence('Mein Name ist Tina.', preprocessor, {'ground_truth':2}) # ] # sentence_b = [ #", "def get_params(self) -> Dict: params = dict() params['name'] = self.key_name params['append'] = True", "key_name(self): \"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, test_index, rsa_a_eval,", "params if __name__ == '__main__': from mtc.core.preprocessor import Preprocessor # from mtc.core.sentence import", "be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, *args, **kwargs): # y_train =", "PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = dict() @property def key_name(self): \"\"\"Name must be", "def _evaluate_internal(self, sentences: List[Sentence]) -> List[Sentence]: \"\"\"Private method for adding embeddings to all", "unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, test_index, rsa_a_eval, rsa_b_eval): self.diff_dict = {", "must inherit from this class \"\"\" @property def key_name(self): \"\"\"Name must be unique!\"\"\"", "= self.diff_dict return params if __name__ == '__main__': from mtc.core.preprocessor import Preprocessor #", "mtc.core.sentence import Sentence # from sklearn import linear_model, ensemble # # preprocessor =", "**kwargs) -> List[Sentence]: \"\"\"Add embeddings to all words in a list of sentences.", "'sequence_matcher_similarity'}]) # evaluator = Evaluator('PCCE') # # classifier.fit(sentence_a, sentence_b) # classifier.predict(sentence_a, sentence_b) #", "in a list of sentences.\"\"\" pass @abstractmethod def get_params(self) -> Dict: pass class", 
"Sentence('Hi du, wie geht\\'s?', preprocessor), # Sentence('Mein Name ist Paul', preprocessor), # ]", "if embeddings are non-static.\"\"\" # if only one sentence is passed, convert to", "self.results['pearson_test_set'] = [pearsonr(y_pred_test, y_test)[0]] # print('on training set with pcc: %f' % self.results['pearson'][0])", "'raw_sentences_a': rsa_a_eval, 'raw_sentences_b': rsa_b_eval } def get_params(self) -> Dict: params = dict() params['name']", "= [pearsonr(y_eval_predicted, y_eval)[0]] # self.results['pearson_test_set'] = [pearsonr(y_pred_test, y_test)[0]] # print('on training set with", "y_eval_predicted, 'raw_sentences_a': rsa_a_eval, 'raw_sentences_b': rsa_b_eval } def get_params(self) -> Dict: params = dict()", "must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, test_index, rsa_a_eval, rsa_b_eval): self.diff_dict", "'raw_sentences_b': rsa_b_eval } def get_params(self) -> Dict: params = dict() params['name'] = self.key_name", "embeddings to all words in a list of sentences.\"\"\" pass @abstractmethod def get_params(self)", "clf=clf, classifier_methods=[{'method': 'sequence_matcher_similarity'}]) # evaluator = Evaluator('PCCE') # # classifier.fit(sentence_a, sentence_b) # classifier.predict(sentence_a,", "axis=0) self.results['pearson'] = [pearsonr(y_eval_predicted, y_eval)[0]] # self.results['pearson_test_set'] = [pearsonr(y_pred_test, y_test)[0]] # print('on training", "of sentences. 
If embeddings are already added, updates only if embeddings are non-static.\"\"\"", "embeddings are already added, updates only if embeddings are non-static.\"\"\" # if only", "Dict: pass class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = dict() @property def key_name(self):", "named %s' % name) class EvaluatorBaseClass(ABC): \"\"\" Any evaluator class must inherit from", "f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, *args, **kwargs): # y_train = np.take(exp_data['y'], exp_data['idx_train'], axis=0)", "abc import ABC, abstractmethod import numpy as np import pandas as pd from", "added, updates only if embeddings are non-static.\"\"\" # if only one sentence is", "all words in a list of sentences.\"\"\" pass @abstractmethod def get_params(self) -> Dict:", "passed, convert to list of sentence self._evaluate_internal(*args, **kwargs) @abstractmethod def _evaluate_internal(self, sentences: List[Sentence])", "] # sentence_b = [ # Sentence('Hi du, wie geht\\'s?', preprocessor), # Sentence('Mein", "List[Sentence]) -> List[Sentence]: \"\"\"Private method for adding embeddings to all words in a", "one sentence is passed, convert to list of sentence self._evaluate_internal(*args, **kwargs) @abstractmethod def", "in EvaluatorBaseClass.__subclasses__(): if cls.__name__ == name: return cls(*args, **kwargs) raise ValueError('No evalutor named", "from mtc.core.sentence import Sentence def Evaluator(name, *args, **kwargs): \"\"\" All evaluator classes should", "test_index, 'gold_standard': y_eval, 'pred': y_eval_predicted, 'raw_sentences_a': rsa_a_eval, 'raw_sentences_b': rsa_b_eval } def get_params(self) ->", "-> Dict: params = dict() params['name'] = self.key_name params['append'] = True params.update(self.results) return", "pd from scipy.stats import pearsonr from sklearn.metrics import classification_report from mtc.core.sentence import Sentence", "# Sentence('Mein Name 
ist Paul', preprocessor), # ] # # clf = linear_model.Lasso(alpha=0.1)", "# # clf = linear_model.Lasso(alpha=0.1) # classifier = Classifier('SelectiveClassifier', clf=clf, classifier_methods=[{'method': 'sequence_matcher_similarity'}]) #", "dict() @property def key_name(self): \"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval,", "classifier_methods=[{'method': 'sequence_matcher_similarity'}]) # evaluator = Evaluator('PCCE') # # classifier.fit(sentence_a, sentence_b) # classifier.predict(sentence_a, sentence_b)", "be called via this method \"\"\" for cls in EvaluatorBaseClass.__subclasses__(): if cls.__name__ ==", "from this class \"\"\" @property def key_name(self): \"\"\"Name must be unique!\"\"\" return self.__class__.__name__", "training set with pcc: %f' % self.results['pearson'][0]) print('PCC: %f' % self.results['pearson'][0]) def get_params(self)", "abstractmethod import numpy as np import pandas as pd from scipy.stats import pearsonr", "-> Dict: params = dict() params['name'] = self.key_name params['append'] = False params['diff_dict'] =", "inherit from this class \"\"\" @property def key_name(self): \"\"\"Name must be unique!\"\"\" return", "List[Sentence]: \"\"\"Add embeddings to all words in a list of sentences. 
If embeddings", "rsa_a_eval, 'raw_sentences_b': rsa_b_eval } def get_params(self) -> Dict: params = dict() params['name'] =", "from mtc.core.sentence import Sentence # from sklearn import linear_model, ensemble # # preprocessor", "Sentence def Evaluator(name, *args, **kwargs): \"\"\" All evaluator classes should be called via", "of sentences.\"\"\" pass @abstractmethod def get_params(self) -> Dict: pass class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass): def __init__(self):", "[pearsonr(y_eval_predicted, y_eval)[0]] # self.results['pearson_test_set'] = [pearsonr(y_pred_test, y_test)[0]] # print('on training set with pcc:", "self.key_name params['append'] = False params['diff_dict'] = self.diff_dict return params if __name__ == '__main__':", "rsa_b_eval): self.diff_dict = { 'diff': list(abs(y_eval-y_eval_predicted)), 'sen_idx': test_index, 'gold_standard': y_eval, 'pred': y_eval_predicted, 'raw_sentences_a':", "cls(*args, **kwargs) raise ValueError('No evalutor named %s' % name) class EvaluatorBaseClass(ABC): \"\"\" Any", "module:: evaluator :synopsis: Holding all evaluator classes! .. 
moduleauthor:: <NAME> \"\"\" from typing", "def key_name(self): \"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, *args,", "du, wie geht es dir?', preprocessor, {'ground_truth': 3}), # Sentence('Mein Name ist Tina.',", "cls.__name__ == name: return cls(*args, **kwargs) raise ValueError('No evalutor named %s' % name)", "to all words in a list of sentences.\"\"\" pass @abstractmethod def get_params(self) ->", "# self.results['pearson_test_set'] = [pearsonr(y_pred_test, y_test)[0]] # print('on training set with pcc: %f' %", "False params['diff_dict'] = self.diff_dict return params if __name__ == '__main__': from mtc.core.preprocessor import", "def Evaluator(name, *args, **kwargs): \"\"\" All evaluator classes should be called via this", "Tina.', preprocessor, {'ground_truth':2}) # ] # sentence_b = [ # Sentence('Hi du, wie", "exp_data['idx_train'], axis=0) # y_pred_train = np.take(exp_data['y_pred'], exp_data['idx_train'], axis=0) # y_test = np.take(exp_data['y'], exp_data['idx_dev'],", "params = dict() params['name'] = self.key_name params['append'] = False params['diff_dict'] = self.diff_dict return", "evaluator class must inherit from this class \"\"\" @property def key_name(self): \"\"\"Name must", "if __name__ == '__main__': from mtc.core.preprocessor import Preprocessor # from mtc.core.sentence import Sentence", "Sentence # from sklearn import linear_model, ensemble # # preprocessor = Preprocessor('DefaultPreprocessor') #", "\"\"\" All evaluator classes should be called via this method \"\"\" for cls", "= np.take(exp_data['y_pred'], exp_data['idx_dev'], axis=0) self.results['pearson'] = [pearsonr(y_eval_predicted, y_eval)[0]] # self.results['pearson_test_set'] = [pearsonr(y_pred_test, y_test)[0]]", "self.results = None self.diff_dict = {} @property def key_name(self): \"\"\"Name must be unique!\"\"\"", "[pearsonr(y_pred_test, y_test)[0]] # print('on training set with pcc: %f' % 
self.results['pearson'][0]) print('PCC: %f'", "Preprocessor # from mtc.core.sentence import Sentence # from sklearn import linear_model, ensemble #", "\"\"\" for cls in EvaluatorBaseClass.__subclasses__(): if cls.__name__ == name: return cls(*args, **kwargs) raise", "exp_data['idx_dev'], axis=0) self.results['pearson'] = [pearsonr(y_eval_predicted, y_eval)[0]] # self.results['pearson_test_set'] = [pearsonr(y_pred_test, y_test)[0]] # print('on", "from sklearn import linear_model, ensemble # # preprocessor = Preprocessor('DefaultPreprocessor') # # sentence_a", "sklearn import linear_model, ensemble # # preprocessor = Preprocessor('DefaultPreprocessor') # # sentence_a =", "self.__class__.__name__ def evaluate(self, *args, **kwargs) -> List[Sentence]: \"\"\"Add embeddings to all words in", "raise ValueError('No evalutor named %s' % name) class EvaluatorBaseClass(ABC): \"\"\" Any evaluator class", "_evaluate_internal(self, y_eval, y_eval_predicted, test_index, rsa_a_eval, rsa_b_eval): self.diff_dict = { 'diff': list(abs(y_eval-y_eval_predicted)), 'sen_idx': test_index,", "%s' % name) class EvaluatorBaseClass(ABC): \"\"\" Any evaluator class must inherit from this", "= dict() params['name'] = self.key_name params['append'] = False params['diff_dict'] = self.diff_dict return params", "PredictionAccuracyBySentence(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = None self.diff_dict = {} @property def key_name(self):", "rsa_b_eval } def get_params(self) -> Dict: params = dict() params['name'] = self.key_name params['append']", "be unique!\"\"\" return self.__class__.__name__ def evaluate(self, *args, **kwargs) -> List[Sentence]: \"\"\"Add embeddings to", "self._evaluate_internal(*args, **kwargs) @abstractmethod def _evaluate_internal(self, sentences: List[Sentence]) -> List[Sentence]: \"\"\"Private method for adding", "-> List[Sentence]: \"\"\"Add embeddings to all words in a list of sentences. 
If", "def __init__(self): super().__init__() self.results = None self.diff_dict = {} @property def key_name(self): \"\"\"Name", "are non-static.\"\"\" # if only one sentence is passed, convert to list of", "y_test = np.take(exp_data['y'], exp_data['idx_dev'], axis=0) # y_pred_test = np.take(exp_data['y_pred'], exp_data['idx_dev'], axis=0) self.results['pearson'] =", "print('PCC: %f' % self.results['pearson'][0]) def get_params(self) -> Dict: params = dict() params['name'] =", "get_params(self) -> Dict: pass class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = dict() @property", "classifier = Classifier('SelectiveClassifier', clf=clf, classifier_methods=[{'method': 'sequence_matcher_similarity'}]) # evaluator = Evaluator('PCCE') # # classifier.fit(sentence_a,", "sentence is passed, convert to list of sentence self._evaluate_internal(*args, **kwargs) @abstractmethod def _evaluate_internal(self,", "np.take(exp_data['y_pred'], exp_data['idx_dev'], axis=0) self.results['pearson'] = [pearsonr(y_eval_predicted, y_eval)[0]] # self.results['pearson_test_set'] = [pearsonr(y_pred_test, y_test)[0]] #", "words in a list of sentences. 
If embeddings are already added, updates only", "name) class EvaluatorBaseClass(ABC): \"\"\" Any evaluator class must inherit from this class \"\"\"", "*args, **kwargs) -> List[Sentence]: \"\"\"Add embeddings to all words in a list of", "{} @property def key_name(self): \"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval,", "**kwargs) @abstractmethod def _evaluate_internal(self, sentences: List[Sentence]) -> List[Sentence]: \"\"\"Private method for adding embeddings", "pandas as pd from scipy.stats import pearsonr from sklearn.metrics import classification_report from mtc.core.sentence", "'__main__': from mtc.core.preprocessor import Preprocessor # from mtc.core.sentence import Sentence # from sklearn", "# # preprocessor = Preprocessor('DefaultPreprocessor') # # sentence_a = [ # Sentence('Hallo du,", "# preprocessor = Preprocessor('DefaultPreprocessor') # # sentence_a = [ # Sentence('Hallo du, wie", "sentences: List[Sentence]) -> List[Sentence]: \"\"\"Private method for adding embeddings to all words in", "= False params['diff_dict'] = self.diff_dict return params if __name__ == '__main__': from mtc.core.preprocessor", "Holding all evaluator classes! .. 
moduleauthor:: <NAME> \"\"\" from typing import List, Union,", "'diff': list(abs(y_eval-y_eval_predicted)), 'sen_idx': test_index, 'gold_standard': y_eval, 'pred': y_eval_predicted, 'raw_sentences_a': rsa_a_eval, 'raw_sentences_b': rsa_b_eval }", "unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, *args, **kwargs): # y_train = np.take(exp_data['y'],", "Paul', preprocessor), # ] # # clf = linear_model.Lasso(alpha=0.1) # classifier = Classifier('SelectiveClassifier',", "pcc: %f' % self.results['pearson'][0]) print('PCC: %f' % self.results['pearson'][0]) def get_params(self) -> Dict: params", "params['diff_dict'] = self.diff_dict return params if __name__ == '__main__': from mtc.core.preprocessor import Preprocessor", "ist Paul', preprocessor), # ] # # clf = linear_model.Lasso(alpha=0.1) # classifier =", "words in a list of sentences.\"\"\" pass @abstractmethod def get_params(self) -> Dict: pass", "super().__init__() self.results = dict() @property def key_name(self): \"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\"", "super().__init__() self.results = None self.diff_dict = {} @property def key_name(self): \"\"\"Name must be", "= True params.update(self.results) return params class PredictionAccuracyBySentence(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = None", "exp_data['idx_dev'], axis=0) # y_pred_test = np.take(exp_data['y_pred'], exp_data['idx_dev'], axis=0) self.results['pearson'] = [pearsonr(y_eval_predicted, y_eval)[0]] #", "self.diff_dict = { 'diff': list(abs(y_eval-y_eval_predicted)), 'sen_idx': test_index, 'gold_standard': y_eval, 'pred': y_eval_predicted, 'raw_sentences_a': rsa_a_eval,", "import Preprocessor # from mtc.core.sentence import Sentence # from sklearn import linear_model, ensemble", "wie geht es dir?', preprocessor, {'ground_truth': 3}), # Sentence('Mein Name ist Tina.', preprocessor,", "@abstractmethod def _evaluate_internal(self, 
sentences: List[Sentence]) -> List[Sentence]: \"\"\"Private method for adding embeddings to", "Evaluator(name, *args, **kwargs): \"\"\" All evaluator classes should be called via this method", "class \"\"\" @property def key_name(self): \"\"\"Name must be unique!\"\"\" return self.__class__.__name__ def evaluate(self,", "evalutor named %s' % name) class EvaluatorBaseClass(ABC): \"\"\" Any evaluator class must inherit", "# y_test = np.take(exp_data['y'], exp_data['idx_dev'], axis=0) # y_pred_test = np.take(exp_data['y_pred'], exp_data['idx_dev'], axis=0) self.results['pearson']", "@property def key_name(self): \"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted,", "# Sentence('Hi du, wie geht\\'s?', preprocessor), # Sentence('Mein Name ist Paul', preprocessor), #", "return params class PredictionAccuracyBySentence(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = None self.diff_dict = {}", "__init__(self): super().__init__() self.results = None self.diff_dict = {} @property def key_name(self): \"\"\"Name must", "Dict from abc import ABC, abstractmethod import numpy as np import pandas as", "'sen_idx': test_index, 'gold_standard': y_eval, 'pred': y_eval_predicted, 'raw_sentences_a': rsa_a_eval, 'raw_sentences_b': rsa_b_eval } def get_params(self)", "\"\"\" from typing import List, Union, Dict from abc import ABC, abstractmethod import", "this class \"\"\" @property def key_name(self): \"\"\"Name must be unique!\"\"\" return self.__class__.__name__ def", "evaluator classes! .. 
moduleauthor:: <NAME> \"\"\" from typing import List, Union, Dict from", "ValueError('No evalutor named %s' % name) class EvaluatorBaseClass(ABC): \"\"\" Any evaluator class must", "# y_train = np.take(exp_data['y'], exp_data['idx_train'], axis=0) # y_pred_train = np.take(exp_data['y_pred'], exp_data['idx_train'], axis=0) #", "== '__main__': from mtc.core.preprocessor import Preprocessor # from mtc.core.sentence import Sentence # from", "% self.results['pearson'][0]) def get_params(self) -> Dict: params = dict() params['name'] = self.key_name params['append']", "wie geht\\'s?', preprocessor), # Sentence('Mein Name ist Paul', preprocessor), # ] # #", "evaluate(self, *args, **kwargs) -> List[Sentence]: \"\"\"Add embeddings to all words in a list", "self.diff_dict = {} @property def key_name(self): \"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\" def", "preprocessor, {'ground_truth': 3}), # Sentence('Mein Name ist Tina.', preprocessor, {'ground_truth':2}) # ] #", "'gold_standard': y_eval, 'pred': y_eval_predicted, 'raw_sentences_a': rsa_a_eval, 'raw_sentences_b': rsa_b_eval } def get_params(self) -> Dict:", "return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, *args, **kwargs): # y_train = np.take(exp_data['y'], exp_data['idx_train'],", "[ # Sentence('Hallo du, wie geht es dir?', preprocessor, {'ground_truth': 3}), # Sentence('Mein", "\"\"\"Name must be unique!\"\"\" return self.__class__.__name__ def evaluate(self, *args, **kwargs) -> List[Sentence]: \"\"\"Add", "unique!\"\"\" return self.__class__.__name__ def evaluate(self, *args, **kwargs) -> List[Sentence]: \"\"\"Add embeddings to all", "import Sentence # from sklearn import linear_model, ensemble # # preprocessor = Preprocessor('DefaultPreprocessor')", "= np.take(exp_data['y'], exp_data['idx_train'], axis=0) # y_pred_train = np.take(exp_data['y_pred'], exp_data['idx_train'], axis=0) # y_test =", "rsa_a_eval, rsa_b_eval): self.diff_dict = { 'diff': 
list(abs(y_eval-y_eval_predicted)), 'sen_idx': test_index, 'gold_standard': y_eval, 'pred': y_eval_predicted,", "Any evaluator class must inherit from this class \"\"\" @property def key_name(self): \"\"\"Name", "class EvaluatorBaseClass(ABC): \"\"\" Any evaluator class must inherit from this class \"\"\" @property", "None self.diff_dict = {} @property def key_name(self): \"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\"", "key_name(self): \"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, *args, **kwargs):", "class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = dict() @property def key_name(self): \"\"\"Name must", "= Preprocessor('DefaultPreprocessor') # # sentence_a = [ # Sentence('Hallo du, wie geht es", "exp_data['idx_train'], axis=0) # y_test = np.take(exp_data['y'], exp_data['idx_dev'], axis=0) # y_pred_test = np.take(exp_data['y_pred'], exp_data['idx_dev'],", "# classifier = Classifier('SelectiveClassifier', clf=clf, classifier_methods=[{'method': 'sequence_matcher_similarity'}]) # evaluator = Evaluator('PCCE') # #", "with pcc: %f' % self.results['pearson'][0]) print('PCC: %f' % self.results['pearson'][0]) def get_params(self) -> Dict:", "if cls.__name__ == name: return cls(*args, **kwargs) raise ValueError('No evalutor named %s' %", "EvaluatorBaseClass.__subclasses__(): if cls.__name__ == name: return cls(*args, **kwargs) raise ValueError('No evalutor named %s'", "# sentence_b = [ # Sentence('Hi du, wie geht\\'s?', preprocessor), # Sentence('Mein Name", "called via this method \"\"\" for cls in EvaluatorBaseClass.__subclasses__(): if cls.__name__ == name:", "preprocessor), # Sentence('Mein Name ist Paul', preprocessor), # ] # # clf =", "from scipy.stats import pearsonr from sklearn.metrics import classification_report from mtc.core.sentence import Sentence def", "evaluator classes should be 
called via this method \"\"\" for cls in EvaluatorBaseClass.__subclasses__():", "If embeddings are already added, updates only if embeddings are non-static.\"\"\" # if", "get_params(self) -> Dict: params = dict() params['name'] = self.key_name params['append'] = False params['diff_dict']", "typing import List, Union, Dict from abc import ABC, abstractmethod import numpy as", "sklearn.metrics import classification_report from mtc.core.sentence import Sentence def Evaluator(name, *args, **kwargs): \"\"\" All", "import pandas as pd from scipy.stats import pearsonr from sklearn.metrics import classification_report from", "All evaluator classes should be called via this method \"\"\" for cls in", "# ] # sentence_b = [ # Sentence('Hi du, wie geht\\'s?', preprocessor), #", "to list of sentence self._evaluate_internal(*args, **kwargs) @abstractmethod def _evaluate_internal(self, sentences: List[Sentence]) -> List[Sentence]:", "only one sentence is passed, convert to list of sentence self._evaluate_internal(*args, **kwargs) @abstractmethod", "as pd from scipy.stats import pearsonr from sklearn.metrics import classification_report from mtc.core.sentence import", "for adding embeddings to all words in a list of sentences.\"\"\" pass @abstractmethod", "# print('on training set with pcc: %f' % self.results['pearson'][0]) print('PCC: %f' % self.results['pearson'][0])", "axis=0) # y_pred_train = np.take(exp_data['y_pred'], exp_data['idx_train'], axis=0) # y_test = np.take(exp_data['y'], exp_data['idx_dev'], axis=0)", "def get_params(self) -> Dict: params = dict() params['name'] = self.key_name params['append'] = False", "-> List[Sentence]: \"\"\"Private method for adding embeddings to all words in a list", "import linear_model, ensemble # # preprocessor = Preprocessor('DefaultPreprocessor') # # sentence_a = [", "adding embeddings to all words in a list of sentences.\"\"\" pass @abstractmethod def", "y_pred_train = np.take(exp_data['y_pred'], exp_data['idx_train'], axis=0) # 
y_test = np.take(exp_data['y'], exp_data['idx_dev'], axis=0) # y_pred_test", "pass @abstractmethod def get_params(self) -> Dict: pass class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass): def __init__(self): super().__init__() self.results", "self.diff_dict return params if __name__ == '__main__': from mtc.core.preprocessor import Preprocessor # from", "self.results['pearson'][0]) def get_params(self) -> Dict: params = dict() params['name'] = self.key_name params['append'] =", "**kwargs) raise ValueError('No evalutor named %s' % name) class EvaluatorBaseClass(ABC): \"\"\" Any evaluator", "y_pred_test = np.take(exp_data['y_pred'], exp_data['idx_dev'], axis=0) self.results['pearson'] = [pearsonr(y_eval_predicted, y_eval)[0]] # self.results['pearson_test_set'] = [pearsonr(y_pred_test,", "du, wie geht\\'s?', preprocessor), # Sentence('Mein Name ist Paul', preprocessor), # ] #", "from typing import List, Union, Dict from abc import ABC, abstractmethod import numpy", "*args, **kwargs): \"\"\" All evaluator classes should be called via this method \"\"\"", "= linear_model.Lasso(alpha=0.1) # classifier = Classifier('SelectiveClassifier', clf=clf, classifier_methods=[{'method': 'sequence_matcher_similarity'}]) # evaluator = Evaluator('PCCE')", "3}), # Sentence('Mein Name ist Tina.', preprocessor, {'ground_truth':2}) # ] # sentence_b =", "sentence_a = [ # Sentence('Hallo du, wie geht es dir?', preprocessor, {'ground_truth': 3}),", "_evaluate_internal(self, sentences: List[Sentence]) -> List[Sentence]: \"\"\"Private method for adding embeddings to all words", "be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, test_index, rsa_a_eval, rsa_b_eval): self.diff_dict =", "method \"\"\" for cls in EvaluatorBaseClass.__subclasses__(): if cls.__name__ == name: return cls(*args, **kwargs)", "% name) class EvaluatorBaseClass(ABC): \"\"\" Any evaluator class must inherit from this class", "} def get_params(self) -> 
Dict: params = dict() params['name'] = self.key_name params['append'] =", "\"\"\" @property def key_name(self): \"\"\"Name must be unique!\"\"\" return self.__class__.__name__ def evaluate(self, *args,", "y_eval_predicted, *args, **kwargs): # y_train = np.take(exp_data['y'], exp_data['idx_train'], axis=0) # y_pred_train = np.take(exp_data['y_pred'],", "es dir?', preprocessor, {'ground_truth': 3}), # Sentence('Mein Name ist Tina.', preprocessor, {'ground_truth':2}) #", "= {} @property def key_name(self): \"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self,", "axis=0) # y_pred_test = np.take(exp_data['y_pred'], exp_data['idx_dev'], axis=0) self.results['pearson'] = [pearsonr(y_eval_predicted, y_eval)[0]] # self.results['pearson_test_set']", "= { 'diff': list(abs(y_eval-y_eval_predicted)), 'sen_idx': test_index, 'gold_standard': y_eval, 'pred': y_eval_predicted, 'raw_sentences_a': rsa_a_eval, 'raw_sentences_b':", "= [pearsonr(y_pred_test, y_test)[0]] # print('on training set with pcc: %f' % self.results['pearson'][0]) print('PCC:", "self.results['pearson'] = [pearsonr(y_eval_predicted, y_eval)[0]] # self.results['pearson_test_set'] = [pearsonr(y_pred_test, y_test)[0]] # print('on training set", "def key_name(self): \"\"\"Name must be unique!\"\"\" return self.__class__.__name__ def evaluate(self, *args, **kwargs) ->", "ist Tina.', preprocessor, {'ground_truth':2}) # ] # sentence_b = [ # Sentence('Hi du,", "\"\"\"Private method for adding embeddings to all words in a list of sentences.\"\"\"", "= self.key_name params['append'] = False params['diff_dict'] = self.diff_dict return params if __name__ ==", "def key_name(self): \"\"\"Name must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, test_index,", "updates only if embeddings are non-static.\"\"\" # if only one sentence is passed,", "classes should be called via this method \"\"\" for cls in 
EvaluatorBaseClass.__subclasses__(): if", "key_name(self): \"\"\"Name must be unique!\"\"\" return self.__class__.__name__ def evaluate(self, *args, **kwargs) -> List[Sentence]:", "ensemble # # preprocessor = Preprocessor('DefaultPreprocessor') # # sentence_a = [ # Sentence('Hallo", "= dict() params['name'] = self.key_name params['append'] = True params.update(self.results) return params class PredictionAccuracyBySentence(EvaluatorBaseClass):", "\"\"\"Add embeddings to all words in a list of sentences. If embeddings are", "params.update(self.results) return params class PredictionAccuracyBySentence(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = None self.diff_dict =", "y_eval, y_eval_predicted, *args, **kwargs): # y_train = np.take(exp_data['y'], exp_data['idx_train'], axis=0) # y_pred_train =", "# # sentence_a = [ # Sentence('Hallo du, wie geht es dir?', preprocessor,", "sentence_b = [ # Sentence('Hi du, wie geht\\'s?', preprocessor), # Sentence('Mein Name ist", "import numpy as np import pandas as pd from scipy.stats import pearsonr from", "def _evaluate_internal(self, y_eval, y_eval_predicted, test_index, rsa_a_eval, rsa_b_eval): self.diff_dict = { 'diff': list(abs(y_eval-y_eval_predicted)), 'sen_idx':", "sentences. If embeddings are already added, updates only if embeddings are non-static.\"\"\" #", "= Classifier('SelectiveClassifier', clf=clf, classifier_methods=[{'method': 'sequence_matcher_similarity'}]) # evaluator = Evaluator('PCCE') # # classifier.fit(sentence_a, sentence_b)", "name: return cls(*args, **kwargs) raise ValueError('No evalutor named %s' % name) class EvaluatorBaseClass(ABC):", "all words in a list of sentences. 
If embeddings are already added, updates", "= self.key_name params['append'] = True params.update(self.results) return params class PredictionAccuracyBySentence(EvaluatorBaseClass): def __init__(self): super().__init__()", "non-static.\"\"\" # if only one sentence is passed, convert to list of sentence", "test_index, rsa_a_eval, rsa_b_eval): self.diff_dict = { 'diff': list(abs(y_eval-y_eval_predicted)), 'sen_idx': test_index, 'gold_standard': y_eval, 'pred':", "import pearsonr from sklearn.metrics import classification_report from mtc.core.sentence import Sentence def Evaluator(name, *args,", "self.key_name params['append'] = True params.update(self.results) return params class PredictionAccuracyBySentence(EvaluatorBaseClass): def __init__(self): super().__init__() self.results", "preprocessor = Preprocessor('DefaultPreprocessor') # # sentence_a = [ # Sentence('Hallo du, wie geht", "@abstractmethod def get_params(self) -> Dict: pass class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass): def __init__(self): super().__init__() self.results =", "# Sentence('Hallo du, wie geht es dir?', preprocessor, {'ground_truth': 3}), # Sentence('Mein Name", "classes! .. moduleauthor:: <NAME> \"\"\" from typing import List, Union, Dict from abc", "EvaluatorBaseClass(ABC): \"\"\" Any evaluator class must inherit from this class \"\"\" @property def", "class PredictionAccuracyBySentence(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = None self.diff_dict = {} @property def", ".. module:: evaluator :synopsis: Holding all evaluator classes! .. 
moduleauthor:: <NAME> \"\"\" from", "def get_params(self) -> Dict: pass class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = dict()", "def _evaluate_internal(self, y_eval, y_eval_predicted, *args, **kwargs): # y_train = np.take(exp_data['y'], exp_data['idx_train'], axis=0) #", "[ # Sentence('Hi du, wie geht\\'s?', preprocessor), # Sentence('Mein Name ist Paul', preprocessor),", "geht\\'s?', preprocessor), # Sentence('Mein Name ist Paul', preprocessor), # ] # # clf", "print('on training set with pcc: %f' % self.results['pearson'][0]) print('PCC: %f' % self.results['pearson'][0]) def", "Sentence('Hallo du, wie geht es dir?', preprocessor, {'ground_truth': 3}), # Sentence('Mein Name ist", "from sklearn.metrics import classification_report from mtc.core.sentence import Sentence def Evaluator(name, *args, **kwargs): \"\"\"", "y_eval_predicted, test_index, rsa_a_eval, rsa_b_eval): self.diff_dict = { 'diff': list(abs(y_eval-y_eval_predicted)), 'sen_idx': test_index, 'gold_standard': y_eval,", "sentence self._evaluate_internal(*args, **kwargs) @abstractmethod def _evaluate_internal(self, sentences: List[Sentence]) -> List[Sentence]: \"\"\"Private method for", "convert to list of sentence self._evaluate_internal(*args, **kwargs) @abstractmethod def _evaluate_internal(self, sentences: List[Sentence]) ->", "\"\"\" Any evaluator class must inherit from this class \"\"\" @property def key_name(self):", "y_eval)[0]] # self.results['pearson_test_set'] = [pearsonr(y_pred_test, y_test)[0]] # print('on training set with pcc: %f'", "y_eval, y_eval_predicted, test_index, rsa_a_eval, rsa_b_eval): self.diff_dict = { 'diff': list(abs(y_eval-y_eval_predicted)), 'sen_idx': test_index, 'gold_standard':", "set with pcc: %f' % self.results['pearson'][0]) print('PCC: %f' % self.results['pearson'][0]) def get_params(self) ->", "as np import pandas as pd from scipy.stats import pearsonr from sklearn.metrics import", "% 
self.results['pearson'][0]) print('PCC: %f' % self.results['pearson'][0]) def get_params(self) -> Dict: params = dict()", "params class PredictionAccuracyBySentence(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = None self.diff_dict = {} @property", "to all words in a list of sentences. If embeddings are already added,", "preprocessor), # ] # # clf = linear_model.Lasso(alpha=0.1) # classifier = Classifier('SelectiveClassifier', clf=clf,", "== name: return cls(*args, **kwargs) raise ValueError('No evalutor named %s' % name) class", "if only one sentence is passed, convert to list of sentence self._evaluate_internal(*args, **kwargs)", "sentences.\"\"\" pass @abstractmethod def get_params(self) -> Dict: pass class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass): def __init__(self): super().__init__()", "pass class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = dict() @property def key_name(self): \"\"\"Name", "of sentence self._evaluate_internal(*args, **kwargs) @abstractmethod def _evaluate_internal(self, sentences: List[Sentence]) -> List[Sentence]: \"\"\"Private method", "for cls in EvaluatorBaseClass.__subclasses__(): if cls.__name__ == name: return cls(*args, **kwargs) raise ValueError('No", "get_params(self) -> Dict: params = dict() params['name'] = self.key_name params['append'] = True params.update(self.results)", "Name ist Tina.', preprocessor, {'ground_truth':2}) # ] # sentence_b = [ # Sentence('Hi", "params['append'] = False params['diff_dict'] = self.diff_dict return params if __name__ == '__main__': from", "pearsonr from sklearn.metrics import classification_report from mtc.core.sentence import Sentence def Evaluator(name, *args, **kwargs):", "y_eval, 'pred': y_eval_predicted, 'raw_sentences_a': rsa_a_eval, 'raw_sentences_b': rsa_b_eval } def get_params(self) -> Dict: params", "self.results = dict() @property def key_name(self): \"\"\"Name must be 
unique!\"\"\" return f\"{self.__class__.__name__}\" def", "# clf = linear_model.Lasso(alpha=0.1) # classifier = Classifier('SelectiveClassifier', clf=clf, classifier_methods=[{'method': 'sequence_matcher_similarity'}]) # evaluator", "import Sentence def Evaluator(name, *args, **kwargs): \"\"\" All evaluator classes should be called", "preprocessor, {'ground_truth':2}) # ] # sentence_b = [ # Sentence('Hi du, wie geht\\'s?',", "= np.take(exp_data['y'], exp_data['idx_dev'], axis=0) # y_pred_test = np.take(exp_data['y_pred'], exp_data['idx_dev'], axis=0) self.results['pearson'] = [pearsonr(y_eval_predicted,", "%f' % self.results['pearson'][0]) print('PCC: %f' % self.results['pearson'][0]) def get_params(self) -> Dict: params =", "is passed, convert to list of sentence self._evaluate_internal(*args, **kwargs) @abstractmethod def _evaluate_internal(self, sentences:", "np import pandas as pd from scipy.stats import pearsonr from sklearn.metrics import classification_report", ":synopsis: Holding all evaluator classes! .. moduleauthor:: <NAME> \"\"\" from typing import List,", "return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, test_index, rsa_a_eval, rsa_b_eval): self.diff_dict = { 'diff':", "classification_report from mtc.core.sentence import Sentence def Evaluator(name, *args, **kwargs): \"\"\" All evaluator classes", "list(abs(y_eval-y_eval_predicted)), 'sen_idx': test_index, 'gold_standard': y_eval, 'pred': y_eval_predicted, 'raw_sentences_a': rsa_a_eval, 'raw_sentences_b': rsa_b_eval } def", "mtc.core.sentence import Sentence def Evaluator(name, *args, **kwargs): \"\"\" All evaluator classes should be", "\"\"\" .. module:: evaluator :synopsis: Holding all evaluator classes! .. 
moduleauthor:: <NAME> \"\"\"", "-> Dict: pass class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass): def __init__(self): super().__init__() self.results = dict() @property def", "this method \"\"\" for cls in EvaluatorBaseClass.__subclasses__(): if cls.__name__ == name: return cls(*args,", "*args, **kwargs): # y_train = np.take(exp_data['y'], exp_data['idx_train'], axis=0) # y_pred_train = np.take(exp_data['y_pred'], exp_data['idx_train'],", "are already added, updates only if embeddings are non-static.\"\"\" # if only one", "f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, test_index, rsa_a_eval, rsa_b_eval): self.diff_dict = { 'diff': list(abs(y_eval-y_eval_predicted)),", "ABC, abstractmethod import numpy as np import pandas as pd from scipy.stats import", "# y_pred_train = np.take(exp_data['y_pred'], exp_data['idx_train'], axis=0) # y_test = np.take(exp_data['y'], exp_data['idx_dev'], axis=0) #", "list of sentence self._evaluate_internal(*args, **kwargs) @abstractmethod def _evaluate_internal(self, sentences: List[Sentence]) -> List[Sentence]: \"\"\"Private", "np.take(exp_data['y'], exp_data['idx_dev'], axis=0) # y_pred_test = np.take(exp_data['y_pred'], exp_data['idx_dev'], axis=0) self.results['pearson'] = [pearsonr(y_eval_predicted, y_eval)[0]]", "should be called via this method \"\"\" for cls in EvaluatorBaseClass.__subclasses__(): if cls.__name__", "must be unique!\"\"\" return f\"{self.__class__.__name__}\" def _evaluate_internal(self, y_eval, y_eval_predicted, *args, **kwargs): # y_train", "params = dict() params['name'] = self.key_name params['append'] = True params.update(self.results) return params class", "linear_model.Lasso(alpha=0.1) # classifier = Classifier('SelectiveClassifier', clf=clf, classifier_methods=[{'method': 'sequence_matcher_similarity'}]) # evaluator = Evaluator('PCCE') #", "# evaluator = Evaluator('PCCE') # # classifier.fit(sentence_a, sentence_b) # classifier.predict(sentence_a, 
sentence_b) # evaluator.evaluate(sentence_a[0])", "y_train = np.take(exp_data['y'], exp_data['idx_train'], axis=0) # y_pred_train = np.take(exp_data['y_pred'], exp_data['idx_train'], axis=0) # y_test", "= [ # Sentence('Hi du, wie geht\\'s?', preprocessor), # Sentence('Mein Name ist Paul',", "Name ist Paul', preprocessor), # ] # # clf = linear_model.Lasso(alpha=0.1) # classifier", "# if only one sentence is passed, convert to list of sentence self._evaluate_internal(*args,", "**kwargs): \"\"\" All evaluator classes should be called via this method \"\"\" for", "from mtc.core.preprocessor import Preprocessor # from mtc.core.sentence import Sentence # from sklearn import", "def __init__(self): super().__init__() self.results = dict() @property def key_name(self): \"\"\"Name must be unique!\"\"\"", "<NAME> \"\"\" from typing import List, Union, Dict from abc import ABC, abstractmethod", "Classifier('SelectiveClassifier', clf=clf, classifier_methods=[{'method': 'sequence_matcher_similarity'}]) # evaluator = Evaluator('PCCE') # # classifier.fit(sentence_a, sentence_b) #", "dict() params['name'] = self.key_name params['append'] = False params['diff_dict'] = self.diff_dict return params if", "Union, Dict from abc import ABC, abstractmethod import numpy as np import pandas", "only if embeddings are non-static.\"\"\" # if only one sentence is passed, convert", "= np.take(exp_data['y_pred'], exp_data['idx_train'], axis=0) # y_test = np.take(exp_data['y'], exp_data['idx_dev'], axis=0) # y_pred_test =", "# Sentence('Mein Name ist Tina.', preprocessor, {'ground_truth':2}) # ] # sentence_b = [", "a list of sentences.\"\"\" pass @abstractmethod def get_params(self) -> Dict: pass class PearsonCorrelationCoefficientEvaluator(EvaluatorBaseClass):" ]
[ ".views import documentation from .router import router from rest_framework_jwt.views import obtain_jwt_token urlpatterns =", "rest_framework_jwt.views import obtain_jwt_token urlpatterns = [ path('tokenAuth/', obtain_jwt_token), path('', documentation, name='documentation'), path('', include('main.urls')),", "django.urls import path, include from .views import documentation from .router import router from", "include from .views import documentation from .router import router from rest_framework_jwt.views import obtain_jwt_token", "import obtain_jwt_token urlpatterns = [ path('tokenAuth/', obtain_jwt_token), path('', documentation, name='documentation'), path('', include('main.urls')), path('',", "path, include from .views import documentation from .router import router from rest_framework_jwt.views import", "import documentation from .router import router from rest_framework_jwt.views import obtain_jwt_token urlpatterns = [", "admin from django.urls import path, include from .views import documentation from .router import", "from .router import router from rest_framework_jwt.views import obtain_jwt_token urlpatterns = [ path('tokenAuth/', obtain_jwt_token),", "= [ path('tokenAuth/', obtain_jwt_token), path('', documentation, name='documentation'), path('', include('main.urls')), path('', include(router.urls)), path('admin/', admin.site.urls),", "django.contrib import admin from django.urls import path, include from .views import documentation from", "import admin from django.urls import path, include from .views import documentation from .router", "router from rest_framework_jwt.views import obtain_jwt_token urlpatterns = [ path('tokenAuth/', obtain_jwt_token), path('', documentation, name='documentation'),", "urlpatterns = [ path('tokenAuth/', obtain_jwt_token), path('', documentation, name='documentation'), path('', include('main.urls')), path('', include(router.urls)), path('admin/',", "documentation from .router import router from rest_framework_jwt.views 
import obtain_jwt_token urlpatterns = [ path('tokenAuth/',", "from .views import documentation from .router import router from rest_framework_jwt.views import obtain_jwt_token urlpatterns", "import router from rest_framework_jwt.views import obtain_jwt_token urlpatterns = [ path('tokenAuth/', obtain_jwt_token), path('', documentation,", "obtain_jwt_token urlpatterns = [ path('tokenAuth/', obtain_jwt_token), path('', documentation, name='documentation'), path('', include('main.urls')), path('', include(router.urls)),", "[ path('tokenAuth/', obtain_jwt_token), path('', documentation, name='documentation'), path('', include('main.urls')), path('', include(router.urls)), path('admin/', admin.site.urls), ]", "from django.contrib import admin from django.urls import path, include from .views import documentation", "from rest_framework_jwt.views import obtain_jwt_token urlpatterns = [ path('tokenAuth/', obtain_jwt_token), path('', documentation, name='documentation'), path('',", "from django.urls import path, include from .views import documentation from .router import router", "import path, include from .views import documentation from .router import router from rest_framework_jwt.views", ".router import router from rest_framework_jwt.views import obtain_jwt_token urlpatterns = [ path('tokenAuth/', obtain_jwt_token), path(''," ]
[ "TreeNode(array[0]) q = Queue() q.put(root) i = 1 while not q.empty() and i", "getting DRL(reverse RL for DLR), reverse output to LRD. \"\"\" stack = []", "DRL(reverse RL for DLR), reverse output to LRD. \"\"\" stack = [] node", "the most left node while node: stack.append(node) node = node.left node = stack.pop()", "TreeNode): stack = [] node = root result = [] while node or", "node = q.get() node.left = TreeNode(array[i]) q.put(node.left) if i + 1 != len(array):", "= node.right node = stack.pop() node = node.left return ' '.join(list(map(str, reversed(result)))) if", "' '.join(list(map(str, result))) def dlr(root: TreeNode): stack = [] node = root result", "= TreeNode(array[i]) q.put(node.left) if i + 1 != len(array): node.right = TreeNode(array[i +", "most left node while node: stack.append(node) node = node.left node = stack.pop() result.append(node.val)", "q.put(node.right) i += 2 return root def ldr(root: TreeNode): stack = [] node", "'.join(list(map(str, reversed(result)))) if __name__ == '__main__': root = TreeNode(1) root.right = TreeNode(2) root.right.left", "or stack: # go to the most left node while node: stack.append(node) node", "q.get() node.left = TreeNode(array[i]) q.put(node.left) if i + 1 != len(array): node.right =", "if i + 1 != len(array): node.right = TreeNode(array[i + 1]) q.put(node.right) i", "go to the most left node while node: stack.append(node) node = node.left node", "self.left = None self.right = None @staticmethod def from_array(array): # bfs construct binary", "[] node = root result = [] while node or stack: # go", "to the most left node while node: stack.append(node) node = node.left node =", "dlr(root: TreeNode): stack = [] node = root result = [] while node", "import Queue class TreeNode: def __init__(self, val): self.val = val self.left = None", "output to LRD. 
\"\"\" stack = [] node = root result = []", "stack: # go to the most left node while node: stack.append(node) node =", "!= len(array): node.right = TreeNode(array[i + 1]) q.put(node.right) i += 2 return root", "node = node.right node = stack.pop() node = node.left return ' '.join(list(map(str, reversed(result))))", "= [] while node or stack: while node: result.append(node.val) stack.append(node) node = node.right", "result))) def dlr(root: TreeNode): stack = [] node = root result = []", "from_array(array): # bfs construct binary tree root = TreeNode(array[0]) q = Queue() q.put(root)", "binary tree root = TreeNode(array[0]) q = Queue() q.put(root) i = 1 while", "class TreeNode: def __init__(self, val): self.val = val self.left = None self.right =", "!= len(array): node = q.get() node.left = TreeNode(array[i]) q.put(node.left) if i + 1", "TreeNode): \"\"\" After getting DRL(reverse RL for DLR), reverse output to LRD. \"\"\"", "i = 1 while not q.empty() and i != len(array): node = q.get()", "= 1 while not q.empty() and i != len(array): node = q.get() node.left", "self.val = val self.left = None self.right = None @staticmethod def from_array(array): #", "node or stack: while node: result.append(node.val) stack.append(node) node = node.left node = stack.pop()", "result = [] while node or stack: # go to the most left", "= [] while node or stack: # go to the most left node", "None self.right = None @staticmethod def from_array(array): # bfs construct binary tree root", "node.left node = stack.pop() node = node.right return ' '.join(list(map(str, result))) def lrd(root:", "to LRD. 
\"\"\" stack = [] node = root result = [] while", "= stack.pop() result.append(node.val) node = node.right return ' '.join(list(map(str, result))) def dlr(root: TreeNode):", "or stack: while node: result.append(node.val) stack.append(node) node = node.right node = stack.pop() node", "node: stack.append(node) node = node.left node = stack.pop() result.append(node.val) node = node.right return", "[] while node or stack: while node: result.append(node.val) stack.append(node) node = node.left node", "def __init__(self, val): self.val = val self.left = None self.right = None @staticmethod", "and i != len(array): node = q.get() node.left = TreeNode(array[i]) q.put(node.left) if i", "return root def ldr(root: TreeNode): stack = [] node = root result =", "After getting DRL(reverse RL for DLR), reverse output to LRD. \"\"\" stack =", "TreeNode(array[i + 1]) q.put(node.right) i += 2 return root def ldr(root: TreeNode): stack", "node or stack: # go to the most left node while node: stack.append(node)", "Queue class TreeNode: def __init__(self, val): self.val = val self.left = None self.right", "= node.right return ' '.join(list(map(str, result))) def dlr(root: TreeNode): stack = [] node", "stack: while node: result.append(node.val) stack.append(node) node = node.left node = stack.pop() node =", "Queue() q.put(root) i = 1 while not q.empty() and i != len(array): node", "node = root result = [] while node or stack: while node: result.append(node.val)", "node = stack.pop() node = node.right return ' '.join(list(map(str, result))) def lrd(root: TreeNode):", "left node while node: stack.append(node) node = node.left node = stack.pop() result.append(node.val) node", "q = Queue() q.put(root) i = 1 while not q.empty() and i !=", "node = node.left return ' '.join(list(map(str, reversed(result)))) if __name__ == '__main__': root =", "while node: result.append(node.val) stack.append(node) node = node.left node = stack.pop() node = node.right", "for DLR), reverse output to LRD. 
\"\"\" stack = [] node = root", "= None @staticmethod def from_array(array): # bfs construct binary tree root = TreeNode(array[0])", "return ' '.join(list(map(str, reversed(result)))) if __name__ == '__main__': root = TreeNode(1) root.right =", "= None self.right = None @staticmethod def from_array(array): # bfs construct binary tree", "1 while not q.empty() and i != len(array): node = q.get() node.left =", "[] while node or stack: while node: result.append(node.val) stack.append(node) node = node.right node", "result.append(node.val) stack.append(node) node = node.right node = stack.pop() node = node.left return '", "= [] node = root result = [] while node or stack: while", "val self.left = None self.right = None @staticmethod def from_array(array): # bfs construct", "'.join(list(map(str, result))) def lrd(root: TreeNode): \"\"\" After getting DRL(reverse RL for DLR), reverse", "while node: result.append(node.val) stack.append(node) node = node.right node = stack.pop() node = node.left", "1 != len(array): node.right = TreeNode(array[i + 1]) q.put(node.right) i += 2 return", "def lrd(root: TreeNode): \"\"\" After getting DRL(reverse RL for DLR), reverse output to", "q.put(root) i = 1 while not q.empty() and i != len(array): node =", "TreeNode(array[i]) q.put(node.left) if i + 1 != len(array): node.right = TreeNode(array[i + 1])", "+ 1]) q.put(node.right) i += 2 return root def ldr(root: TreeNode): stack =", "node: result.append(node.val) stack.append(node) node = node.right node = stack.pop() node = node.left return", "len(array): node.right = TreeNode(array[i + 1]) q.put(node.right) i += 2 return root def", "node = node.left node = stack.pop() result.append(node.val) node = node.right return ' '.join(list(map(str,", "' '.join(list(map(str, reversed(result)))) if __name__ == '__main__': root = TreeNode(1) root.right = TreeNode(2)", "not q.empty() and i != len(array): node = q.get() node.left = TreeNode(array[i]) q.put(node.left)", "node.right = TreeNode(array[i + 
1]) q.put(node.right) i += 2 return root def ldr(root:", "node while node: stack.append(node) node = node.left node = stack.pop() result.append(node.val) node =", "self.right = None @staticmethod def from_array(array): # bfs construct binary tree root =", "node.right return ' '.join(list(map(str, result))) def dlr(root: TreeNode): stack = [] node =", "\"\"\" After getting DRL(reverse RL for DLR), reverse output to LRD. \"\"\" stack", "root result = [] while node or stack: while node: result.append(node.val) stack.append(node) node", "__init__(self, val): self.val = val self.left = None self.right = None @staticmethod def", "= stack.pop() node = node.left return ' '.join(list(map(str, reversed(result)))) if __name__ == '__main__':", "node.right node = stack.pop() node = node.left return ' '.join(list(map(str, reversed(result)))) if __name__", "= node.left node = stack.pop() result.append(node.val) node = node.right return ' '.join(list(map(str, result)))", "= val self.left = None self.right = None @staticmethod def from_array(array): # bfs", "queue import Queue class TreeNode: def __init__(self, val): self.val = val self.left =", "result))) def lrd(root: TreeNode): \"\"\" After getting DRL(reverse RL for DLR), reverse output", "while node: stack.append(node) node = node.left node = stack.pop() result.append(node.val) node = node.right", "@staticmethod def from_array(array): # bfs construct binary tree root = TreeNode(array[0]) q =", "2 return root def ldr(root: TreeNode): stack = [] node = root result", "result.append(node.val) node = node.right return ' '.join(list(map(str, result))) def dlr(root: TreeNode): stack =", "while not q.empty() and i != len(array): node = q.get() node.left = TreeNode(array[i])", "ldr(root: TreeNode): stack = [] node = root result = [] while node", "__name__ == '__main__': root = TreeNode(1) root.right = TreeNode(2) root.right.left = TreeNode(3) print(ldr(root))", "= root result = [] while node or stack: # go to the", "node = stack.pop() 
node = node.left return ' '.join(list(map(str, reversed(result)))) if __name__ ==", "node = node.left node = stack.pop() node = node.right return ' '.join(list(map(str, result)))", "= stack.pop() node = node.right return ' '.join(list(map(str, result))) def lrd(root: TreeNode): \"\"\"", "def dlr(root: TreeNode): stack = [] node = root result = [] while", "= node.left node = stack.pop() node = node.right return ' '.join(list(map(str, result))) def", "reversed(result)))) if __name__ == '__main__': root = TreeNode(1) root.right = TreeNode(2) root.right.left =", "+= 2 return root def ldr(root: TreeNode): stack = [] node = root", "stack.append(node) node = node.right node = stack.pop() node = node.left return ' '.join(list(map(str,", "val): self.val = val self.left = None self.right = None @staticmethod def from_array(array):", "node or stack: while node: result.append(node.val) stack.append(node) node = node.right node = stack.pop()", "RL for DLR), reverse output to LRD. \"\"\" stack = [] node =", "= [] node = root result = [] while node or stack: #", "stack.pop() node = node.left return ' '.join(list(map(str, reversed(result)))) if __name__ == '__main__': root", "i += 2 return root def ldr(root: TreeNode): stack = [] node =", "node = node.right return ' '.join(list(map(str, result))) def dlr(root: TreeNode): stack = []", "node = root result = [] while node or stack: # go to", "= TreeNode(array[i + 1]) q.put(node.right) i += 2 return root def ldr(root: TreeNode):", "while node or stack: while node: result.append(node.val) stack.append(node) node = node.right node =", "while node or stack: while node: result.append(node.val) stack.append(node) node = node.left node =", "node = node.right return ' '.join(list(map(str, result))) def lrd(root: TreeNode): \"\"\" After getting", "result.append(node.val) stack.append(node) node = node.left node = stack.pop() node = node.right return '", "stack: while node: result.append(node.val) stack.append(node) node = node.right node = 
stack.pop() node =", "= node.left return ' '.join(list(map(str, reversed(result)))) if __name__ == '__main__': root = TreeNode(1)", "or stack: while node: result.append(node.val) stack.append(node) node = node.left node = stack.pop() node", "q.empty() and i != len(array): node = q.get() node.left = TreeNode(array[i]) q.put(node.left) if", "\"\"\" stack = [] node = root result = [] while node or", "q.put(node.left) if i + 1 != len(array): node.right = TreeNode(array[i + 1]) q.put(node.right)", "root def ldr(root: TreeNode): stack = [] node = root result = []", "reverse output to LRD. \"\"\" stack = [] node = root result =", "= Queue() q.put(root) i = 1 while not q.empty() and i != len(array):", "while node or stack: # go to the most left node while node:", "stack.append(node) node = node.left node = stack.pop() result.append(node.val) node = node.right return '", "= TreeNode(array[0]) q = Queue() q.put(root) i = 1 while not q.empty() and", "[] node = root result = [] while node or stack: while node:", "node: result.append(node.val) stack.append(node) node = node.left node = stack.pop() node = node.right return", "' '.join(list(map(str, result))) def lrd(root: TreeNode): \"\"\" After getting DRL(reverse RL for DLR),", "from queue import Queue class TreeNode: def __init__(self, val): self.val = val self.left", "'.join(list(map(str, result))) def dlr(root: TreeNode): stack = [] node = root result =", "tree root = TreeNode(array[0]) q = Queue() q.put(root) i = 1 while not", "stack = [] node = root result = [] while node or stack:", "root result = [] while node or stack: # go to the most", "root = TreeNode(array[0]) q = Queue() q.put(root) i = 1 while not q.empty()", "# go to the most left node while node: stack.append(node) node = node.left", "= [] while node or stack: while node: result.append(node.val) stack.append(node) node = node.left", "# bfs construct binary tree root = TreeNode(array[0]) q = Queue() q.put(root) i", "LRD. 
\"\"\" stack = [] node = root result = [] while node", "i != len(array): node = q.get() node.left = TreeNode(array[i]) q.put(node.left) if i +", "stack.append(node) node = node.left node = stack.pop() node = node.right return ' '.join(list(map(str,", "node = stack.pop() result.append(node.val) node = node.right return ' '.join(list(map(str, result))) def dlr(root:", "def from_array(array): # bfs construct binary tree root = TreeNode(array[0]) q = Queue()", "= root result = [] while node or stack: while node: result.append(node.val) stack.append(node)", "if __name__ == '__main__': root = TreeNode(1) root.right = TreeNode(2) root.right.left = TreeNode(3)", "1]) q.put(node.right) i += 2 return root def ldr(root: TreeNode): stack = []", "stack.pop() node = node.right return ' '.join(list(map(str, result))) def lrd(root: TreeNode): \"\"\" After", "len(array): node = q.get() node.left = TreeNode(array[i]) q.put(node.left) if i + 1 !=", "[] while node or stack: # go to the most left node while", "def ldr(root: TreeNode): stack = [] node = root result = [] while", "+ 1 != len(array): node.right = TreeNode(array[i + 1]) q.put(node.right) i += 2", "node.right return ' '.join(list(map(str, result))) def lrd(root: TreeNode): \"\"\" After getting DRL(reverse RL", "return ' '.join(list(map(str, result))) def lrd(root: TreeNode): \"\"\" After getting DRL(reverse RL for", "result = [] while node or stack: while node: result.append(node.val) stack.append(node) node =", "TreeNode: def __init__(self, val): self.val = val self.left = None self.right = None", "i + 1 != len(array): node.right = TreeNode(array[i + 1]) q.put(node.right) i +=", "return ' '.join(list(map(str, result))) def dlr(root: TreeNode): stack = [] node = root", "DLR), reverse output to LRD. 
\"\"\" stack = [] node = root result", "= q.get() node.left = TreeNode(array[i]) q.put(node.left) if i + 1 != len(array): node.right", "node.left node = stack.pop() result.append(node.val) node = node.right return ' '.join(list(map(str, result))) def", "construct binary tree root = TreeNode(array[0]) q = Queue() q.put(root) i = 1", "None @staticmethod def from_array(array): # bfs construct binary tree root = TreeNode(array[0]) q", "lrd(root: TreeNode): \"\"\" After getting DRL(reverse RL for DLR), reverse output to LRD.", "stack.pop() result.append(node.val) node = node.right return ' '.join(list(map(str, result))) def dlr(root: TreeNode): stack", "node.left return ' '.join(list(map(str, reversed(result)))) if __name__ == '__main__': root = TreeNode(1) root.right", "= node.right return ' '.join(list(map(str, result))) def lrd(root: TreeNode): \"\"\" After getting DRL(reverse", "node.left = TreeNode(array[i]) q.put(node.left) if i + 1 != len(array): node.right = TreeNode(array[i", "bfs construct binary tree root = TreeNode(array[0]) q = Queue() q.put(root) i =" ]
[ "a[i] < b[i]: scoreB += 1 return (scoreA, scoreB) a = list(map(int, input().split()))", "elif a[i] < b[i]: scoreB += 1 return (scoreA, scoreB) a = list(map(int,", "def compare(a, b): scoreA = 0 scoreB = 0 for i in range(len(a)):", "a = list(map(int, input().split())) b = list(map(int, input().split())) scoreA, scoreB = compare(a,b) print(scoreA,", "= 0 scoreB = 0 for i in range(len(a)): if a[i] > b[i]:", "b[i]: scoreB += 1 return (scoreA, scoreB) a = list(map(int, input().split())) b =", "< b[i]: scoreB += 1 return (scoreA, scoreB) a = list(map(int, input().split())) b", "if a[i] > b[i]: scoreA += 1 elif a[i] < b[i]: scoreB +=", "+= 1 elif a[i] < b[i]: scoreB += 1 return (scoreA, scoreB) a", "scoreB) a = list(map(int, input().split())) b = list(map(int, input().split())) scoreA, scoreB = compare(a,b)", "in range(len(a)): if a[i] > b[i]: scoreA += 1 elif a[i] < b[i]:", "0 for i in range(len(a)): if a[i] > b[i]: scoreA += 1 elif", "scoreA = 0 scoreB = 0 for i in range(len(a)): if a[i] >", "for i in range(len(a)): if a[i] > b[i]: scoreA += 1 elif a[i]", "= list(map(int, input().split())) b = list(map(int, input().split())) scoreA, scoreB = compare(a,b) print(scoreA, scoreB)", "b[i]: scoreA += 1 elif a[i] < b[i]: scoreB += 1 return (scoreA,", "(scoreA, scoreB) a = list(map(int, input().split())) b = list(map(int, input().split())) scoreA, scoreB =", "> b[i]: scoreA += 1 elif a[i] < b[i]: scoreB += 1 return", "1 elif a[i] < b[i]: scoreB += 1 return (scoreA, scoreB) a =", "1 return (scoreA, scoreB) a = list(map(int, input().split())) b = list(map(int, input().split())) scoreA,", "i in range(len(a)): if a[i] > b[i]: scoreA += 1 elif a[i] <", "a[i] > b[i]: scoreA += 1 elif a[i] < b[i]: scoreB += 1", "scoreA += 1 elif a[i] < b[i]: scoreB += 1 return (scoreA, scoreB)", "compare(a, b): scoreA = 0 scoreB = 0 for i in range(len(a)): if", "0 scoreB = 0 for i in range(len(a)): if a[i] > b[i]: scoreA", "return (scoreA, scoreB) a = list(map(int, input().split())) b = 
list(map(int, input().split())) scoreA, scoreB", "= 0 for i in range(len(a)): if a[i] > b[i]: scoreA += 1", "scoreB = 0 for i in range(len(a)): if a[i] > b[i]: scoreA +=", "range(len(a)): if a[i] > b[i]: scoreA += 1 elif a[i] < b[i]: scoreB", "+= 1 return (scoreA, scoreB) a = list(map(int, input().split())) b = list(map(int, input().split()))", "b): scoreA = 0 scoreB = 0 for i in range(len(a)): if a[i]", "scoreB += 1 return (scoreA, scoreB) a = list(map(int, input().split())) b = list(map(int," ]
[ "open('beers.json','r') as fin: beers = json.load(fin) for beer in beers[\"beers\"]: family = None", "= reqparse.RequestParser() beer_get_parse.add_argument('beer_name', dest='beer_name', type=str, required=True, help='The Name of the beer') PBR =", "json.load(fin) for beer in beers[\"beers\"]: family = None if beer.get(\"family\").lower() in families.values(): family", "beers[\"beers\"]: family = None if beer.get(\"family\").lower() in families.values(): family = list(families.values()).index(beer.get(\"family\").lower()) + 1", "abv=beer[\"abv\"], bitter=beer[\"bitter\"], color=beer[\"color\"], fruit=beer[\"fruit\"], hoppy=beer[\"hoppy\"], malty=beer[\"malty\"], roasty=beer[\"roasty\"], sweet=beer[\"sweet\"], spice=beer[\"spice\"], wood=beer[\"wood\"], family=family, smoke=beer[\"smoke\"], sour=beer[\"sour\"])", "User from flask_restful import Resource, Api, reqparse, fields, marshal_with from flask.json import jsonify", "= args.beer_name beer = Beer.query.filter(Beer.beer_name == name).first() print(name) print(beer) if beer: return beer.to_data()", "args.beer_name beer = Beer.query.filter(Beer.beer_name == name).first() print(name) print(beer) if beer: return beer.to_data() else:", "family = None if beer.get(\"family\").lower() in families.values(): family = list(families.values()).index(beer.get(\"family\").lower()) + 1 else:", "name).first() print(name) print(beer) if beer: return beer.to_data() else: return None class BeersApi(Resource): def", "as fin: beers = json.load(fin) for beer in beers[\"beers\"]: family = None if", "Beer.create(beer_name=beer[\"name\"], abv=beer[\"abv\"], bitter=beer[\"bitter\"], color=beer[\"color\"], fruit=beer[\"fruit\"], hoppy=beer[\"hoppy\"], malty=beer[\"malty\"], roasty=beer[\"roasty\"], sweet=beer[\"sweet\"], spice=beer[\"spice\"], wood=beer[\"wood\"], family=family, smoke=beer[\"smoke\"],", "beer.get(\"family\").lower() in families.values(): family = list(families.values()).index(beer.get(\"family\").lower()) + 1 else: 
family = 1 #default", "+ 1 else: family = 1 #default to 1 if not a family", "if beer: return beer.to_data() else: return None class BeersApi(Resource): def get(self): beers =", "else: return {\"beers\": []} def put(self): print(os.getcwd()) with open('beers.json','r') as fin: beers =", "dest='beer_name', type=str, required=True, help='The Name of the beer') PBR = { \"sour\": 1,", "Name of the beer') PBR = { \"sour\": 1, \"malty\": 1, \"family\": \"pale-lager\",", "def put(self): print(os.getcwd()) with open('beers.json','r') as fin: beers = json.load(fin) for beer in", "beer in beers[\"beers\"]: family = None if beer.get(\"family\").lower() in families.values(): family = list(families.values()).index(beer.get(\"family\").lower())", "class BeersApi(Resource): def get(self): beers = Beer.query.all() if beers: return{\"beers\": [beer.to_data() for beer", "reqparse, fields, marshal_with from flask.json import jsonify import os import json beer_get_parse =", "1, \"spice\": 1, \"sweet\": 1, \"fruit\": 1 } class BeerApi(Resource): def get(self): args", "with open('beers.json','r') as fin: beers = json.load(fin) for beer in beers[\"beers\"]: family =", "[beer.to_data() for beer in beers]} else: return {\"beers\": []} def put(self): print(os.getcwd()) with", "\"PBR\", \"abv\": 1, \"wood\": 1, \"bitter\": 1, \"color\": 1, \"roasty\": 1, \"spice\": 1,", "help='The Name of the beer') PBR = { \"sour\": 1, \"malty\": 1, \"family\":", "beers]} else: return {\"beers\": []} def put(self): print(os.getcwd()) with open('beers.json','r') as fin: beers", "\"pale-lager\", \"hoppy\": 1, \"name\": \"PBR\", \"abv\": 1, \"wood\": 1, \"bitter\": 1, \"color\": 1,", "from beerbackend.user.models import Beer, families, User from flask_restful import Resource, Api, reqparse, fields,", "\"name\": \"PBR\", \"abv\": 1, \"wood\": 1, \"bitter\": 1, \"color\": 1, \"roasty\": 1, \"spice\":", "1 if not a family we know Beer.create(beer_name=beer[\"name\"], abv=beer[\"abv\"], bitter=beer[\"bitter\"], 
color=beer[\"color\"], fruit=beer[\"fruit\"], hoppy=beer[\"hoppy\"],", "def get(self): beers = Beer.query.all() if beers: return{\"beers\": [beer.to_data() for beer in beers]}", "beers = Beer.query.all() if beers: return{\"beers\": [beer.to_data() for beer in beers]} else: return", "put(self): print(os.getcwd()) with open('beers.json','r') as fin: beers = json.load(fin) for beer in beers[\"beers\"]:", "family we know Beer.create(beer_name=beer[\"name\"], abv=beer[\"abv\"], bitter=beer[\"bitter\"], color=beer[\"color\"], fruit=beer[\"fruit\"], hoppy=beer[\"hoppy\"], malty=beer[\"malty\"], roasty=beer[\"roasty\"], sweet=beer[\"sweet\"], spice=beer[\"spice\"],", "for beer in beers[\"beers\"]: family = None if beer.get(\"family\").lower() in families.values(): family =", "{\"beers\": []} def put(self): print(os.getcwd()) with open('beers.json','r') as fin: beers = json.load(fin) for", "\"abv\": 1, \"wood\": 1, \"bitter\": 1, \"color\": 1, \"roasty\": 1, \"spice\": 1, \"sweet\":", "json beer_get_parse = reqparse.RequestParser() beer_get_parse.add_argument('beer_name', dest='beer_name', type=str, required=True, help='The Name of the beer')", "import Beer, families, User from flask_restful import Resource, Api, reqparse, fields, marshal_with from", "in beers]} else: return {\"beers\": []} def put(self): print(os.getcwd()) with open('beers.json','r') as fin:", "\"malty\": 1, \"family\": \"pale-lager\", \"hoppy\": 1, \"name\": \"PBR\", \"abv\": 1, \"wood\": 1, \"bitter\":", "return beer.to_data() else: return None class BeersApi(Resource): def get(self): beers = Beer.query.all() if", "= 1 #default to 1 if not a family we know Beer.create(beer_name=beer[\"name\"], abv=beer[\"abv\"],", "\"wood\": 1, \"bitter\": 1, \"color\": 1, \"roasty\": 1, \"spice\": 1, \"sweet\": 1, \"fruit\":", "= list(families.values()).index(beer.get(\"family\").lower()) + 1 else: family = 1 #default to 1 if not", "family = list(families.values()).index(beer.get(\"family\").lower()) + 1 else: family = 
1 #default to 1 if", "from flask.json import jsonify import os import json beer_get_parse = reqparse.RequestParser() beer_get_parse.add_argument('beer_name', dest='beer_name',", "\"fruit\": 1 } class BeerApi(Resource): def get(self): args = beer_get_parse.parse_args() name = args.beer_name", "print(beer) if beer: return beer.to_data() else: return None class BeersApi(Resource): def get(self): beers", "= { \"sour\": 1, \"malty\": 1, \"family\": \"pale-lager\", \"hoppy\": 1, \"name\": \"PBR\", \"abv\":", "\"hoppy\": 1, \"name\": \"PBR\", \"abv\": 1, \"wood\": 1, \"bitter\": 1, \"color\": 1, \"roasty\":", "jsonify import os import json beer_get_parse = reqparse.RequestParser() beer_get_parse.add_argument('beer_name', dest='beer_name', type=str, required=True, help='The", "if beer.get(\"family\").lower() in families.values(): family = list(families.values()).index(beer.get(\"family\").lower()) + 1 else: family = 1", "import json beer_get_parse = reqparse.RequestParser() beer_get_parse.add_argument('beer_name', dest='beer_name', type=str, required=True, help='The Name of the", "beers = json.load(fin) for beer in beers[\"beers\"]: family = None if beer.get(\"family\").lower() in", "beer_get_parse = reqparse.RequestParser() beer_get_parse.add_argument('beer_name', dest='beer_name', type=str, required=True, help='The Name of the beer') PBR", "in families.values(): family = list(families.values()).index(beer.get(\"family\").lower()) + 1 else: family = 1 #default to", "[]} def put(self): print(os.getcwd()) with open('beers.json','r') as fin: beers = json.load(fin) for beer", "1 else: family = 1 #default to 1 if not a family we", "1 } class BeerApi(Resource): def get(self): args = beer_get_parse.parse_args() name = args.beer_name beer", "return None class BeersApi(Resource): def get(self): beers = Beer.query.all() if beers: return{\"beers\": [beer.to_data()", "= json.load(fin) for beer in beers[\"beers\"]: family = None if beer.get(\"family\").lower() in families.values():", 
"args = beer_get_parse.parse_args() name = args.beer_name beer = Beer.query.filter(Beer.beer_name == name).first() print(name) print(beer)", "to 1 if not a family we know Beer.create(beer_name=beer[\"name\"], abv=beer[\"abv\"], bitter=beer[\"bitter\"], color=beer[\"color\"], fruit=beer[\"fruit\"],", "None if beer.get(\"family\").lower() in families.values(): family = list(families.values()).index(beer.get(\"family\").lower()) + 1 else: family =", "fields, marshal_with from flask.json import jsonify import os import json beer_get_parse = reqparse.RequestParser()", "\"family\": \"pale-lager\", \"hoppy\": 1, \"name\": \"PBR\", \"abv\": 1, \"wood\": 1, \"bitter\": 1, \"color\":", "a family we know Beer.create(beer_name=beer[\"name\"], abv=beer[\"abv\"], bitter=beer[\"bitter\"], color=beer[\"color\"], fruit=beer[\"fruit\"], hoppy=beer[\"hoppy\"], malty=beer[\"malty\"], roasty=beer[\"roasty\"], sweet=beer[\"sweet\"],", "print(name) print(beer) if beer: return beer.to_data() else: return None class BeersApi(Resource): def get(self):", "1 #default to 1 if not a family we know Beer.create(beer_name=beer[\"name\"], abv=beer[\"abv\"], bitter=beer[\"bitter\"],", "= None if beer.get(\"family\").lower() in families.values(): family = list(families.values()).index(beer.get(\"family\").lower()) + 1 else: family", "if not a family we know Beer.create(beer_name=beer[\"name\"], abv=beer[\"abv\"], bitter=beer[\"bitter\"], color=beer[\"color\"], fruit=beer[\"fruit\"], hoppy=beer[\"hoppy\"], malty=beer[\"malty\"],", "name = args.beer_name beer = Beer.query.filter(Beer.beer_name == name).first() print(name) print(beer) if beer: return", "import jsonify import os import json beer_get_parse = reqparse.RequestParser() beer_get_parse.add_argument('beer_name', dest='beer_name', type=str, required=True,", "None class BeersApi(Resource): def get(self): beers = Beer.query.all() if beers: return{\"beers\": [beer.to_data() for", "1, \"wood\": 1, \"bitter\": 1, \"color\": 1, \"roasty\": 1, 
\"spice\": 1, \"sweet\": 1,", "{ \"sour\": 1, \"malty\": 1, \"family\": \"pale-lager\", \"hoppy\": 1, \"name\": \"PBR\", \"abv\": 1,", "import os import json beer_get_parse = reqparse.RequestParser() beer_get_parse.add_argument('beer_name', dest='beer_name', type=str, required=True, help='The Name", "beer = Beer.query.filter(Beer.beer_name == name).first() print(name) print(beer) if beer: return beer.to_data() else: return", "required=True, help='The Name of the beer') PBR = { \"sour\": 1, \"malty\": 1,", "BeerApi(Resource): def get(self): args = beer_get_parse.parse_args() name = args.beer_name beer = Beer.query.filter(Beer.beer_name ==", "} class BeerApi(Resource): def get(self): args = beer_get_parse.parse_args() name = args.beer_name beer =", "= Beer.query.all() if beers: return{\"beers\": [beer.to_data() for beer in beers]} else: return {\"beers\":", "def get(self): args = beer_get_parse.parse_args() name = args.beer_name beer = Beer.query.filter(Beer.beer_name == name).first()", "families.values(): family = list(families.values()).index(beer.get(\"family\").lower()) + 1 else: family = 1 #default to 1", "Resource, Api, reqparse, fields, marshal_with from flask.json import jsonify import os import json", "type=str, required=True, help='The Name of the beer') PBR = { \"sour\": 1, \"malty\":", "\"spice\": 1, \"sweet\": 1, \"fruit\": 1 } class BeerApi(Resource): def get(self): args =", "\"bitter\": 1, \"color\": 1, \"roasty\": 1, \"spice\": 1, \"sweet\": 1, \"fruit\": 1 }", "beerbackend.user.models import Beer, families, User from flask_restful import Resource, Api, reqparse, fields, marshal_with", "get(self): beers = Beer.query.all() if beers: return{\"beers\": [beer.to_data() for beer in beers]} else:", "1, \"bitter\": 1, \"color\": 1, \"roasty\": 1, \"spice\": 1, \"sweet\": 1, \"fruit\": 1", "in beers[\"beers\"]: family = None if beer.get(\"family\").lower() in families.values(): family = list(families.values()).index(beer.get(\"family\").lower()) +", "import 
Resource, Api, reqparse, fields, marshal_with from flask.json import jsonify import os import", "flask_restful import Resource, Api, reqparse, fields, marshal_with from flask.json import jsonify import os", "fin: beers = json.load(fin) for beer in beers[\"beers\"]: family = None if beer.get(\"family\").lower()", "reqparse.RequestParser() beer_get_parse.add_argument('beer_name', dest='beer_name', type=str, required=True, help='The Name of the beer') PBR = {", "Api, reqparse, fields, marshal_with from flask.json import jsonify import os import json beer_get_parse", "beer: return beer.to_data() else: return None class BeersApi(Resource): def get(self): beers = Beer.query.all()", "Beer, families, User from flask_restful import Resource, Api, reqparse, fields, marshal_with from flask.json", "PBR = { \"sour\": 1, \"malty\": 1, \"family\": \"pale-lager\", \"hoppy\": 1, \"name\": \"PBR\",", "\"roasty\": 1, \"spice\": 1, \"sweet\": 1, \"fruit\": 1 } class BeerApi(Resource): def get(self):", "== name).first() print(name) print(beer) if beer: return beer.to_data() else: return None class BeersApi(Resource):", "1, \"name\": \"PBR\", \"abv\": 1, \"wood\": 1, \"bitter\": 1, \"color\": 1, \"roasty\": 1,", "family = 1 #default to 1 if not a family we know Beer.create(beer_name=beer[\"name\"],", "print(os.getcwd()) with open('beers.json','r') as fin: beers = json.load(fin) for beer in beers[\"beers\"]: family", "1, \"malty\": 1, \"family\": \"pale-lager\", \"hoppy\": 1, \"name\": \"PBR\", \"abv\": 1, \"wood\": 1,", "beer_get_parse.parse_args() name = args.beer_name beer = Beer.query.filter(Beer.beer_name == name).first() print(name) print(beer) if beer:", "list(families.values()).index(beer.get(\"family\").lower()) + 1 else: family = 1 #default to 1 if not a", "class BeerApi(Resource): def get(self): args = beer_get_parse.parse_args() name = args.beer_name beer = Beer.query.filter(Beer.beer_name", "get(self): args = beer_get_parse.parse_args() name = args.beer_name beer = 
Beer.query.filter(Beer.beer_name == name).first() print(name)", "= Beer.query.filter(Beer.beer_name == name).first() print(name) print(beer) if beer: return beer.to_data() else: return None", "we know Beer.create(beer_name=beer[\"name\"], abv=beer[\"abv\"], bitter=beer[\"bitter\"], color=beer[\"color\"], fruit=beer[\"fruit\"], hoppy=beer[\"hoppy\"], malty=beer[\"malty\"], roasty=beer[\"roasty\"], sweet=beer[\"sweet\"], spice=beer[\"spice\"], wood=beer[\"wood\"],", "BeersApi(Resource): def get(self): beers = Beer.query.all() if beers: return{\"beers\": [beer.to_data() for beer in", "families, User from flask_restful import Resource, Api, reqparse, fields, marshal_with from flask.json import", "flask.json import jsonify import os import json beer_get_parse = reqparse.RequestParser() beer_get_parse.add_argument('beer_name', dest='beer_name', type=str,", "1, \"sweet\": 1, \"fruit\": 1 } class BeerApi(Resource): def get(self): args = beer_get_parse.parse_args()", "\"sour\": 1, \"malty\": 1, \"family\": \"pale-lager\", \"hoppy\": 1, \"name\": \"PBR\", \"abv\": 1, \"wood\":", "from flask_restful import Resource, Api, reqparse, fields, marshal_with from flask.json import jsonify import", "not a family we know Beer.create(beer_name=beer[\"name\"], abv=beer[\"abv\"], bitter=beer[\"bitter\"], color=beer[\"color\"], fruit=beer[\"fruit\"], hoppy=beer[\"hoppy\"], malty=beer[\"malty\"], roasty=beer[\"roasty\"],", "1, \"fruit\": 1 } class BeerApi(Resource): def get(self): args = beer_get_parse.parse_args() name =", "1, \"roasty\": 1, \"spice\": 1, \"sweet\": 1, \"fruit\": 1 } class BeerApi(Resource): def", "else: family = 1 #default to 1 if not a family we know", "return {\"beers\": []} def put(self): print(os.getcwd()) with open('beers.json','r') as fin: beers = json.load(fin)", "\"color\": 1, \"roasty\": 1, \"spice\": 1, \"sweet\": 1, \"fruit\": 1 } class BeerApi(Resource):", "Beer.query.filter(Beer.beer_name == name).first() print(name) print(beer) if beer: return 
beer.to_data() else: return None class", "#default to 1 if not a family we know Beer.create(beer_name=beer[\"name\"], abv=beer[\"abv\"], bitter=beer[\"bitter\"], color=beer[\"color\"],", "else: return None class BeersApi(Resource): def get(self): beers = Beer.query.all() if beers: return{\"beers\":", "= beer_get_parse.parse_args() name = args.beer_name beer = Beer.query.filter(Beer.beer_name == name).first() print(name) print(beer) if", "beer in beers]} else: return {\"beers\": []} def put(self): print(os.getcwd()) with open('beers.json','r') as", "know Beer.create(beer_name=beer[\"name\"], abv=beer[\"abv\"], bitter=beer[\"bitter\"], color=beer[\"color\"], fruit=beer[\"fruit\"], hoppy=beer[\"hoppy\"], malty=beer[\"malty\"], roasty=beer[\"roasty\"], sweet=beer[\"sweet\"], spice=beer[\"spice\"], wood=beer[\"wood\"], family=family,", "1, \"family\": \"pale-lager\", \"hoppy\": 1, \"name\": \"PBR\", \"abv\": 1, \"wood\": 1, \"bitter\": 1,", "if beers: return{\"beers\": [beer.to_data() for beer in beers]} else: return {\"beers\": []} def", "the beer') PBR = { \"sour\": 1, \"malty\": 1, \"family\": \"pale-lager\", \"hoppy\": 1,", "beer') PBR = { \"sour\": 1, \"malty\": 1, \"family\": \"pale-lager\", \"hoppy\": 1, \"name\":", "os import json beer_get_parse = reqparse.RequestParser() beer_get_parse.add_argument('beer_name', dest='beer_name', type=str, required=True, help='The Name of", "Beer.query.all() if beers: return{\"beers\": [beer.to_data() for beer in beers]} else: return {\"beers\": []}", "beers: return{\"beers\": [beer.to_data() for beer in beers]} else: return {\"beers\": []} def put(self):", "beer_get_parse.add_argument('beer_name', dest='beer_name', type=str, required=True, help='The Name of the beer') PBR = { \"sour\":", "marshal_with from flask.json import jsonify import os import json beer_get_parse = reqparse.RequestParser() beer_get_parse.add_argument('beer_name',", "return{\"beers\": [beer.to_data() for beer in beers]} else: return {\"beers\": []} def 
put(self): print(os.getcwd())", "for beer in beers]} else: return {\"beers\": []} def put(self): print(os.getcwd()) with open('beers.json','r')", "\"sweet\": 1, \"fruit\": 1 } class BeerApi(Resource): def get(self): args = beer_get_parse.parse_args() name", "1, \"color\": 1, \"roasty\": 1, \"spice\": 1, \"sweet\": 1, \"fruit\": 1 } class", "beer.to_data() else: return None class BeersApi(Resource): def get(self): beers = Beer.query.all() if beers:", "of the beer') PBR = { \"sour\": 1, \"malty\": 1, \"family\": \"pale-lager\", \"hoppy\":" ]
[ "= \"Application\" channel = grpc.insecure_channel('localhost:40400') stub = fabric_pb2_grpc.FabricStub(channel) # Used to call rpc", "affinity_key_field = \"affinity\") client.register_binary_type(NotificationKeyString, affinity_key_field = \"affinity\") client.register_binary_type(SimpleData) calls = client.get_cache(\"calls\") cache_config =", "assert item != None if \"cache\" in vars(item): item_cache = client.get_cache(item.cache); final_item =", "# Used to call rpc methods notification_filter = fabric_pb2.NotificationFilter(agentDelegate=agent_delegate); print(f\"Using notification filter with", "= None key = None print(notification) if not (notification.stringAffinity in (None, \"\")): key", "0) : key = NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity) item = notifications_cache.get(key) assert item != None", "10800) client.register_binary_type(NotificationKeyLong, affinity_key_field = \"affinity\") client.register_binary_type(NotificationKeyString, affinity_key_field = \"affinity\") client.register_binary_type(SimpleData) calls = client.get_cache(\"calls\")", "affinity=notification.stringAffinity) if (notification.intAffinity != 0) : key = NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity) item = notifications_cache.get(key)", "Notification import CallData from Notification import Notification, StreamItemNotification, StreamTriggerNotification, CallResultNotification, CallTriggerNotification, CallData from", "from Notification import CallData from Notification import Notification, StreamItemNotification, StreamTriggerNotification, CallResultNotification, CallTriggerNotification, CallData", "import CallData from Notification import Notification, StreamItemNotification, StreamTriggerNotification, CallResultNotification, CallTriggerNotification, CallData from PerperThinClient", "grpc import fabric_pb2_grpc import fabric_pb2 from SimpleData import SimpleData from 
pyignite.datatypes.prop_codes import *", "{ PROP_NAME: f\"{agent_delegate}-$notifications\", PROP_CACHE_KEY_CONFIGURATION: [ { 'type_name': 'NotificationKey', 'affinity_key_field_name': 'affinity' } ] }", "to call rpc methods notification_filter = fabric_pb2.NotificationFilter(agentDelegate=agent_delegate); print(f\"Using notification filter with parameters {notification_filter}\");", "item != None if \"cache\" in vars(item): item_cache = client.get_cache(item.cache); final_item = item_cache.get(item.key);", "!= 0) : key = NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity) item = notifications_cache.get(key) assert item !=", "!= None if \"cache\" in vars(item): item_cache = client.get_cache(item.cache); final_item = item_cache.get(item.key); print(f\"Retrieved", "client.compact_footer = True client.connect('localhost', 10800) client.register_binary_type(NotificationKeyLong, affinity_key_field = \"affinity\") client.register_binary_type(NotificationKeyString, affinity_key_field = \"affinity\")", "fabric_pb2_grpc.FabricStub(channel) # Used to call rpc methods notification_filter = fabric_pb2.NotificationFilter(agentDelegate=agent_delegate); print(f\"Using notification filter", "PerperThinClient import PerperThinClient agent_delegate = \"Application\" channel = grpc.insecure_channel('localhost:40400') stub = fabric_pb2_grpc.FabricStub(channel) #", "\"Application\" channel = grpc.insecure_channel('localhost:40400') stub = fabric_pb2_grpc.FabricStub(channel) # Used to call rpc methods", "= { PROP_NAME: f\"{agent_delegate}-$notifications\", PROP_CACHE_KEY_CONFIGURATION: [ { 'type_name': 'NotificationKey', 'affinity_key_field_name': 'affinity' } ]", "(notification.stringAffinity in (None, \"\")): key = NotificationKeyString(key=notification.notificationKey, affinity=notification.stringAffinity) if (notification.intAffinity != 0) :", "SimpleData import SimpleData from pyignite.datatypes.prop_codes import * from AffinityKey import * 
from Notification", "PerperThinClient agent_delegate = \"Application\" channel = grpc.insecure_channel('localhost:40400') stub = fabric_pb2_grpc.FabricStub(channel) # Used to", "= client.get_cache(cache_config); for notification in stub.Notifications(notification_filter): item = None key = None print(notification)", "if (notification.intAffinity != 0) : key = NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity) item = notifications_cache.get(key) assert", "None if \"cache\" in vars(item): item_cache = client.get_cache(item.cache); final_item = item_cache.get(item.key); print(f\"Retrieved Item:", "= PerperThinClient() client.compact_footer = True client.connect('localhost', 10800) client.register_binary_type(NotificationKeyLong, affinity_key_field = \"affinity\") client.register_binary_type(NotificationKeyString, affinity_key_field", "from Notification import Notification, StreamItemNotification, StreamTriggerNotification, CallResultNotification, CallTriggerNotification, CallData from PerperThinClient import PerperThinClient", "import fabric_pb2_grpc import fabric_pb2 from SimpleData import SimpleData from pyignite.datatypes.prop_codes import * from", "CallData from PerperThinClient import PerperThinClient agent_delegate = \"Application\" channel = grpc.insecure_channel('localhost:40400') stub =", "if not (notification.stringAffinity in (None, \"\")): key = NotificationKeyString(key=notification.notificationKey, affinity=notification.stringAffinity) if (notification.intAffinity !=", "stub = fabric_pb2_grpc.FabricStub(channel) # Used to call rpc methods notification_filter = fabric_pb2.NotificationFilter(agentDelegate=agent_delegate); print(f\"Using", "PROP_CACHE_KEY_CONFIGURATION: [ { 'type_name': 'NotificationKey', 'affinity_key_field_name': 'affinity' } ] } notifications_cache = client.get_cache(cache_config);", "fabric_pb2.NotificationFilter(agentDelegate=agent_delegate); print(f\"Using notification filter with parameters 
{notification_filter}\"); print(\"Processing notifications...\") client = PerperThinClient() client.compact_footer", "\"affinity\") client.register_binary_type(NotificationKeyString, affinity_key_field = \"affinity\") client.register_binary_type(SimpleData) calls = client.get_cache(\"calls\") cache_config = { PROP_NAME:", "{notification_filter}\"); print(\"Processing notifications...\") client = PerperThinClient() client.compact_footer = True client.connect('localhost', 10800) client.register_binary_type(NotificationKeyLong, affinity_key_field", "CallResultNotification, CallTriggerNotification, CallData from PerperThinClient import PerperThinClient agent_delegate = \"Application\" channel = grpc.insecure_channel('localhost:40400')", "PROP_NAME: f\"{agent_delegate}-$notifications\", PROP_CACHE_KEY_CONFIGURATION: [ { 'type_name': 'NotificationKey', 'affinity_key_field_name': 'affinity' } ] } notifications_cache", "import * from AffinityKey import * from Notification import CallData from Notification import", "'affinity_key_field_name': 'affinity' } ] } notifications_cache = client.get_cache(cache_config); for notification in stub.Notifications(notification_filter): item", ": key = NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity) item = notifications_cache.get(key) assert item != None if", "key = NotificationKeyString(key=notification.notificationKey, affinity=notification.stringAffinity) if (notification.intAffinity != 0) : key = NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity)", "fabric_pb2 from SimpleData import SimpleData from pyignite.datatypes.prop_codes import * from AffinityKey import *", "* from Notification import CallData from Notification import Notification, StreamItemNotification, StreamTriggerNotification, CallResultNotification, CallTriggerNotification,", "if \"cache\" in vars(item): item_cache = client.get_cache(item.cache); final_item = item_cache.get(item.key); 
print(f\"Retrieved Item: {final_item}\");", "import grpc import fabric_pb2_grpc import fabric_pb2 from SimpleData import SimpleData from pyignite.datatypes.prop_codes import", "] } notifications_cache = client.get_cache(cache_config); for notification in stub.Notifications(notification_filter): item = None key", "client.register_binary_type(NotificationKeyLong, affinity_key_field = \"affinity\") client.register_binary_type(NotificationKeyString, affinity_key_field = \"affinity\") client.register_binary_type(SimpleData) calls = client.get_cache(\"calls\") cache_config", "None print(notification) if not (notification.stringAffinity in (None, \"\")): key = NotificationKeyString(key=notification.notificationKey, affinity=notification.stringAffinity) if", "= fabric_pb2_grpc.FabricStub(channel) # Used to call rpc methods notification_filter = fabric_pb2.NotificationFilter(agentDelegate=agent_delegate); print(f\"Using notification", "item = notifications_cache.get(key) assert item != None if \"cache\" in vars(item): item_cache =", "print(f\"Using notification filter with parameters {notification_filter}\"); print(\"Processing notifications...\") client = PerperThinClient() client.compact_footer =", "\"\")): key = NotificationKeyString(key=notification.notificationKey, affinity=notification.stringAffinity) if (notification.intAffinity != 0) : key = NotificationKeyLong(key=notification.notificationKey,", "CallData from Notification import Notification, StreamItemNotification, StreamTriggerNotification, CallResultNotification, CallTriggerNotification, CallData from PerperThinClient import", "NotificationKeyString(key=notification.notificationKey, affinity=notification.stringAffinity) if (notification.intAffinity != 0) : key = NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity) item =", "[ { 'type_name': 'NotificationKey', 'affinity_key_field_name': 'affinity' } ] } notifications_cache = client.get_cache(cache_config); for", "(None, 
\"\")): key = NotificationKeyString(key=notification.notificationKey, affinity=notification.stringAffinity) if (notification.intAffinity != 0) : key =", "AffinityKey import * from Notification import CallData from Notification import Notification, StreamItemNotification, StreamTriggerNotification,", "grpc.insecure_channel('localhost:40400') stub = fabric_pb2_grpc.FabricStub(channel) # Used to call rpc methods notification_filter = fabric_pb2.NotificationFilter(agentDelegate=agent_delegate);", "StreamTriggerNotification, CallResultNotification, CallTriggerNotification, CallData from PerperThinClient import PerperThinClient agent_delegate = \"Application\" channel =", "\"affinity\") client.register_binary_type(SimpleData) calls = client.get_cache(\"calls\") cache_config = { PROP_NAME: f\"{agent_delegate}-$notifications\", PROP_CACHE_KEY_CONFIGURATION: [ {", "notifications...\") client = PerperThinClient() client.compact_footer = True client.connect('localhost', 10800) client.register_binary_type(NotificationKeyLong, affinity_key_field = \"affinity\")", "import * from Notification import CallData from Notification import Notification, StreamItemNotification, StreamTriggerNotification, CallResultNotification,", "Notification import Notification, StreamItemNotification, StreamTriggerNotification, CallResultNotification, CallTriggerNotification, CallData from PerperThinClient import PerperThinClient agent_delegate", "= fabric_pb2.NotificationFilter(agentDelegate=agent_delegate); print(f\"Using notification filter with parameters {notification_filter}\"); print(\"Processing notifications...\") client = PerperThinClient()", "'type_name': 'NotificationKey', 'affinity_key_field_name': 'affinity' } ] } notifications_cache = client.get_cache(cache_config); for notification in", "NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity) item = notifications_cache.get(key) assert item != None if \"cache\" in vars(item):", "client = 
PerperThinClient() client.compact_footer = True client.connect('localhost', 10800) client.register_binary_type(NotificationKeyLong, affinity_key_field = \"affinity\") client.register_binary_type(NotificationKeyString,", "import SimpleData from pyignite.datatypes.prop_codes import * from AffinityKey import * from Notification import", "from SimpleData import SimpleData from pyignite.datatypes.prop_codes import * from AffinityKey import * from", "{ 'type_name': 'NotificationKey', 'affinity_key_field_name': 'affinity' } ] } notifications_cache = client.get_cache(cache_config); for notification", "= \"affinity\") client.register_binary_type(SimpleData) calls = client.get_cache(\"calls\") cache_config = { PROP_NAME: f\"{agent_delegate}-$notifications\", PROP_CACHE_KEY_CONFIGURATION: [", "notifications_cache = client.get_cache(cache_config); for notification in stub.Notifications(notification_filter): item = None key = None", "notification in stub.Notifications(notification_filter): item = None key = None print(notification) if not (notification.stringAffinity", "} ] } notifications_cache = client.get_cache(cache_config); for notification in stub.Notifications(notification_filter): item = None", "call rpc methods notification_filter = fabric_pb2.NotificationFilter(agentDelegate=agent_delegate); print(f\"Using notification filter with parameters {notification_filter}\"); print(\"Processing", "'affinity' } ] } notifications_cache = client.get_cache(cache_config); for notification in stub.Notifications(notification_filter): item =", "= NotificationKeyString(key=notification.notificationKey, affinity=notification.stringAffinity) if (notification.intAffinity != 0) : key = NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity) item", "rpc methods notification_filter = fabric_pb2.NotificationFilter(agentDelegate=agent_delegate); print(f\"Using notification filter with parameters {notification_filter}\"); print(\"Processing notifications...\")", 
"= client.get_cache(\"calls\") cache_config = { PROP_NAME: f\"{agent_delegate}-$notifications\", PROP_CACHE_KEY_CONFIGURATION: [ { 'type_name': 'NotificationKey', 'affinity_key_field_name':", "stub.Notifications(notification_filter): item = None key = None print(notification) if not (notification.stringAffinity in (None,", "methods notification_filter = fabric_pb2.NotificationFilter(agentDelegate=agent_delegate); print(f\"Using notification filter with parameters {notification_filter}\"); print(\"Processing notifications...\") client", "cache_config = { PROP_NAME: f\"{agent_delegate}-$notifications\", PROP_CACHE_KEY_CONFIGURATION: [ { 'type_name': 'NotificationKey', 'affinity_key_field_name': 'affinity' }", "channel = grpc.insecure_channel('localhost:40400') stub = fabric_pb2_grpc.FabricStub(channel) # Used to call rpc methods notification_filter", "from AffinityKey import * from Notification import CallData from Notification import Notification, StreamItemNotification,", "item = None key = None print(notification) if not (notification.stringAffinity in (None, \"\")):", "client.connect('localhost', 10800) client.register_binary_type(NotificationKeyLong, affinity_key_field = \"affinity\") client.register_binary_type(NotificationKeyString, affinity_key_field = \"affinity\") client.register_binary_type(SimpleData) calls =", "fabric_pb2_grpc import fabric_pb2 from SimpleData import SimpleData from pyignite.datatypes.prop_codes import * from AffinityKey", "import Notification, StreamItemNotification, StreamTriggerNotification, CallResultNotification, CallTriggerNotification, CallData from PerperThinClient import PerperThinClient agent_delegate =", "key = NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity) item = notifications_cache.get(key) assert item != None if \"cache\"", "PerperThinClient() client.compact_footer = True client.connect('localhost', 10800) client.register_binary_type(NotificationKeyLong, affinity_key_field = 
\"affinity\") client.register_binary_type(NotificationKeyString, affinity_key_field =", "notification_filter = fabric_pb2.NotificationFilter(agentDelegate=agent_delegate); print(f\"Using notification filter with parameters {notification_filter}\"); print(\"Processing notifications...\") client =", "calls = client.get_cache(\"calls\") cache_config = { PROP_NAME: f\"{agent_delegate}-$notifications\", PROP_CACHE_KEY_CONFIGURATION: [ { 'type_name': 'NotificationKey',", "client.get_cache(\"calls\") cache_config = { PROP_NAME: f\"{agent_delegate}-$notifications\", PROP_CACHE_KEY_CONFIGURATION: [ { 'type_name': 'NotificationKey', 'affinity_key_field_name': 'affinity'", "f\"{agent_delegate}-$notifications\", PROP_CACHE_KEY_CONFIGURATION: [ { 'type_name': 'NotificationKey', 'affinity_key_field_name': 'affinity' } ] } notifications_cache =", "} notifications_cache = client.get_cache(cache_config); for notification in stub.Notifications(notification_filter): item = None key =", "client.register_binary_type(NotificationKeyString, affinity_key_field = \"affinity\") client.register_binary_type(SimpleData) calls = client.get_cache(\"calls\") cache_config = { PROP_NAME: f\"{agent_delegate}-$notifications\",", "notification filter with parameters {notification_filter}\"); print(\"Processing notifications...\") client = PerperThinClient() client.compact_footer = True", "import PerperThinClient agent_delegate = \"Application\" channel = grpc.insecure_channel('localhost:40400') stub = fabric_pb2_grpc.FabricStub(channel) # Used", "print(\"Processing notifications...\") client = PerperThinClient() client.compact_footer = True client.connect('localhost', 10800) client.register_binary_type(NotificationKeyLong, affinity_key_field =", "= notifications_cache.get(key) assert item != None if \"cache\" in vars(item): item_cache = client.get_cache(item.cache);", "affinity_key_field = \"affinity\") client.register_binary_type(SimpleData) calls = client.get_cache(\"calls\") cache_config = { 
PROP_NAME: f\"{agent_delegate}-$notifications\", PROP_CACHE_KEY_CONFIGURATION:", "Used to call rpc methods notification_filter = fabric_pb2.NotificationFilter(agentDelegate=agent_delegate); print(f\"Using notification filter with parameters", "client.register_binary_type(SimpleData) calls = client.get_cache(\"calls\") cache_config = { PROP_NAME: f\"{agent_delegate}-$notifications\", PROP_CACHE_KEY_CONFIGURATION: [ { 'type_name':", "= NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity) item = notifications_cache.get(key) assert item != None if \"cache\" in", "<reponame>obecto/perper<filename>samples/data-serialization/ds-python-grpc/main.py<gh_stars>10-100 import grpc import fabric_pb2_grpc import fabric_pb2 from SimpleData import SimpleData from pyignite.datatypes.prop_codes", "print(notification) if not (notification.stringAffinity in (None, \"\")): key = NotificationKeyString(key=notification.notificationKey, affinity=notification.stringAffinity) if (notification.intAffinity", "key = None print(notification) if not (notification.stringAffinity in (None, \"\")): key = NotificationKeyString(key=notification.notificationKey,", "in stub.Notifications(notification_filter): item = None key = None print(notification) if not (notification.stringAffinity in", "StreamItemNotification, StreamTriggerNotification, CallResultNotification, CallTriggerNotification, CallData from PerperThinClient import PerperThinClient agent_delegate = \"Application\" channel", "import fabric_pb2 from SimpleData import SimpleData from pyignite.datatypes.prop_codes import * from AffinityKey import", "parameters {notification_filter}\"); print(\"Processing notifications...\") client = PerperThinClient() client.compact_footer = True client.connect('localhost', 10800) client.register_binary_type(NotificationKeyLong,", "not (notification.stringAffinity in (None, \"\")): key = NotificationKeyString(key=notification.notificationKey, 
affinity=notification.stringAffinity) if (notification.intAffinity != 0)", "in (None, \"\")): key = NotificationKeyString(key=notification.notificationKey, affinity=notification.stringAffinity) if (notification.intAffinity != 0) : key", "\"cache\" in vars(item): item_cache = client.get_cache(item.cache); final_item = item_cache.get(item.key); print(f\"Retrieved Item: {final_item}\"); notifications_cache.get_and_remove(key);", "SimpleData from pyignite.datatypes.prop_codes import * from AffinityKey import * from Notification import CallData", "from pyignite.datatypes.prop_codes import * from AffinityKey import * from Notification import CallData from", "CallTriggerNotification, CallData from PerperThinClient import PerperThinClient agent_delegate = \"Application\" channel = grpc.insecure_channel('localhost:40400') stub", "True client.connect('localhost', 10800) client.register_binary_type(NotificationKeyLong, affinity_key_field = \"affinity\") client.register_binary_type(NotificationKeyString, affinity_key_field = \"affinity\") client.register_binary_type(SimpleData) calls", "with parameters {notification_filter}\"); print(\"Processing notifications...\") client = PerperThinClient() client.compact_footer = True client.connect('localhost', 10800)", "= \"affinity\") client.register_binary_type(NotificationKeyString, affinity_key_field = \"affinity\") client.register_binary_type(SimpleData) calls = client.get_cache(\"calls\") cache_config = {", "pyignite.datatypes.prop_codes import * from AffinityKey import * from Notification import CallData from Notification", "filter with parameters {notification_filter}\"); print(\"Processing notifications...\") client = PerperThinClient() client.compact_footer = True client.connect('localhost',", "= None print(notification) if not (notification.stringAffinity in (None, \"\")): key = NotificationKeyString(key=notification.notificationKey, affinity=notification.stringAffinity)", "affinity=notification.intAffinity) item = 
notifications_cache.get(key) assert item != None if \"cache\" in vars(item): item_cache", "None key = None print(notification) if not (notification.stringAffinity in (None, \"\")): key =", "* from AffinityKey import * from Notification import CallData from Notification import Notification,", "(notification.intAffinity != 0) : key = NotificationKeyLong(key=notification.notificationKey, affinity=notification.intAffinity) item = notifications_cache.get(key) assert item", "agent_delegate = \"Application\" channel = grpc.insecure_channel('localhost:40400') stub = fabric_pb2_grpc.FabricStub(channel) # Used to call", "= True client.connect('localhost', 10800) client.register_binary_type(NotificationKeyLong, affinity_key_field = \"affinity\") client.register_binary_type(NotificationKeyString, affinity_key_field = \"affinity\") client.register_binary_type(SimpleData)", "client.get_cache(cache_config); for notification in stub.Notifications(notification_filter): item = None key = None print(notification) if", "= grpc.insecure_channel('localhost:40400') stub = fabric_pb2_grpc.FabricStub(channel) # Used to call rpc methods notification_filter =", "notifications_cache.get(key) assert item != None if \"cache\" in vars(item): item_cache = client.get_cache(item.cache); final_item", "from PerperThinClient import PerperThinClient agent_delegate = \"Application\" channel = grpc.insecure_channel('localhost:40400') stub = fabric_pb2_grpc.FabricStub(channel)", "for notification in stub.Notifications(notification_filter): item = None key = None print(notification) if not", "'NotificationKey', 'affinity_key_field_name': 'affinity' } ] } notifications_cache = client.get_cache(cache_config); for notification in stub.Notifications(notification_filter):", "Notification, StreamItemNotification, StreamTriggerNotification, CallResultNotification, CallTriggerNotification, CallData from PerperThinClient import PerperThinClient agent_delegate = \"Application\"" ]
[]
[ "certain layer :param units: convnet layer \"\"\" filters = units.shape[3] plt.figure(1, figsize=(20, 20))", "plotNNFilter(units): \"\"\" Function to plot a certain layer :param units: convnet layer \"\"\"", "activations :param checkpoint_file: The saved model parameters for the basic model :param image:", "layer \"\"\" filters = units.shape[3] plt.figure(1, figsize=(20, 20)) for i in xrange(0, filters):", "dimensions as training). \"\"\" with tf.Graph().as_default(): image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 1]) image", "models.base_convnet import inference import matplotlib.pyplot as plt import tensorflow as tf import cv2", "activations. wd = os.getcwd() test_images = wd + '/test_data/number_samples/' for image_dir in os.listdir(test_images):", "parameters for the basic model :param image: The supplied image (same dimensions as", "the model is saved :param image_dir: \"\"\" read_image = cv2.imread(image_dir, 0) read_image =", "the model # and visualize activations. wd = os.getcwd() test_images = wd +", "units.shape[3] plt.figure(1, figsize=(20, 20)) for i in xrange(0, filters): plt.subplot(7, 6, i+1) plt.title('Filter", "through the model # and visualize activations. wd = os.getcwd() test_images = wd", "= tf.cast(image, tf.float32) relu1, relu2, relu3 = inference(train=False, images=image, visualize=True) saver = tf.train.Saver(tf.all_variables())", "= tf.reshape(image, [1, IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.cast(image, tf.float32) relu1, relu2, relu3", "then run the test images through the model # and visualize activations. wd", "read_image = cv2.resize(read_image, (24, 24), interpolation=cv2.INTER_AREA) run_model_image(checkpoint_file='./base/base.ckpt', image=read_image) def visualize_activations(): # Get the", "supplied image (same dimensions as training). 
\"\"\" with tf.Graph().as_default(): image = tf.reshape(image, [IMAGE_SIZE,", "os.getcwd() test_images = wd + '/test_data/number_samples/' for image_dir in os.listdir(test_images): print image_dir plot_activations(test_images", "and then run the test images through the model # and visualize activations.", "import inference import matplotlib.pyplot as plt import tensorflow as tf import cv2 import", "relu1.eval(session=sess) plotNNFilter(units) units = relu2.eval(session=sess) plotNNFilter(units) units = relu3.eval(session=sess) plotNNFilter(units) def plot_activations(image_dir): \"\"\"", "import cv2 import os IMAGE_SIZE = 24 def plotNNFilter(units): \"\"\" Function to plot", "filters): plt.subplot(7, 6, i+1) plt.title('Filter ' + str(i)) plt.imshow(units[0, :, :, i], interpolation=\"nearest\",", "import tensorflow as tf import cv2 import os IMAGE_SIZE = 24 def plotNNFilter(units):", "layer :param units: convnet layer \"\"\" filters = units.shape[3] plt.figure(1, figsize=(20, 20)) for", "tensorflow as tf import cv2 import os IMAGE_SIZE = 24 def plotNNFilter(units): \"\"\"", "as training). 
\"\"\" with tf.Graph().as_default(): image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 1]) image =", "test image directory and then run the test images through the model #", "directory and then run the test images through the model # and visualize", "plt.subplot(7, 6, i+1) plt.title('Filter ' + str(i)) plt.imshow(units[0, :, :, i], interpolation=\"nearest\", cmap=\"gray\")", "i in xrange(0, filters): plt.subplot(7, 6, i+1) plt.title('Filter ' + str(i)) plt.imshow(units[0, :,", "plt.imshow(units[0, :, :, i], interpolation=\"nearest\", cmap=\"gray\") plt.tight_layout(pad=3.0) plt.show() def run_model_image(checkpoint_file, image): \"\"\" Run", "plot a certain layer :param units: convnet layer \"\"\" filters = units.shape[3] plt.figure(1,", "saved model parameters for the basic model :param image: The supplied image (same", ":, i], interpolation=\"nearest\", cmap=\"gray\") plt.tight_layout(pad=3.0) plt.show() def run_model_image(checkpoint_file, image): \"\"\" Run an image", "i+1) plt.title('Filter ' + str(i)) plt.imshow(units[0, :, :, i], interpolation=\"nearest\", cmap=\"gray\") plt.tight_layout(pad=3.0) plt.show()", "Run an image through the trained model and vizualize its activations :param checkpoint_file:", "# and visualize activations. wd = os.getcwd() test_images = wd + '/test_data/number_samples/' for", "visualize_activations(): # Get the test image directory and then run the test images", "trained model and vizualize its activations :param checkpoint_file: The saved model parameters for", "' + str(i)) plt.imshow(units[0, :, :, i], interpolation=\"nearest\", cmap=\"gray\") plt.tight_layout(pad=3.0) plt.show() def run_model_image(checkpoint_file,", "test images through the model # and visualize activations. 
wd = os.getcwd() test_images", "checkpoint_file: Where the model is saved :param image_dir: \"\"\" read_image = cv2.imread(image_dir, 0)", "def plotNNFilter(units): \"\"\" Function to plot a certain layer :param units: convnet layer", "plt import tensorflow as tf import cv2 import os IMAGE_SIZE = 24 def", "image_dir: \"\"\" read_image = cv2.imread(image_dir, 0) read_image = cv2.resize(read_image, (24, 24), interpolation=cv2.INTER_AREA) run_model_image(checkpoint_file='./base/base.ckpt',", "units = relu1.eval(session=sess) plotNNFilter(units) units = relu2.eval(session=sess) plotNNFilter(units) units = relu3.eval(session=sess) plotNNFilter(units) def", "image): \"\"\" Run an image through the trained model and vizualize its activations", "The supplied image (same dimensions as training). \"\"\" with tf.Graph().as_default(): image = tf.reshape(image,", "plot_activations(image_dir): \"\"\" Plot the activations for a given image :param checkpoint_file: Where the", "= tf.Session() saver.restore(sess=sess, save_path=checkpoint_file) units = relu1.eval(session=sess) plotNNFilter(units) units = relu2.eval(session=sess) plotNNFilter(units) units", ":param image_dir: \"\"\" read_image = cv2.imread(image_dir, 0) read_image = cv2.resize(read_image, (24, 24), interpolation=cv2.INTER_AREA)", "<reponame>ramhiser/deep-learning-with-tensorflow-meetup from models.base_convnet import inference import matplotlib.pyplot as plt import tensorflow as tf", "read_image = cv2.imread(image_dir, 0) read_image = cv2.resize(read_image, (24, 24), interpolation=cv2.INTER_AREA) run_model_image(checkpoint_file='./base/base.ckpt', image=read_image) def", "plotNNFilter(units) units = relu2.eval(session=sess) plotNNFilter(units) units = relu3.eval(session=sess) plotNNFilter(units) def plot_activations(image_dir): \"\"\" Plot", "and visualize activations. 
wd = os.getcwd() test_images = wd + '/test_data/number_samples/' for image_dir", "os IMAGE_SIZE = 24 def plotNNFilter(units): \"\"\" Function to plot a certain layer", ":param image: The supplied image (same dimensions as training). \"\"\" with tf.Graph().as_default(): image", "the activations for a given image :param checkpoint_file: Where the model is saved", "model parameters for the basic model :param image: The supplied image (same dimensions", "str(i)) plt.imshow(units[0, :, :, i], interpolation=\"nearest\", cmap=\"gray\") plt.tight_layout(pad=3.0) plt.show() def run_model_image(checkpoint_file, image): \"\"\"", "the trained model and vizualize its activations :param checkpoint_file: The saved model parameters", "to plot a certain layer :param units: convnet layer \"\"\" filters = units.shape[3]", "a certain layer :param units: convnet layer \"\"\" filters = units.shape[3] plt.figure(1, figsize=(20,", "image = tf.reshape(image, [1, IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.cast(image, tf.float32) relu1, relu2,", "units = relu3.eval(session=sess) plotNNFilter(units) def plot_activations(image_dir): \"\"\" Plot the activations for a given", "image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.image.per_image_whitening(image) image = tf.reshape(image, [1,", "run the test images through the model # and visualize activations. 
wd =", "Where the model is saved :param image_dir: \"\"\" read_image = cv2.imread(image_dir, 0) read_image", "filters = units.shape[3] plt.figure(1, figsize=(20, 20)) for i in xrange(0, filters): plt.subplot(7, 6,", ":param checkpoint_file: The saved model parameters for the basic model :param image: The", "= cv2.imread(image_dir, 0) read_image = cv2.resize(read_image, (24, 24), interpolation=cv2.INTER_AREA) run_model_image(checkpoint_file='./base/base.ckpt', image=read_image) def visualize_activations():", "tf.cast(image, tf.float32) relu1, relu2, relu3 = inference(train=False, images=image, visualize=True) saver = tf.train.Saver(tf.all_variables()) sess", "interpolation=cv2.INTER_AREA) run_model_image(checkpoint_file='./base/base.ckpt', image=read_image) def visualize_activations(): # Get the test image directory and then", "through the trained model and vizualize its activations :param checkpoint_file: The saved model", "i], interpolation=\"nearest\", cmap=\"gray\") plt.tight_layout(pad=3.0) plt.show() def run_model_image(checkpoint_file, image): \"\"\" Run an image through", "1]) image = tf.cast(image, tf.float32) relu1, relu2, relu3 = inference(train=False, images=image, visualize=True) saver", "as plt import tensorflow as tf import cv2 import os IMAGE_SIZE = 24", "= inference(train=False, images=image, visualize=True) saver = tf.train.Saver(tf.all_variables()) sess = tf.Session() saver.restore(sess=sess, save_path=checkpoint_file) units", "image = tf.image.per_image_whitening(image) image = tf.reshape(image, [1, IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.cast(image,", "plotNNFilter(units) def plot_activations(image_dir): \"\"\" Plot the activations for a given image :param checkpoint_file:", "image :param checkpoint_file: Where the model is saved :param image_dir: \"\"\" read_image =", "tf.float32) relu1, relu2, relu3 = inference(train=False, images=image, visualize=True) saver = tf.train.Saver(tf.all_variables()) sess =", "matplotlib.pyplot as plt import 
tensorflow as tf import cv2 import os IMAGE_SIZE =", "relu2, relu3 = inference(train=False, images=image, visualize=True) saver = tf.train.Saver(tf.all_variables()) sess = tf.Session() saver.restore(sess=sess,", "image: The supplied image (same dimensions as training). \"\"\" with tf.Graph().as_default(): image =", "def plot_activations(image_dir): \"\"\" Plot the activations for a given image :param checkpoint_file: Where", "visualize activations. wd = os.getcwd() test_images = wd + '/test_data/number_samples/' for image_dir in", "= cv2.resize(read_image, (24, 24), interpolation=cv2.INTER_AREA) run_model_image(checkpoint_file='./base/base.ckpt', image=read_image) def visualize_activations(): # Get the test", "units = relu2.eval(session=sess) plotNNFilter(units) units = relu3.eval(session=sess) plotNNFilter(units) def plot_activations(image_dir): \"\"\" Plot the", "= relu3.eval(session=sess) plotNNFilter(units) def plot_activations(image_dir): \"\"\" Plot the activations for a given image", "tf.Graph().as_default(): image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.image.per_image_whitening(image) image = tf.reshape(image,", "relu2.eval(session=sess) plotNNFilter(units) units = relu3.eval(session=sess) plotNNFilter(units) def plot_activations(image_dir): \"\"\" Plot the activations for", "\"\"\" with tf.Graph().as_default(): image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.image.per_image_whitening(image) image", "plotNNFilter(units) units = relu3.eval(session=sess) plotNNFilter(units) def plot_activations(image_dir): \"\"\" Plot the activations for a", "tf.image.per_image_whitening(image) image = tf.reshape(image, [1, IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.cast(image, tf.float32) relu1,", "model is saved :param image_dir: \"\"\" read_image = cv2.imread(image_dir, 0) read_image = cv2.resize(read_image,", "= relu1.eval(session=sess) plotNNFilter(units) units = relu2.eval(session=sess) plotNNFilter(units) units = 
relu3.eval(session=sess) plotNNFilter(units) def plot_activations(image_dir):", "20)) for i in xrange(0, filters): plt.subplot(7, 6, i+1) plt.title('Filter ' + str(i))", "an image through the trained model and vizualize its activations :param checkpoint_file: The", "run_model_image(checkpoint_file, image): \"\"\" Run an image through the trained model and vizualize its", "figsize=(20, 20)) for i in xrange(0, filters): plt.subplot(7, 6, i+1) plt.title('Filter ' +", "training). \"\"\" with tf.Graph().as_default(): image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.image.per_image_whitening(image)", "visualize=True) saver = tf.train.Saver(tf.all_variables()) sess = tf.Session() saver.restore(sess=sess, save_path=checkpoint_file) units = relu1.eval(session=sess) plotNNFilter(units)", "run_model_image(checkpoint_file='./base/base.ckpt', image=read_image) def visualize_activations(): # Get the test image directory and then run", ":, :, i], interpolation=\"nearest\", cmap=\"gray\") plt.tight_layout(pad=3.0) plt.show() def run_model_image(checkpoint_file, image): \"\"\" Run an", "checkpoint_file: The saved model parameters for the basic model :param image: The supplied", "= tf.train.Saver(tf.all_variables()) sess = tf.Session() saver.restore(sess=sess, save_path=checkpoint_file) units = relu1.eval(session=sess) plotNNFilter(units) units =", "the test images through the model # and visualize activations. wd = os.getcwd()", "interpolation=\"nearest\", cmap=\"gray\") plt.tight_layout(pad=3.0) plt.show() def run_model_image(checkpoint_file, image): \"\"\" Run an image through the", "= os.getcwd() test_images = wd + '/test_data/number_samples/' for image_dir in os.listdir(test_images): print image_dir", "# Get the test image directory and then run the test images through", "images through the model # and visualize activations. 
wd = os.getcwd() test_images =", "tf import cv2 import os IMAGE_SIZE = 24 def plotNNFilter(units): \"\"\" Function to", "in xrange(0, filters): plt.subplot(7, 6, i+1) plt.title('Filter ' + str(i)) plt.imshow(units[0, :, :,", ":param units: convnet layer \"\"\" filters = units.shape[3] plt.figure(1, figsize=(20, 20)) for i", "model and vizualize its activations :param checkpoint_file: The saved model parameters for the", "= relu2.eval(session=sess) plotNNFilter(units) units = relu3.eval(session=sess) plotNNFilter(units) def plot_activations(image_dir): \"\"\" Plot the activations", "= tf.image.per_image_whitening(image) image = tf.reshape(image, [1, IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.cast(image, tf.float32)", "image=read_image) def visualize_activations(): # Get the test image directory and then run the", "tf.train.Saver(tf.all_variables()) sess = tf.Session() saver.restore(sess=sess, save_path=checkpoint_file) units = relu1.eval(session=sess) plotNNFilter(units) units = relu2.eval(session=sess)", "test_images = wd + '/test_data/number_samples/' for image_dir in os.listdir(test_images): print image_dir plot_activations(test_images +", "for the basic model :param image: The supplied image (same dimensions as training).", "image through the trained model and vizualize its activations :param checkpoint_file: The saved", "model # and visualize activations. 
wd = os.getcwd() test_images = wd + '/test_data/number_samples/'", "= wd + '/test_data/number_samples/' for image_dir in os.listdir(test_images): print image_dir plot_activations(test_images + image_dir)", "cv2 import os IMAGE_SIZE = 24 def plotNNFilter(units): \"\"\" Function to plot a", "24), interpolation=cv2.INTER_AREA) run_model_image(checkpoint_file='./base/base.ckpt', image=read_image) def visualize_activations(): # Get the test image directory and", "+ str(i)) plt.imshow(units[0, :, :, i], interpolation=\"nearest\", cmap=\"gray\") plt.tight_layout(pad=3.0) plt.show() def run_model_image(checkpoint_file, image):", "image directory and then run the test images through the model # and", "\"\"\" Function to plot a certain layer :param units: convnet layer \"\"\" filters", "relu3.eval(session=sess) plotNNFilter(units) def plot_activations(image_dir): \"\"\" Plot the activations for a given image :param", "xrange(0, filters): plt.subplot(7, 6, i+1) plt.title('Filter ' + str(i)) plt.imshow(units[0, :, :, i],", "inference(train=False, images=image, visualize=True) saver = tf.train.Saver(tf.all_variables()) sess = tf.Session() saver.restore(sess=sess, save_path=checkpoint_file) units =", "relu1, relu2, relu3 = inference(train=False, images=image, visualize=True) saver = tf.train.Saver(tf.all_variables()) sess = tf.Session()", "IMAGE_SIZE, 1]) image = tf.cast(image, tf.float32) relu1, relu2, relu3 = inference(train=False, images=image, visualize=True)", "with tf.Graph().as_default(): image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.image.per_image_whitening(image) image =", "\"\"\" filters = units.shape[3] plt.figure(1, figsize=(20, 20)) for i in xrange(0, filters): plt.subplot(7,", "plt.title('Filter ' + str(i)) plt.imshow(units[0, :, :, i], interpolation=\"nearest\", cmap=\"gray\") plt.tight_layout(pad=3.0) plt.show() def", "inference import matplotlib.pyplot as plt import tensorflow as tf import cv2 import os", "vizualize its activations :param 
checkpoint_file: The saved model parameters for the basic model", "def run_model_image(checkpoint_file, image): \"\"\" Run an image through the trained model and vizualize", "cv2.imread(image_dir, 0) read_image = cv2.resize(read_image, (24, 24), interpolation=cv2.INTER_AREA) run_model_image(checkpoint_file='./base/base.ckpt', image=read_image) def visualize_activations(): #", "(same dimensions as training). \"\"\" with tf.Graph().as_default(): image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 1])", "its activations :param checkpoint_file: The saved model parameters for the basic model :param", "as tf import cv2 import os IMAGE_SIZE = 24 def plotNNFilter(units): \"\"\" Function", "activations for a given image :param checkpoint_file: Where the model is saved :param", "plt.figure(1, figsize=(20, 20)) for i in xrange(0, filters): plt.subplot(7, 6, i+1) plt.title('Filter '", "and vizualize its activations :param checkpoint_file: The saved model parameters for the basic", "[IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.image.per_image_whitening(image) image = tf.reshape(image, [1, IMAGE_SIZE, IMAGE_SIZE, 1])", "the basic model :param image: The supplied image (same dimensions as training). 
\"\"\"", "= tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.image.per_image_whitening(image) image = tf.reshape(image, [1, IMAGE_SIZE,", "24 def plotNNFilter(units): \"\"\" Function to plot a certain layer :param units: convnet", "from models.base_convnet import inference import matplotlib.pyplot as plt import tensorflow as tf import", "tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.image.per_image_whitening(image) image = tf.reshape(image, [1, IMAGE_SIZE, IMAGE_SIZE,", "convnet layer \"\"\" filters = units.shape[3] plt.figure(1, figsize=(20, 20)) for i in xrange(0,", "is saved :param image_dir: \"\"\" read_image = cv2.imread(image_dir, 0) read_image = cv2.resize(read_image, (24,", ":param checkpoint_file: Where the model is saved :param image_dir: \"\"\" read_image = cv2.imread(image_dir,", "plt.show() def run_model_image(checkpoint_file, image): \"\"\" Run an image through the trained model and", "cv2.resize(read_image, (24, 24), interpolation=cv2.INTER_AREA) run_model_image(checkpoint_file='./base/base.ckpt', image=read_image) def visualize_activations(): # Get the test image", "images=image, visualize=True) saver = tf.train.Saver(tf.all_variables()) sess = tf.Session() saver.restore(sess=sess, save_path=checkpoint_file) units = relu1.eval(session=sess)", "cmap=\"gray\") plt.tight_layout(pad=3.0) plt.show() def run_model_image(checkpoint_file, image): \"\"\" Run an image through the trained", "def visualize_activations(): # Get the test image directory and then run the test", "= 24 def plotNNFilter(units): \"\"\" Function to plot a certain layer :param units:", "[1, IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.cast(image, tf.float32) relu1, relu2, relu3 = inference(train=False,", "Get the test image directory and then run the test images through the", "import os IMAGE_SIZE = 24 def plotNNFilter(units): \"\"\" Function to plot a certain", "The saved model parameters for the basic model :param image: The supplied image", "(24, 24), 
interpolation=cv2.INTER_AREA) run_model_image(checkpoint_file='./base/base.ckpt', image=read_image) def visualize_activations(): # Get the test image directory", "IMAGE_SIZE, 1]) image = tf.image.per_image_whitening(image) image = tf.reshape(image, [1, IMAGE_SIZE, IMAGE_SIZE, 1]) image", "0) read_image = cv2.resize(read_image, (24, 24), interpolation=cv2.INTER_AREA) run_model_image(checkpoint_file='./base/base.ckpt', image=read_image) def visualize_activations(): # Get", "tf.Session() saver.restore(sess=sess, save_path=checkpoint_file) units = relu1.eval(session=sess) plotNNFilter(units) units = relu2.eval(session=sess) plotNNFilter(units) units =", "tf.reshape(image, [1, IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.cast(image, tf.float32) relu1, relu2, relu3 =", "basic model :param image: The supplied image (same dimensions as training). \"\"\" with", "saver.restore(sess=sess, save_path=checkpoint_file) units = relu1.eval(session=sess) plotNNFilter(units) units = relu2.eval(session=sess) plotNNFilter(units) units = relu3.eval(session=sess)", "1]) image = tf.image.per_image_whitening(image) image = tf.reshape(image, [1, IMAGE_SIZE, IMAGE_SIZE, 1]) image =", "6, i+1) plt.title('Filter ' + str(i)) plt.imshow(units[0, :, :, i], interpolation=\"nearest\", cmap=\"gray\") plt.tight_layout(pad=3.0)", "units: convnet layer \"\"\" filters = units.shape[3] plt.figure(1, figsize=(20, 20)) for i in", "image = tf.cast(image, tf.float32) relu1, relu2, relu3 = inference(train=False, images=image, visualize=True) saver =", "saver = tf.train.Saver(tf.all_variables()) sess = tf.Session() saver.restore(sess=sess, save_path=checkpoint_file) units = relu1.eval(session=sess) plotNNFilter(units) units", "\"\"\" Plot the activations for a given image :param checkpoint_file: Where the model", "wd = os.getcwd() test_images = wd + '/test_data/number_samples/' for image_dir in os.listdir(test_images): print", "given image :param checkpoint_file: Where the model is saved :param image_dir: \"\"\" 
read_image", "for i in xrange(0, filters): plt.subplot(7, 6, i+1) plt.title('Filter ' + str(i)) plt.imshow(units[0,", "\"\"\" Run an image through the trained model and vizualize its activations :param", "for a given image :param checkpoint_file: Where the model is saved :param image_dir:", "the test image directory and then run the test images through the model", "\"\"\" read_image = cv2.imread(image_dir, 0) read_image = cv2.resize(read_image, (24, 24), interpolation=cv2.INTER_AREA) run_model_image(checkpoint_file='./base/base.ckpt', image=read_image)", "sess = tf.Session() saver.restore(sess=sess, save_path=checkpoint_file) units = relu1.eval(session=sess) plotNNFilter(units) units = relu2.eval(session=sess) plotNNFilter(units)", "import matplotlib.pyplot as plt import tensorflow as tf import cv2 import os IMAGE_SIZE", "save_path=checkpoint_file) units = relu1.eval(session=sess) plotNNFilter(units) units = relu2.eval(session=sess) plotNNFilter(units) units = relu3.eval(session=sess) plotNNFilter(units)", "plt.tight_layout(pad=3.0) plt.show() def run_model_image(checkpoint_file, image): \"\"\" Run an image through the trained model", "image (same dimensions as training). \"\"\" with tf.Graph().as_default(): image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE,", "relu3 = inference(train=False, images=image, visualize=True) saver = tf.train.Saver(tf.all_variables()) sess = tf.Session() saver.restore(sess=sess, save_path=checkpoint_file)", "saved :param image_dir: \"\"\" read_image = cv2.imread(image_dir, 0) read_image = cv2.resize(read_image, (24, 24),", "model :param image: The supplied image (same dimensions as training). 
\"\"\" with tf.Graph().as_default():", "Plot the activations for a given image :param checkpoint_file: Where the model is", "Function to plot a certain layer :param units: convnet layer \"\"\" filters =", "= units.shape[3] plt.figure(1, figsize=(20, 20)) for i in xrange(0, filters): plt.subplot(7, 6, i+1)", "a given image :param checkpoint_file: Where the model is saved :param image_dir: \"\"\"", "IMAGE_SIZE, IMAGE_SIZE, 1]) image = tf.cast(image, tf.float32) relu1, relu2, relu3 = inference(train=False, images=image,", "IMAGE_SIZE = 24 def plotNNFilter(units): \"\"\" Function to plot a certain layer :param" ]
[ "# params = dict(sgp_params) # params.update(lik_params) # return params class SGPSSM(Base_SGPSSM): \"\"\"Summary Attributes:", "np.einsum('ndab,ndb->nda', Suinvnew, munew) t2_frac = Suinvnew - Suinvhat t1_frac = SuinvMunew - SuinvMuhat", "= -np.einsum('nda,nb->ab', dmiKuuinvMcav, p_i) VcavKuuinvKufi = np.einsum('ndab,nb->nda', Sucav, p_i) KuuinvVcavKuuinvKufi = np.einsum('ab,ndb->nda', Kuuinv,", "= self.sgp_layer.get_hypers() # lik_params = self.lik_layer.get_hypers() # params = dict(sgp_params) # params.update(lik_params) #", "np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) - Kuuinv kff = np.exp(2 * self.sf)", "mode == self.UP: cav_up_1 = self.x_prev_1 + self.x_next_1 + \\ (1 - alpha)", "no_train # factor variables self.variances = np.zeros([N, Dout]) self.variances.fill(1e20) self.means = np.zeros([N, Dout])", "self.updated: # self.sgp_layer.update_posterior() # self.updated = True # K = no_samples # fs", "dict(sgp_params) # init_params.update(lik_params) # return init_params # def get_hypers(self): # \"\"\"Summary # Returns:", "= {} for key in sgp_grad.keys(): grad_all[key] = sgp_grad[key] for key in lik_grad.keys():", "- mout**2 return mout, vout def backprop_grads_lvm(self, m, v, dm, dv, extra_args, mx,", "Suhat, Kuuinv)) - Kuuinv kff = np.exp(2 * self.sf) kfu = compute_kernel(2 *", "inputs (TYPE): Description # Returns: # TYPE: Description # \"\"\" # if not", ":] tn = 1.0 / variance_i gn = mean_i wnScav = np.einsum('na,ndab->ndb', p_i,", "(str, optional): Description prior_mean (int, optional): Description prior_var (int, optional): Description Raises: NotImplementedError:", "= new_n2 - n2cav frac_n1 = new_n1 - n1cav if mode == self.NEXT:", "Suinvnew[0, :, :, :] self.SuinvMu = SuinvMunew[0, :, :] else: # parallel update", "+ 1], alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x_sequential(self.NEXT, [n], alpha) (mprop,", "# dm *= 0 # dm2 *= 0 # dm2 += 1e-16 #", "self.emi_layer.update_hypers(params) 
# self.sn = params['sn'] # def init_hypers(self): # \"\"\"Summary # Returns: #", "y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( m, v, dm, dv, extra_res, cav_m, cav_v,", "xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] for k in range(no_sweeps): if", "* (mean_i - np.sum(k_i[:, np.newaxis, :] * gamma, axis=2)) hd1 = h_si *", "0: print 'epoch %d/%d' % (e, no_epochs) if not parallel: for n in", "return mout, vout # TODO def _forward_prop_random_thru_post_mm(self, mx, vx): \"\"\"Summary Args: mx (TYPE):", "mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE):", "self.x_prev_2 + self.x_next_2 + \\ (1 - alpha) * self.x_up_2 cav_up_1[0, :] +=", "* tn * wnScavSinvm term2c = 0.5 * tn * mwn**2 / oneminuswnSwn", "= self.compute_cavity_x([ n], alpha) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], cav_m_n, cav_v_n,", "Description vcav (TYPE): Description n1cav (TYPE): Description n2cav (TYPE): Description decay (float, optional):", "(TYPE): Description # Returns: # TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params) # self.lik_layer.update_hypers(params)", "self.sgp_layer.get_hypers() # lik_params = self.lik_layer.get_hypers() # params = dict(sgp_params) # params.update(lik_params) # return", "Description \"\"\" # dict to hold hypers, inducing points and parameters of q(U)", "put this in config parallel = True # TODO: put this in config", "dm, dv, extra_args, x, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v (TYPE): Description", "(TYPE): Description idxs (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description Raises: NotImplementedError:", "idxs = np.arange(self.N) y = self.y_train[idxs, :] x = self.x_train[idxs, :] (m, v,", "= {'sn': self.sn} # params = dict(sgp_params) # params.update(emi_params) # params.update(ssm_params) # return", "= {} grad_all = {} for k in range(no_sweeps): # if k %", "= np.exp(2.0 * self.sf) psi1, psi2 = 
compute_psi_weave( 2 * self.ls, 2 *", "2 * self.sf, x, self.zu) mout = np.einsum('nm,ndm->nd', kfu, Ahat) Bkfukuf = np.einsum('ndab,na,nb->nd',", "A = np.einsum('ab,db->da', Kuuinv, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv))", "self.Din) post_m_mean = np.mean(post_m, axis=0) post_m_std = np.std(post_m, axis=0) post_m = (post_m -", "gamma_si) Sucav = Kuu - np.einsum('ab,ndbc->ndac', Kuu, betacavKuu) signSu, logdetSu = np.linalg.slogdet(Su) signKuu,", "(TYPE): Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\" if mode == self.UP:", "T1u) self.gamma = np.einsum('ab,db->da', self.Kuuinv, self.mu) self.beta = self.Kuuinv - np.einsum('ab,dbc->dac', self.Kuuinv, np.einsum('dab,bc->dac',", "put this in config energy, grad_all = self.run_pep(idxs, no_ep_sweeps, alpha, parallel, compute_energy=True) return", "self.means / self.variances) Vinv = self.Kuuinv + T2u self.Suinv = Vinv self.Su =", "= self.sgp_layer.compute_cavity( [idxs[i]], alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb[i],", "scipy.cluster.vq import kmeans2 from utils import * from kernels import * from lik_layers", "psi2) vout = psi0 + Bhatpsi2 - mout**2 extra_res = [muhat, Suhat, SuinvMuhat,", "SGP_Layer( self.N - 1, self.Din + self.Dcon_dyn, self.Din, self.M) if gp_emi: self.emi_layer =", "params['ls' + key_suffix] = ls params['zu' + key_suffix] = zu return params def", "Kuuinv = self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv,", "decay=decay) self.update_factor_x( [n], alpha, grad_cav, cav_m_n, cav_v_n, decay=decay) else: # parallel update for", "cav_next_1 += (1 - alpha) * self.x_next_1[idxs, :] cav_next_2 += (1 - alpha)", "self.x_prev_1[idxs, :] cav_prev_2 += (1 - alpha) * self.x_prev_2[idxs, :] return cav_prev_1 /", "kuuinv ls = self.ls sf = self.sf Dout = self.Dout M = self.M", "Din = self.Din Dout = self.Dout if x_train is None: ls = 
np.log(np.ones((Din,", "cav_v * cav_x1 return cav_m, cav_v, cav_x1, cav_x2 def update_factor_x(self, n, alpha, grad_cav,", "Args: key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" params = {} M", "- alpha * tn * wnSwn term2a = 0.5 * alpha * tn**2", "* from lik_layers import Gauss_Layer, Probit_Layer, Gauss_Emis from base_models import Base_SGPR, Base_SGPLVM, Base_SGPSSM", "cav_v_n, alpha=alpha) logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav", "if mode == self.NEXT: idxs = np.arange(0, self.N - 1) cur_n1 = self.x_next_1[idxs,", "Suinvhat t1_frac = SuinvMunew - SuinvMuhat t1_old = self.t1[n, :, :] t2_old =", "x_train) KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 / self.variances) T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf)", "= - self.N * 1.0 / batch_size / alpha xb = self.x_train[idxs, :]", "optional): Description Returns: TYPE: Description \"\"\" try: for e in range(no_epochs): if e", "(TYPE): Description Returns: TYPE: Description \"\"\" Su = self.Su mu = self.mu Lu", "self.SuinvMu = SuinvMunew[0, :, :] else: # parallel update self.t1[n, :, :] =", "= self.sgp_layer.backprop_grads_reg( m, v, dm, dv, extra_res, x, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav,", "self.mu) self.beta = self.Kuuinv - np.einsum('ab,dbc->dac', self.Kuuinv, np.einsum('dab,bc->dac', self.Su, self.Kuuinv)) def init_hypers(self, x_train=None,", "Description \"\"\" super(SGPR, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M)", "n1cav, n2cav, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description dmcav (TYPE): Description dvcav", "alpha) * t2_old + t2_frac if t1_new.shape[0] == 1: # TODO: do damping", "Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu) vout = kff + Bpsi2 return mout,", "+ frac_t2 tx1_new = decay * cur_t1 + (1 - decay) * tx1_new", "Description Returns: TYPE: Description \"\"\" 
Kuuinv = self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu)", "alpha) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( [n], cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop,", "Kuu) mcav = np.einsum('ab,ndb->nda', Kuu, gamma_si) Sucav = Kuu - np.einsum('ab,ndbc->ndac', Kuu, betacavKuu)", "m_diff**2 / v_sum const_term = -0.5 * np.log(2 * np.pi * v_sum) alpha_term", "x_train=None, new_hypers=False): \"\"\"Summary Returns: TYPE: Description \"\"\" # compute the posterior approximation if", "np.einsum('dab,db->da', Suinv, mu) term1 = 0.5 * (np.sum(logdetSu) - Dout * logdetKuu +", "plt import time import pdb from scipy.cluster.vq import kmeans2 from utils import *", "frac_n1 n2_new = (1 - alpha) * cur_n2 + frac_n2 self.x_prev_1[idxs, :] =", "print 'epoch %d/%d' % (e, no_epochs) if not parallel: for n in range(self.N):", "optional): Description Returns: TYPE: Description \"\"\" if vx is None: return self._forward_prop_deterministic_thru_post(mx) else:", "(TYPE): Description alpha (TYPE): Description grad_cav (TYPE): Description cav_m (TYPE): Description cav_v (TYPE):", "-dlogZd_dmi2 * (mean_i - np.sum(k_i[:, np.newaxis, :] * gamma, axis=2)) hd1 = h_si", "np.random.rand(Din, )) sf = np.log(np.array([1])) zu = np.tile(np.linspace(-1, 1, M).reshape((M, 1)), (1, Din))", ":, :]))) # axs[0].set_title('n = %d' % n[0]) # plt.show() def sample(self, x):", "* var_new_parallel # if alpha == 1: # rho = 0.5 # n1_new", "+ term2b + term2c + term2d) sgp_contrib = - term1 - term2 KuuinvMcav", "\\ self.dyn_layer.forward_prop_thru_cav( [n], cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt, dvt = \\", "TYPE: Description \"\"\" # compute the leave one out moments t1n = self.t1[n,", "cav_x1, cav_x2 def update_factor_x(self, n, alpha, grad_cav, cav_m, cav_v, decay=0.0): \"\"\"Summary Args: n", "mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav, idxs, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode", "mode') def update_factor_x_sequential( self, mode, dmcav, 
dvcav, mcav, vcav, n1cav, n2cav, idxs, decay=0.0,", "dsf2 += np.sum(dv) dsf = 2 * sf2 * dsf2 # compute the", "vf = kff - np.dot(qfu, kfu.T) Lf = np.linalg.cholesky(vf) epsilon = np.random.randn(x.shape[0], self.Dout)", "isinstance(self.emi_layer, Gauss_Layer) and epoch == 0: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha)", "TYPE: Description \"\"\" # dict to hold hypers, inducing points and parameters of", "# deal with the dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\", "grad_cav (TYPE): Description extra_args (TYPE): Description decay (int, optional): Description Returns: TYPE: Description", "Description # \"\"\" # if not self.updated: # self.sgp_layer.update_posterior() # self.updated = True", "== self.NEXT: cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :] n1_new = (1", "return f_sample def compute_kuu(self): \"\"\"Summary Returns: TYPE: Description \"\"\" # update kuu and", "term2 = N / Nb * np.sum(term2a + term2b + term2c + term2d)", "+ np.einsum('da,db->dab', self.mu, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv)) -", "Probit_Layer, Gauss_Emis from base_models import Base_SGPR, Base_SGPLVM, Base_SGPSSM from config import * class", "new_n1 - n1cav if mode == self.NEXT: idxs = np.arange(0, self.N - 1)", "params.update(ssm_params) # return params class SGP_Layer_rank_one(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE):", "# self.sgp_layer.update_hypers(params) # self.lik_layer.update_hypers(params) # def init_hypers(self): # \"\"\"Summary # Returns: # TYPE:", "dvcav Sunew = Suhat - np.einsum( 'ndab,ndbc->ndac', Suhat, np.einsum('ndab,ndbc->ndac', inner, Suhat)) Suinvnew =", "= (1 - alpha) * cur_n2 + frac_n2 self.x_prev_1[idxs, :] = decay *", "t1_old + (1 - decay) * t1_new self.t2[n, :, :, :] = decay", "Description M (TYPE): Description mu (TYPE): Description N (TYPE): Description sf (int): Description", "# lik_params = self.emi_layer.init_hypers() 
# ssm_params = {'sn': np.log(0.001)} # init_params = dict(sgp_params)", ":]))) # axs[0].set_title('n = %d' % n[0]) # plt.show() def sample(self, x): \"\"\"Summary", "(TYPE): Description lik (str, optional): Description prior_mean (int, optional): Description prior_var (int, optional):", "lik (str, optional): Description prior_mean (int, optional): Description prior_var (int, optional): Description Raises:", "self.ls, 2 * self.sf, x, x) kff += np.diag(JITTER * np.ones(x.shape[0])) kfu =", "+ self.tx2 vx = 1.0 / post_2 mx = post_1 / post_2 return", "mx (TYPE): Description vx (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\"", "if k % display_steps == 0: # print 'PEP, epoch: %d / %d'", "dm, dv, extra_res, x, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) except KeyboardInterrupt:", "= np.zeros([N, Dout, M]) self.t2 = np.zeros([N, Dout, M, M]) # TODO self.mu", "sn2) - 0.5 * np.log(alpha) logZ = exp_term + const_term + alpha_term dvt", "self.sgp_layer.backprop_grads_lvm( mn, vn, dmn, dvn, extra_res, cav_m_n, cav_v_n, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav,", "compute_cavity_x(self, mode, alpha): \"\"\"Summary Args: mode (TYPE): Description alpha (TYPE): Description Returns: TYPE:", "+ frac_t1 tx2_new = (1 - alpha) * cur_t2 + frac_t2 tx1_new =", "+ T2u self.Suinv = Vinv self.Su = np.linalg.inv(Vinv) self.mu = np.einsum('dab,db->da', self.Su, T1u)", "v_t (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" sn2 = np.exp(2", "np.sum(self.mu * np.linalg.solve(self.Su, self.mu)) return phi_posterior def compute_phi_cavity(self): \"\"\"Summary Returns: TYPE: Description \"\"\"", "Description # \"\"\" # self.sgp_layer.update_hypers(params) # self.lik_layer.update_hypers(params) # def init_hypers(self): # \"\"\"Summary #", "= h_si * dlogZd_dmi[:, :, np.newaxis] hd2h = np.einsum('nda,ndb->ndab', h_si, h_si) * dlogZd_dmi2[:,", "# compute grads wrt Ahat and 
Bhat dm_all = dm - 2 *", "rho) * n1_ori # n2_damped = rho * n2_new + (1.0 - rho)", "elif mode == self.PREV: cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2 =", "self.Din Dout = self.Dout params['ls' + key_suffix] = self.ls params['sf' + key_suffix] =", "decay (float, optional): Description Returns: TYPE: Description \"\"\" try: for e in range(no_epochs):", "10000: centroids, label = kmeans2(x_train, M, minit='points') else: randind = np.random.permutation(N) centroids =", "betacavKuu = np.einsum('ndab,bc->ndac', beta_si, Kuu) mcav = np.einsum('ab,ndb->nda', Kuu, gamma_si) Sucav = Kuu", "cav_up_1, cav_up_2 elif mode == self.PREV: idxs = np.arange(1, self.N) cav_prev_1 = self.x_up_1[idxs,", "(variance_i/alpha - np.sum(k_i[:, np.newaxis, :] * h_si, axis=2)) dlogZd_dmi = -dlogZd_dmi2 * (mean_i", "idxs, m_cav, v_cav, dm_s, dm2_s, dv_s, xb, alpha, prop_info) lik_grad = self.lik_layer.backprop_grads( m_cav,", "alpha, decay): \"\"\"Summary Args: epoch (TYPE): Description alpha (TYPE): Description decay (TYPE): Description", "= self.M ls = np.exp(self.ls) sf2 = np.exp(2 * self.sf) zu = self.zu", "cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x_sequential(self.PREV, [n + 1], alpha) cav_tm1_m, cav_tm1_v,", "= np.zeros((N, Din)) self.x_up_1 = np.zeros((N, Din)) self.x_up_2 = np.zeros((N, Din)) self.x_prior_1 =", "/ cav_v frac_t1 = new_p1 - cav_m / cav_v # neg_idxs = np.where(frac_t2", "logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb[i], alpha, compute_dm2=True) self.sgp_layer.update_factor( [idxs[i]],", "dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x_sequential( self.NEXT,", "* new_m frac_n2 = new_n2 - n2cav frac_n1 = new_n1 - n1cav if", "+ key_suffix] = sf params['ls' + key_suffix] = ls params['zu' + key_suffix] =", "init_params = dict(sgp_params) # init_params.update(lik_params) # return init_params # def 
get_hypers(self): # \"\"\"Summary", "var_i_new + mean_i / variance_i * (1 - alpha)) mean_new_parallel = mean_div_var_i_new *", "= np.mean(post_m, axis=0) post_m_std = np.std(post_m, axis=0) post_m = (post_m - post_m_mean) /", "params for latent variables N = self.N Din = self.Din self.x_prev_1 = np.zeros((N,", "axs[1].errorbar(self.zu[:, 0]+0.05, self.mu[1, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[1, :, :]))) # axs[0].set_title('n = %d' %", "self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav, idxs, decay=0.0, alpha=1.0): \"\"\"Summary Args:", "(TYPE): Description v (TYPE): Description dm (TYPE): Description dv (TYPE): Description extra_args (TYPE):", "compute_transition_tilted(self, m_prop, v_prop, m_t, v_t, alpha): \"\"\"Summary Args: m_prop (TYPE): Description v_prop (TYPE):", "= np.std(post_m, axis=0) post_m = (post_m - post_m_mean) / post_m_std post_v = 0.1", "def backprop_grads_lvm(self, m, v, dm, dv, extra_args, mx, vx, alpha=1.0): \"\"\"Summary Args: m", "Description # \"\"\" # sgp_params = self.sgp_layer.get_hypers() # emi_params = self.emi_layer.get_hypers() # ssm_params", "N = self.N lik_contrib = scale_logZ * np.sum(logZ) dm_s = scale_logZ * dm", "Description Returns: TYPE: Description \"\"\" dmx = grad_cav['mx'] dvx = grad_cav['vx'] new_m =", "lik_params = self.lik_layer.get_hypers() # params = dict(sgp_params) # params.update(lik_params) # return params class", "parallel: for n in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) cav_m_n, cav_v_n, _,", "* self.sf) kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu) mout", "= gamma + hd1 beta_si = beta - hd2h # projection h =", "fs[:, :, k] = self.sgp_layer.sample(inputs) # return fs # def predict_y(self, inputs): #", "var_new_parallel = 1.0 / n1_damped # mean_new_parallel = var_new_parallel * n2_damped self.variances[idxs, :]", "return sgp_contrib, grads class SGPR_rank_one(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description", "self.means = 
np.zeros([N, Dout]) # pep variables self.gamma = np.zeros([Dout, M]) self.beta =", "dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x(self.PREV, alpha) cav_tm1_m, cav_tm1_v,", "qfu = np.dot(kfu, self.Kuuinv) mf = np.einsum('nm,dm->nd', qfu, u_sample) vf = kff -", "M (TYPE): Description mu (TYPE): Description N (TYPE): Description sf (int): Description Splusmm", "10000: X1 = np.copy(x_train) else: randind = np.random.permutation(N) X1 = X[randind[:5000], :] x_dist", "+ sn2 / alpha m_diff = m_t - m_prop exp_term = -0.5 *", "[n + 1], alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x_sequential(self.NEXT, [n], alpha)", "= self.compute_cavity(n, alpha) Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Smm =", "n1_new self.x_prev_2[idxs, :] = decay * cur_n2 + (1 - decay) * n2_new", "M, M]) # numpy variable for inducing points, Kuuinv, Kuu and its gradients", "self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0.5): \"\"\"Summary", "prior_mean / prior_var self.x_prior_2 = 1.0 / prior_var self.UP, self.PREV, self.NEXT = 'UP',", "self.Din, self.Dout, self.M) def objective_function(self, params, mb_size, alpha=1.0, prop_mode=PROP_MM): \"\"\"Summary Args: params (TYPE):", "TODO: alternatitve method for non real-valued data post_m = PCA_reduce(y_train, self.Din) post_m_mean =", "mx, vx): \"\"\"Summary Args: mx (TYPE): Description vx (TYPE): Description Returns: TYPE: Description", "# self.emi_layer.update_hypers(params) # self.sn = params['sn'] # def init_hypers(self): # \"\"\"Summary # Returns:", "- 2 * dvcav Sunew = Suhat - np.einsum( 'ndab,ndbc->ndac', Suhat, np.einsum('ndab,ndbc->ndac', inner,", "config energy, grad_all = self.run_pep(idxs, no_ep_sweeps, alpha, parallel, compute_energy=True) return energy, grad_all def", "extra_res, xn, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res) else: 
# parallel update for", "- alpha) * self.x_up_2 cav_up_1[0, :] += self.x_prior_1 cav_up_2[0, :] += self.x_prior_2 return", "for gaussian emission lik if isinstance(self.emi_layer, Gauss_Layer) and epoch == 0: up_1, up_2", "factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x_sequential(self.PREV, [n + 1], alpha)", "dv dm2_s = scale_logZ * dm2 sgp_contrib, sgp_grad = self.sgp_layer.backprop_grads_reg( idxs, m_cav, v_cav,", "compute_psi_weave( 2 * self.ls, 2 * self.sf, mx, vx, self.zu) mout = np.einsum('nm,dm->nd',", "2*self.ls, 2*self.sf, self.zu, S, Kuu - np.diag(JITTER * np.ones(self.M))) grads = {} grads['sf']", "'euclidean') triu_ind = np.triu_indices(N) ls = np.zeros((Din, )) d2imed = np.median(x_dist[triu_ind]) for i", "np.zeros([M, Din]) self.Kuu = np.zeros([M, M]) self.Kuuinv = np.zeros([M, M]) # variables for", "kfu, Ahat, Bhat] return mout, vout, extra_res def _forward_prop_random_thru_cav_mm(self, n, mx, vx, alpha):", "grad_cav = self.sgp_layer.backprop_grads_lvm( mn, vn, dmn, dvn, extra_res, cav_m_n, cav_v_n, alpha=alpha) self.sgp_layer.update_factor( [n],", "Description no_pseudo (TYPE): Description lik (str, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR_rank_one,", "a[3], a[4], a[5], a[6] Kuuinv = self.Kuuinv # compute grads wrt Ahat and", "np.zeros([Dout, M, ]) self.Suinv = np.zeros([Dout, M, M]) self.Splusmm = np.zeros([Dout, M, M])", "1.0 / alpha scale_prior = 1 phi_prior = self.compute_phi_prior() phi_post = self.compute_phi_posterior() phi_cav", "p_idlogZ_dvi) temp2 = np.transpose(temp1, [0, 1]) temp3 = np.einsum('na,ndb->ab', p_i, p_idlogZ_dvi) dKuu_via_vi =", "m_prop (TYPE): Description v_prop (TYPE): Description m_t (TYPE): Description v_t (TYPE): Description alpha", "n2cav (TYPE): Description idxs (TYPE): Description decay (float, optional): Description alpha (float, optional):", "True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # return mf, vf # def sample_f(self,", 
"self.x_next_2[idxs, :] cav_prev_1 += (1 - alpha) * self.x_prev_1[idxs, :] cav_prev_2 += (1", "Description # \"\"\" # self.sgp_layer.update_hypers(params) # self.emi_layer.update_hypers(params) # self.sn = params['sn'] # def", "dvcav = grad_cav['mcav'], grad_cav['vcav'] # perform Power-EP update munew = muhat + np.einsum('ndab,ndb->nda',", "approximation self.Suinv = self.Kuuinv + np.sum(self.t2, axis=0) self.SuinvMu = np.sum(self.t1, axis=0) self.Su =", ":] += self.x_prior_2 return cav_up_1 / (cav_up_2 + 1e-16), 1.0 / (cav_up_2 +", "Sucav, p_i) KuuinvVcavKuuinvKufi = np.einsum('ab,ndb->nda', Kuuinv, VcavKuuinvKufi) p_idlogZ_dvi = p_i[:, np.newaxis, :] *", "self.zu) mout = np.einsum('nm,ndm->nd', kfu, Ahat) Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu) vout", "a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8] # compute grads wrt Ahat", "vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.emi_layer.output_probabilistic(mf, vf) # return my, vy", "N = self.N if mb_size >= N: idxs = np.arange(N) xb = self.x_train", "sn2 / alpha m_diff = m_t - m_prop exp_term = -0.5 * m_diff**2", "cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 for n in range(0, self.N", "= self.y_train[idxs, :] cav_m, cav_v, _, _ = self.compute_cavity_x(idxs, alpha) (m, v, extra_res)", "Description mu (TYPE): Description N (TYPE): Description sf (int): Description Splusmm (TYPE): Description", "np.einsum('dab,nb->nda', beta, k_i) variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] dlogZd_dmi2 =", ":] cav_x2 = self.t02 + (1 - alpha) * self.tx2[n, :] cav_v =", "post_1 - self.t01 self.tx2 = post_2 - self.t02 def inference(self, alpha=1.0, no_epochs=10, parallel=False,", "Args: m_prop (TYPE): Description v_prop (TYPE): Description m_t (TYPE): Description v_t (TYPE): Description", "from base_models import Base_SGPR, Base_SGPLVM, Base_SGPSSM from config import * class SGP_Layer(object): \"\"\"Summary", "Description \"\"\" super(SGPSSM, self).__init__( y_train, 
hidden_size, no_pseudo, lik, prior_mean, prior_var, x_control, gp_emi, control_to_emi)", "np.random.choice(N, mb_size, replace=False) xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] # update", "factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x(self.PREV, alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1,", "def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: x_train (None, optional): Description key_suffix (str, optional):", "t02 (TYPE): Description tx1 (TYPE): Description tx2 (TYPE): Description updated (bool): Description \"\"\"", ":] cur_n2 = self.x_prev_2[idxs, :] n1_new = (1 - alpha) * cur_n1 +", "Returns: TYPE: Description \"\"\" mx, vx = self.get_posterior_x() my, vy, vyn = self.emi_layer.output_probabilistic(mx,", "(int, optional): Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = \\", "np.einsum('ab,ndb->nda', Kuuinv, muhat) Smm = Suhat + np.einsum('nda,ndb->ndab', muhat, muhat) Bhat = np.einsum(", "* (1 - alpha)) mean_div_var_i_new = (mean_i_new / var_i_new + mean_i / variance_i", "def update_hypers(self, params, x_train, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix (str, optional):", "axs[0].set_title('n = %d' % n[0]) # plt.show() def sample(self, x): \"\"\"Summary Args: x", "decay) * t2_new self.update_posterior() # axs[0].errorbar(self.zu[:, 0]+0.05, self.mu[0, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[0, :, :])))", "np.zeros([Dout, M, M]) self.Splusmm = np.zeros([Dout, M, M]) # numpy variable for inducing", "alpha=alpha) self.update_factor_x( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, decay=decay, alpha=alpha) def inf_sequential(self,", "= np.random.permutation(N) centroids = x_train[randind[0:M], :] zu = centroids if N < 10000:", "xb, alpha, prop_info) lik_grad = self.lik_layer.backprop_grads( m_cav, v_cav, dm, dv, alpha, scale_logZ) energy", "Smm = Suhat + np.einsum('nda,ndb->ndab', muhat, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', 
Kuuinv, np.einsum('ndab,bc->ndac',", "= Su + np.einsum('da,db->dab', mu, mu) S = self.Dout * Kuuinv - np.sum(np.einsum('ab,dbc->dac',", "alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x_sequential(self.NEXT, [n], alpha) (mprop, vprop, extra_res)", "no_pseudo (TYPE): Description lik (str, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR, self).__init__(x_train,", ":] x = self.x_train[idxs, :] (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, x,", "vx=None): \"\"\"Summary Args: mx (TYPE): Description vx (None, optional): Description Returns: TYPE: Description", "== 0: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2", "vcav - vcav**2 * (dmcav**2 - 2 * dvcav) new_n2 = 1.0 /", "mean_div_var_i_new * var_new_parallel # if alpha == 1: # rho = 0.5 #", "return phi_cavity def compute_phi(self, alpha=1.0): \"\"\"Summary Args: alpha (float, optional): Description Returns: TYPE:", "M = self.M Din = self.Din Dout = self.Dout if x_train is None:", "dmprop, dvprop, dmt, dvt def update_factor_x( self, mode, dmcav, dvcav, mcav, vcav, n1cav,", "N (TYPE): Description sgp_layer (TYPE): Description sn (int): Description updated (bool): Description x_next_1", "oneminuswnSwn term2d = -0.5 / alpha * np.log(oneminuswnSwn) term2 = N / Nb", "Description Returns: TYPE: Description \"\"\" params = {} M = self.M Din =", "kff = compute_kernel(2 * self.ls, 2 * self.sf, x, x) kff += np.diag(JITTER", "np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_cav = {'mcav': dmcav, 'vcav': dvcav} return", "optional): Description Returns: TYPE: Description \"\"\" N = self.N scale_post = N *", "inf_sequential(self, epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE): Description alpha (TYPE): Description decay", "'vcav': dvcav} return grad_hyper, grad_input def backprop_grads_reg(self, m, v, dm, dv, extra_args, x,", "def predict_f(self, inputs): # \"\"\"Summary # Args: 
# inputs (TYPE): Description # Returns:", "v_sum**2 dvprop = -0.5 / v_sum + 0.5 * m_diff**2 / v_sum**2 dmt", "post_2 return mx, vx # def predict_f(self, inputs): # \"\"\"Summary # Args: #", "np.exp(2 * self.sn) v_sum = v_t + v_prop + sn2 / alpha m_diff", "\"\"\" # compute the leave one out moments t1n = self.t1[n, :, :]", "alpha - 1.0 scale_cav = - N * 1.0 / alpha scale_prior =", "* tn * wnSwn term2a = 0.5 * alpha * tn**2 * gn**2", "psi0 + Bpsi2 - mout**2 return mout, vout def backprop_grads_lvm(self, m, v, dm,", "self.Kuuinv a = extra_args muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat", "else: raise NotImplementedError('unknown mode') def compute_cavity_x_sequential(self, mode, idxs, alpha): \"\"\"Summary Args: mode (TYPE):", "- np.sum(k_i[:, np.newaxis, :] * gamma, axis=2)) hd1 = h_si * dlogZd_dmi[:, :,", "0.5 * (1 - alpha) * np.log(2 * np.pi * sn2) - 0.5", "= self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.emi_layer.output_probabilistic(mf, vf) # return my, vy #", "self.sgp_layer.backprop_grads_lvm( m, v, dm, dv, extra_res, cav_m, cav_v, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav,", "Args: x (TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv = self.Kuuinv A =", "= self.Su mu = self.mu Lu = np.linalg.cholesky(Su) epsilon = np.random.randn(self.Dout, self.M) u_sample", "self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) def inference(self, alpha=1.0,", "'epoch %d/%d' % (e, no_epochs) if not parallel: for n in range(self.N): yn", "* wnScavSinvm term2c = 0.5 * tn * mwn**2 / oneminuswnSwn term2d =", "= \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( m, v, dm,", "np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def compute_cavity(self, n, alpha=1.0): \"\"\"Summary Args: n (TYPE):", "True # TODO: put this in config energy, grad_all = self.run_pep(idxs, no_ep_sweeps, alpha,", 
"init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: x_train (None, optional): Description key_suffix (str, optional): Description", "/ %d' % (k, no_sweeps) find_log_lik = compute_energy and (k == no_sweeps-1) for", "self.x_train yb = self.y_train else: idxs = np.random.choice(N, mb_size, replace=False) xb = self.x_train[idxs,", "(TYPE): Description t01 (TYPE): Description t02 (TYPE): Description tx1 (TYPE): Description tx2 (TYPE):", "this should reuse base models! \"\"\" import sys import math import numpy as", "no_pseudo (TYPE): Description lik (str, optional): Description prior_mean (int, optional): Description prior_var (int,", "(1 - alpha) * np.log(2 * np.pi * sn2) - 0.5 * np.log(alpha)", "self.N - 1) cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :] n1_new =", "self.y_train[idxs, :] x = self.x_train[idxs, :] (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs,", "t2n SuinvMuhat = self.SuinvMu - alpha * t1n Suhat = np.linalg.inv(Suinvhat) muhat =", "np.einsum('na,ndab->ndb', p_i, Sucav) wnScavwn = np.einsum('ndb,nb->nd', wnScav, p_i) wnScavSinvm = np.sum(wnScav * Suinvm,", "np.newaxis, np.newaxis] gamma_si = gamma + hd1 beta_si = beta - hd2h #", "= N * 1.0 / alpha - 1.0 scale_cav = - N *", "= self.sgp_layer.backprop_grads_reg( mn, vn, dmn, dvn, extra_res, xn, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav,", "self.x_train[idxs, :] yb = self.y_train[idxs, :] # update model with new hypers self.update_hypers(params)", "# TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params) # self.lik_layer.update_hypers(params) # def init_hypers(self): #", "+ vcav * dmcav new_v = vcav - vcav**2 * (dmcav**2 - 2", "no_sweeps, alpha, compute_energy, display_steps) else: # TODO return self.run_pep_sequential( train_idxs, no_sweeps, alpha, compute_energy,", "VcavKuuinvKufi = np.einsum('ndab,nb->nda', Sucav, p_i) KuuinvVcavKuuinvKufi = np.einsum('ab,ndb->nda', Kuuinv, VcavKuuinvKufi) p_idlogZ_dvi = p_i[:,", 
"alpha (float, optional): Description prop_mode (TYPE, optional): Description Returns: TYPE: Description \"\"\" N", "n1_ori # n2_damped = rho * n2_new + (1.0 - rho) * n2_ori", "gaussian emission lik if isinstance(self.emi_layer, Gauss_Layer) and epoch == 0: up_1, up_2 =", "dv (TYPE): Description extra_args (TYPE): Description x (TYPE): Description alpha (float, optional): Description", "/ cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT: idxs =", "(TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" sn2 = np.exp(2 *", "self.sgp_layer.update_posterior() # self.updated = True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy", "Description \"\"\" super(SGPR_rank_one, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer_rank_one(self.N, self.Din, self.Dout, self.M)", "self.update_factor_x( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, decay=decay, alpha=alpha) self.update_factor_x( self.PREV, dmt,", "self.sf) psi1, psi2 = compute_psi_weave( 2 * self.ls, 2 * self.sf, mx, vx,", "1.0 / alpha - 1.0 scale_cav = - N * 1.0 / alpha", "Kuuinv, dBhat, Kuuinv) dmcav = np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_cav =", "- alpha) * np.log(2 * np.pi * sn2) - 0.5 * np.log(alpha) logZ", "M = self.M zu = self.zu self.Kuu = compute_kernel(2 * ls, 2 *", "alpha) * np.log(2 * np.pi * sn2) - 0.5 * np.log(alpha) logZ =", "self.y_train[idxs, :] # update model with new hypers self.update_hypers(params) # run power-EP and", "in range(Din): ls[i] = np.log(d2imed + 1e-16) sf = np.log(np.array([0.5])) params = dict()", "= prop_info[0], prop_info[1], prop_info[2] kfu = self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i", ":, :] else: # parallel update self.t1[n, :, :] = decay * t1_old", "SuinvMunew - SuinvMuhat t1_old = self.t1[n, :, :] t2_old = self.t2[n, :, :,", "np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf) T1u = 
np.einsum('bn,nd->db', self.KuuinvKuf, self.means / self.variances) Vinv = self.Kuuinv", "self.Su, Kuuinv)) - Kuuinv kff = np.exp(2 * self.sf) kfu = compute_kernel(2 *", "new hypers self.compute_kuu() # compute mu and Su for each layer self.update_posterior(x_train, new_hypers=True)", ")) + 0.1 * np.random.rand(Din, )) sf = np.log(np.array([1])) zu = np.tile(np.linspace(-1, 1,", "np.log(np.array([0.5])) params = dict() params['sf' + key_suffix] = sf params['ls' + key_suffix] =", "vx is None: return self._forward_prop_deterministic_thru_cav(n, mx, alpha) else: return self._forward_prop_random_thru_cav_mm(n, mx, vx, alpha)", "/ self.variances) T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf) T1u = np.einsum('bn,nd->db', self.KuuinvKuf, self.means /", "mx (TYPE): Description vx (TYPE): Description alpha (float, optional): Description Returns: TYPE: Description", "grad_cav['vcav'] # perform Power-EP update munew = muhat + np.einsum('ndab,ndb->nda', Suhat, dmcav) inner", "vf) # return my, vy # def update_hypers(self, params): # \"\"\"Summary # Args:", "variables for the hyperparameters self.ls = np.zeros([Din, ]) self.sf = 0 def forward_prop_thru_post(self,", "the leave one out moments t1n = self.t1[n, :, :] t2n = self.t2[n,", "1.0 scale_cav = - N * 1.0 / alpha scale_prior = 1 phi_prior", "alpha * t1n Suhat = np.linalg.inv(Suinvhat) muhat = np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat) return muhat,", "\"\"\" Kuuinv = self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) Smm = self.Su +", "psi0 = np.exp(2.0 * self.sf) psi1, psi2 = compute_psi_weave( 2 * self.ls, 2", ":] return cav_next_1 / cav_next_2, 1.0 / cav_next_2, cav_next_1, cav_next_2 else: raise NotImplementedError('unknown", "grads no_ep_sweeps = 10 # TODO: put this in config parallel = True", "u_sample) vf = kff - np.dot(qfu, kfu.T) Lf = np.linalg.cholesky(vf) epsilon = np.random.randn(x.shape[0],", "else: # TODO return self.run_pep_sequential( train_idxs, no_sweeps, alpha, compute_energy, 
display_steps) def run_pep_parallel(self, idxs,", "h_si * dlogZd_dmi[:, :, np.newaxis] hd2h = np.einsum('nda,ndb->ndab', h_si, h_si) * dlogZd_dmi2[:, :,", "+ self.x_prev_1 + self.x_up_1 post_2 = self.x_next_2 + self.x_prev_2 + self.x_up_2 post_1[0, :]", "import * class SGP_Layer(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description Kuu", "decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' def compute_cavity_x(self, n, alpha): \"\"\"Summary Args:", "Description decay (TYPE): Description Returns: TYPE: Description \"\"\" # merge info from output", "Kuu, gamma_si) Sucav = Kuu - np.einsum('ab,ndbc->ndac', Kuu, betacavKuu) signSu, logdetSu = np.linalg.slogdet(Su)", "self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( [n], alpha, grad_cav, cav_m_n, cav_v_n, decay=decay)", ":] = decay * cur_n1 + (1 - decay) * n1_new self.x_next_2[idxs, :]", "1: # TODO: do damping here? self.t1[n, :, :] = t1_new self.t2[n, :,", ":] variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] var_i_new = -1.0 /", "(k == no_sweeps-1) m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity(idxs, alpha) logZ, dm, dv, dm2", "def compute_phi_cavity(self): \"\"\"Summary Returns: TYPE: Description \"\"\" logZ_posterior = 0 (sign, logdet) =", "self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :] n1_new = (1 - alpha) * cur_n1", "Kuuinv)), axis=0) S = 0.5 * S + dKuu_via_logZ dhyp = d_trace_MKzz_dhypers( 2*self.ls,", "sf = self.sf Dout = self.Dout M = self.M zu = self.zu self.Kuu", "= self.Suinv - alpha * t2n SuinvMuhat = self.SuinvMu - alpha * t1n", "= np.einsum('na,ndb->ab', p_i, p_idlogZ_dvi) dKuu_via_vi = temp1 + temp2 + temp3 dKuu_via_logZ =", "term2 KuuinvMcav = np.einsum('ab,ndb->nda', Kuuinv, mcav) dmiKuuinvMcav = dm[:, :, np.newaxis] * KuuinvMcav", "= 0.5 * S + dKuu_via_logZ dhyp = d_trace_MKzz_dhypers( 2*self.ls, 2*self.sf, self.zu, S,", "Kuuinv, self.mu) Smm = self.Su + 
np.einsum('da,db->dab', self.mu, self.mu) B = np.einsum( 'ab,dbc->dac',", "= x_train[randind[0:M], :] zu = centroids if N < 10000: X1 = np.copy(x_train)", "hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1, x_control=None, gp_emi=False, control_to_emi=True): \"\"\"Summary Args: y_train (TYPE): Description", "]) self.sf = 0 def compute_phi_prior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet)", "dvprop, dmt, dvt def update_factor_x( self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav,", "cav_x1 return cav_m, cav_v, cav_x1, cav_x2 def update_factor_x(self, n, alpha, grad_cav, cav_m, cav_v,", "- hd2h # projection h = p_i - np.einsum('ndab,nb->nda', beta_si, k_i) m_si_i =", "energy /= N for key in grad_all.keys(): grad_all[key] /= N return energy, grad_all", "phi_cavity = 0.5 * np.sum(logdet) phi_cavity += 0.5 * \\ np.sum(self.muhat * np.linalg.solve(self.Suhat,", "TYPE: Description \"\"\" params = {} M = self.M Din = self.Din Dout", "cav_prev_1 / cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT: idxs", "dhyp = d_trace_MKzz_dhypers( 2*self.ls, 2*self.sf, self.zu, S, Kuu - np.diag(JITTER * np.ones(self.M))) grads", "key in grad_all.keys(): grad_all[key] /= N return energy, grad_all def run_pep_sequential(self, idxs, no_sweeps,", "Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\" new_m = mcav + vcav", "np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_cav", "(int, optional): Description Returns: TYPE: Description \"\"\" try: for e in range(no_epochs): if", "params (TYPE): Description key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" self.ls =", "new_hypers=False) def update_hypers(self, params): \"\"\"Summary Args: params (TYPE): Description \"\"\" self.sgp_layer.update_hypers(params, self.x_train) self.lik_layer.update_hypers(params)", "* cur_t2 + frac_t2 tx1_new = decay * 
cur_t1 + (1 - decay)", "dm, dv, extra_res, cav_m, cav_v, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x(", "(1.0 - alpha) * t1_old + t1_frac t2_new = (1.0 - alpha) *", "- np.einsum('dab,nb->nda', beta, k_i) variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] dlogZd_dmi2", "N = self.N Din = self.Din self.x_prev_1 = np.zeros((N, Din)) self.x_prev_2 = np.zeros((N,", "Description Returns: TYPE: Description \"\"\" Su = self.Su mu = self.mu Lu =", "temp2 + temp3 dKuu_via_logZ = dKuu_via_mi + dKuu_via_vi dKfu_via_mi = dmiKuuinvMcav dKfu_via_vi =", "(TYPE): Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description t01 (TYPE):", "TODO return self.run_pep_sequential( train_idxs, no_sweeps, alpha, compute_energy, display_steps) def run_pep_parallel(self, idxs, no_sweeps, alpha,", "alpha=alpha) def inference(self, alpha=1.0, no_epochs=10, parallel=True, decay=0): \"\"\"Summary Args: alpha (float, optional): Description", "alpha) * self.x_next_1[idxs, :] cav_next_2 += (1 - alpha) * self.x_next_2[idxs, :] return", "# TYPE: Description # \"\"\" # if not self.updated: # self.sgp_layer.update_posterior() # self.updated", "-0.5 * np.log(2 * np.pi * v_sum) alpha_term = 0.5 * (1 -", "= - term1 - term2 KuuinvMcav = np.einsum('ab,ndb->nda', Kuuinv, mcav) dmiKuuinvMcav = dm[:,", "sn (int): Description updated (bool): Description x_next_1 (TYPE): Description x_next_2 (TYPE): Description x_prev_1", "self.ls = np.zeros([Din, ]) self.sf = 0 def forward_prop_thru_post(self, x): \"\"\"Summary Args: x", "(TYPE): Description N (TYPE): Description sf (int): Description Splusmm (TYPE): Description Su (TYPE):", "range(no_sweeps): if k % display_steps == 0: print 'PEP, epoch: %d / %d'", "* t1_new self.t2[n, :, :, :] = decay * t2_old + (1 -", "- decay) * tx1_new tx2_new = decay * cur_t2 + (1 - decay)", "\"\"\" super(SGPR, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = 
SGP_Layer(self.N, self.Din, self.Dout, self.M) def", "np.zeros((self.N, self.Din)) self.tx2 = np.zeros((self.N, self.Din)) self.t01 = prior_mean / prior_var self.t02 =", "= {} grad_cav = {'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_cav def update_factor(self,", "alpha, grad_cav, cav_m, cav_v, decay=0.0): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description", "self.x_next_2 = np.zeros((N, Din)) self.x_up_1 = np.zeros((N, Din)) self.x_up_2 = np.zeros((N, Din)) self.x_prior_1", "up_2 else: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2", "cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :] n1_new = (1 - alpha)", "= \\ self.compute_cavity_x_sequential(self.NEXT, [n], alpha) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( [n], cav_tm1_m,", "tn**2 * gn**2 * wnScavwn term2b = - gn * tn * wnScavSinvm", "(TYPE): Description sf (int): Description Splusmm (TYPE): Description Su (TYPE): Description Suinv (TYPE):", "config parallel = True # TODO: put this in config energy, grad_all =", "self.sf Dout = self.Dout M = self.M zu = self.zu self.Kuu = compute_kernel(2", "= up_1 self.x_up_2 = up_2 for n in range(0, self.N - 1): #", "np.newaxis, :], axis=2) oneminuswnSwn = 1 - alpha * tn * wnSwn term2a", "= var_new_parallel * n2_damped self.variances[idxs, :] = var_new_parallel self.means[idxs, :] = mean_new_parallel def", "(TYPE): Description t02 (TYPE): Description tx1 (TYPE): Description tx2 (TYPE): Description updated (bool):", "# only do this once at the begining for gaussian emission lik if", "Raises: NotImplementedError: Description \"\"\" if mode == self.UP: cav_up_1 = self.x_prev_1 + self.x_next_1", "idxs, mcav, vcav, dm, dm2, dv, alpha, prop_info): h, beta_si, gamma_si = prop_info[0],", "prior_var) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) # natural params for latent variables", "beta = self.beta h_si = p_i - np.einsum('dab,nb->nda', beta, 
k_i) variance_i = self.variances[idxs,", "* np.random.rand(Din, )) sf = np.log(np.array([1])) zu = np.tile(np.linspace(-1, 1, M).reshape((M, 1)), (1,", "Suhat, np.einsum('ndab,ndbc->ndac', inner, Suhat)) Suinvnew = np.linalg.inv(Sunew) SuinvMunew = np.einsum('ndab,ndb->nda', Suinvnew, munew) t2_frac", "1.0 / (variance_i/alpha - np.sum(k_i[:, np.newaxis, :] * h_si, axis=2)) dlogZd_dmi = -dlogZd_dmi2", "self.Din + self.Dcon_dyn, self.Din, self.M) if gp_emi: self.emi_layer = SGP_Layer( self.N, self.Din +", "= self.sgp_layer.get_hypers() # emi_params = self.emi_layer.get_hypers() # ssm_params = {'sn': self.sn} # params", "dBhat = np.einsum('nd,nab->ndab', dv, psi2) # compute grads wrt psi1 and psi2 dpsi1", "\"\"\" super(SGPR_rank_one, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer_rank_one(self.N, self.Din, self.Dout, self.M) def", "cur_n1 + frac_n1 n2_new = (1 - alpha) * cur_n2 + frac_n2 self.x_next_1[idxs,", "np.linalg.slogdet(Su) signKuu, logdetKuu = np.linalg.slogdet(Kuu) Suinvm = np.einsum('dab,db->da', Suinv, mu) term1 = 0.5", "decay=0.5): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int, optional): Description parallel (bool,", "2 * self.sf, x, self.zu) qfu = np.dot(kfu, self.Kuuinv) mf = np.einsum('nm,dm->nd', qfu,", "self.x_next_2[idxs, :] return cav_next_1 / cav_next_2, 1.0 / cav_next_2, cav_next_1, cav_next_2 else: raise", "y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( m, v, dm, dv, extra_res, x, alpha=alpha)", "= dict(sgp_params) # init_params.update(lik_params) # return init_params # def get_hypers(self): # \"\"\"Summary #", "vx (TYPE): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" N =", "(1 - alpha)) mean_div_var_i_new = (mean_i_new / var_i_new + mean_i / variance_i *", "= Suhat + np.einsum('nda,ndb->ndab', muhat, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Smm,", "+= 0.5 * \\ np.sum(self.muhat * 
np.linalg.solve(self.Suhat, self.muhat)) return phi_cavity def compute_phi(self, alpha=1.0):", "# self.updated = True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy =", "self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, [n + 1], decay=decay, alpha=alpha) def", "(float, optional): Description Returns: TYPE: Description \"\"\" a = extra_args muhat, Suhat, SuinvMuhat,", "- 2 * dvx) new_p2 = 1.0 / new_v new_p1 = new_p2 *", "Bhat] return mout, vout, extra_res def _forward_prop_random_thru_cav_mm(self, n, mx, vx, alpha): \"\"\"Summary Args:", "(TYPE): Description hidden_size (TYPE): Description no_pseudo (TYPE): Description lik (str, optional): Description prior_mean", "dzu, dmx, dvx = compute_psi_derivatives( dpsi1, psi1, dpsi2, psi2, ls, sf2, mx, vx,", "2 * self.ls, 2 * self.sf, mx, vx, self.zu) mout = np.einsum('nm,ndm->nd', psi1,", "inputs (TYPE): Description # no_samples (int, optional): Description # Returns: # TYPE: Description", "self.NEXT: idxs = np.arange(0, self.N - 1) cur_n1 = self.x_next_1[idxs, :] cur_n2 =", "optional): Description prior_var (int, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPSSM, self).__init__( y_train,", "idxs = np.arange(self.N - 1) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( idxs, cav_tm1_m,", "sf = np.log(np.array([0.5])) params = dict() params['sf' + key_suffix] = sf params['ls' +", "M]) # TODO self.mu = np.zeros([Dout, M, ]) self.Su = np.zeros([Dout, M, M])", "+ key_suffix] self.sf = params['sf' + key_suffix] self.zu = params['zu' + key_suffix] #", "\\ self.compute_transition_tilted( mprop, vprop, cav_t_m, cav_t_v, alpha) grad_hyper, grad_cav = self.dyn_layer.backprop_grads_lvm( mprop, vprop,", "Din]) self.Kuu = np.zeros([M, M]) self.Kuuinv = np.zeros([M, M]) # variables for the", "* np.log(2 * np.pi * v_sum) alpha_term = 0.5 * (1 - alpha)", "+ (1 - decay) * tx1_new tx2_new = decay * cur_t2 + (1", "self.compute_phi_cavity() phi = scale_prior * 
phi_prior + scale_post * phi_post + scale_cav *", "dict(sgp_params) # params.update(lik_params) # return params class SGPSSM(Base_SGPSSM): \"\"\"Summary Attributes: Din (TYPE): Description", "if gp_emi: self.emi_layer = SGP_Layer( self.N, self.Din + self.Dcon_emi, self.Dout, self.M) # natural", "self.x_next_2 + \\ (1 - alpha) * self.x_up_2 cav_up_1[0, :] += self.x_prior_1 cav_up_2[0,", "= self.y_train[idxs, :] energy = {} grad_all = {} for k in range(no_sweeps):", "n, alpha, grad_cav, extra_args, decay=0): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description", "mcav (TYPE): Description vcav (TYPE): Description n1cav (TYPE): Description n2cav (TYPE): Description idxs", ":], axis=2) mwn = np.sum(mu * p_i[:, np.newaxis, :], axis=2) oneminuswnSwn = 1", "(str, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer", "x_prior_2 (TYPE): Description x_up_1 (TYPE): Description x_up_2 (TYPE): Description \"\"\" def __init__(self, y_train,", "np.einsum('nd,na,nb->ndab', dv, kfu, kfu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = np.einsum('ab,ndb->nda',", "* np.ones(x.shape[0])) kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu) qfu", "kfu) vout = kff + Bpsi2 return mout, vout def sample(self, x): \"\"\"Summary", "dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( m,", "\"\"\"Summary Args: mx (TYPE): Description vx (None, optional): Description Returns: TYPE: Description \"\"\"", "optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer =", "mx, vx=None, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description mx (TYPE): Description vx (None,", "return phi def forward_prop_thru_cav(self, n, mx, vx=None, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description", "\"\"\"Summary # Args: # inputs 
(TYPE): Description # no_samples (int, optional): Description #", "axis=2) mean_i_new = mcav - dm / dm2 var_new_parallel = 1 / (1", "= np.einsum('nd,nm->ndm', dm_all, psi1) dBhat = np.einsum('nd,nab->ndab', dv, psi2) # compute grads wrt", "x_train[randind[0:M], :] zu = centroids if N < 10000: X1 = np.copy(x_train) else:", "self.compute_kuu() # compute mu and Su for each layer self.update_posterior() class SGPR(Base_SGPR): \"\"\"Summary", "* n2_new else: raise NotImplementedError('unknown mode') def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\"", "emi_layer (TYPE): Description lik (TYPE): Description M (TYPE): Description N (TYPE): Description sgp_layer", "\"\"\"Summary Args: n (TYPE): Description mx (TYPE): Description vx (None, optional): Description alpha", "'epoch %d/%d' % (e, no_epochs) if parallel: self.inf_parallel(e, alpha, decay) else: self.inf_sequential(e, alpha,", "psi1) dBhat = np.einsum('nd,nab->ndab', dv, psi2) # compute grads wrt psi1 and psi2", "Returns: TYPE: Description \"\"\" # merge info from output cav_up_m, cav_up_v, _, _", "moments t1n = self.t1[n, :, :] t2n = self.t2[n, :, :, :] Suinvhat", "yerr=np.sqrt(np.diag(self.Su[1, :, :]))) # axs[0].set_title('n = %d' % n[0]) # plt.show() def sample(self,", "# n2_new = mean_new_parallel / var_new_parallel # n1_ori = 1.0 / variance_i #", "forward_prop_thru_cav(self, n, mx, vx=None, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description mx (TYPE): Description", "* m dAhat = np.einsum('nd,nm->ndm', dm_all, psi1) dBhat = np.einsum('nd,nab->ndab', dv, psi2) #", "= (1 - alpha) * cur_t1 + frac_t1 tx2_new = (1 - alpha)", "PCA_reduce(y_train, self.Din) post_m_mean = np.mean(post_m, axis=0) post_m_std = np.std(post_m, axis=0) post_m = (post_m", ":] += self.x_prior_1 post_2[0, :] += self.x_prior_2 vx = 1.0 / post_2 mx", "compute cavity covariance betacavKuu = np.einsum('ndab,bc->ndac', beta_si, Kuu) mcav = np.einsum('ab,ndb->nda', Kuu, gamma_si)", "kfu = compute_kernel(2 * self.ls, 2 * 
self.sf, x, self.zu) mout = np.einsum('nm,dm->nd',", "lik_layer (TYPE): Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description updated", "cav_tm1_2 = \\ self.compute_cavity_x(self.NEXT, alpha) idxs = np.arange(self.N - 1) (mprop, vprop, extra_res)", "= t2_new # TODO: update posterior self.Su = Sunew[0, :, :, :] self.mu", "grads wrt Ahat and Bhat dm_all = dm - 2 * dv *", "TYPE: Description \"\"\" # update kuu and kuuinv ls = self.ls sf =", "M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description sn (int): Description updated", "2 * dvcav) new_n2 = 1.0 / new_v new_n1 = new_n2 * new_m", "self.ls params['sf' + key_suffix] = self.sf params_zu_i = self.zu params['zu' + key_suffix] =", "np.log(alpha) logZ = exp_term + const_term + alpha_term dvt = -0.5 / v_sum", "Kuu = self.Kuu Kuuinv = self.Kuuinv Su = self.Su mu = self.mu Suinv", "update_hypers(self, params): # \"\"\"Summary # Args: # params (TYPE): Description # Returns: #", "cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x_sequential(self.NEXT, [n], alpha) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav(", "* 1.0 / alpha - 1.0 scale_cav = - N * 1.0 /", "Args: mx (TYPE): Description vx (TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv =", "cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 # deal with the", "Returns: TYPE: Description \"\"\" dmx = grad_cav['mx'] dvx = grad_cav['vx'] new_m = cav_m", "Smm = self.Su + np.einsum('da,db->dab', self.mu, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac',", "Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv,", "if N < 10000: X1 = np.copy(x_train) else: randind = np.random.permutation(N) X1 =", "= rho * n2_new + (1.0 - rho) * n2_ori # var_new_parallel =", "params['sf' + key_suffix] = self.sf params_zu_i = self.zu params['zu' + key_suffix] = self.zu", "alpha=alpha) logZn, dmn, dvn = \\ 
self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav =", "= extra_args muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat = \\", "\"\"\" def __init__(self, x_train, y_train, no_pseudo, lik='Gaussian'): \"\"\"Summary Args: x_train (TYPE): Description y_train", "]) self.sf = 0 def forward_prop_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description Returns:", "/ batch_size / alpha xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] for", "n in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) xn = self.x_train[n, :].reshape([1, self.Din])", "np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = 2 * np.einsum('ndab,ndb->nda', dvcav, muhat) \\ +", "= \\ self.sgp_layer.forward_prop_thru_cav( idxs, cav_m, cav_v, alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m,", "self.N scale_post = N * 1.0 / alpha - 1.0 scale_cav = -", "m_diff**2 / v_sum**2 dmt = m_diff / v_sum dmprop = m_diff / v_sum", "self.Kuuinv, self.mu) self.beta = self.Kuuinv - np.einsum('ab,dbc->dac', self.Kuuinv, np.einsum('dab,bc->dac', self.Su, self.Kuuinv)) def init_hypers(self,", "return self._forward_prop_random_thru_post_mm(mx, vx) def _forward_prop_deterministic_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE:", "{} for key in sgp_grad.keys(): grad_all[key] = sgp_grad[key] for key in lik_grad.keys(): grad_all[key]", "# run power-EP and compute grads no_ep_sweeps = 10 # TODO: put this", "gamma_si = gamma + hd1 beta_si = beta - hd2h # projection h", "np.einsum('nm,dm->nd', qfu, u_sample) vf = kff - np.dot(qfu, kfu.T) Lf = np.linalg.cholesky(vf) epsilon", "post_2 return mx, vx def get_posterior_y(self): \"\"\"Summary Returns: TYPE: Description \"\"\" mx, vx", "xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] # update model with new", "\"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = \\ extra_args[0], extra_args[1], extra_args[2], extra_args[3] dmcav, dvcav", "n, mx, vx=None, alpha=1.0): \"\"\"Summary Args: n 
(TYPE): Description mx (TYPE): Description vx", "/ v_sum dmprop = m_diff / v_sum return logZ, dmprop, dvprop, dmt, dvt", "KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' def compute_cavity_x(self, n, alpha): \"\"\"Summary Args: n (TYPE):", "== 0: print 'epoch %d/%d' % (e, no_epochs) if not parallel: for n", "self.x_up_2 cav_up_1[0, :] += self.x_prior_1 cav_up_2[0, :] += self.x_prior_2 return cav_up_1 / (cav_up_2", "dvprop, dmt, dvt = \\ self.compute_transition_tilted( mprop, vprop, cav_t_m, cav_t_v, alpha) grad_hyper, grad_cav", "= dm[:, :, np.newaxis] * KuuinvMcav dKuu_via_mi = -np.einsum('nda,nb->ab', dmiKuuinvMcav, p_i) VcavKuuinvKufi =", "dm, dv, alpha, scale_logZ) energy = sgp_contrib + lik_contrib grad_all = {} for", "compute_kfu_derivatives( dKfu_via_logZ, kfu, np.exp(self.ls), sf2, x, self.zu) dls = dls * np.exp(self.ls) dsf2", "update_hypers(self, params, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix (str, optional): Description Returns:", "= self.Din Dout = self.Dout params['ls' + key_suffix] = self.ls params['sf' + key_suffix]", "+ cav_v * dmx new_v = cav_v - cav_v**2 * (dmx**2 - 2", "dm dv_s = scale_logZ * dv dm2_s = scale_logZ * dm2 sgp_contrib, sgp_grad", "lik_grad = self.lik_layer.backprop_grads( m_cav, v_cav, dm, dv, alpha, scale_logZ) energy = sgp_contrib +", "prior_mean=0, prior_var=1, x_control=None, gp_emi=False, control_to_emi=True): \"\"\"Summary Args: y_train (TYPE): Description hidden_size (TYPE): Description", ":, :] t2_old = self.t2[n, :, :, :] t1_new = (1.0 - alpha)", "import * from lik_layers import Gauss_Layer, Probit_Layer, Gauss_Emis from base_models import Base_SGPR, Base_SGPLVM,", "the posterior approximation if new_hypers and x_train is not None: Kfu = compute_kernel(2*self.ls,", "= self.sf params_zu_i = self.zu params['zu' + key_suffix] = self.zu return params def", "self.emi_layer.output_probabilistic(mf, vf) # return my, vy # def update_hypers(self, params): # \"\"\"Summary #", "n2cav, 
decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description dmcav (TYPE): Description dvcav (TYPE):", "T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf) T1u = np.einsum('bn,nd->db', self.KuuinvKuf, self.means / self.variances) Vinv", "* n2_new + (1.0 - rho) * n2_ori # var_new_parallel = 1.0 /", "decay=decay) self.update_factor_x( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, decay=decay, alpha=alpha) self.update_factor_x( self.PREV,", "Description prop_mode (TYPE, optional): Description Returns: TYPE: Description \"\"\" N = self.N if", "1)), (1, Din)) else: if N < 10000: centroids, label = kmeans2(x_train, M,", "n1_new self.x_next_2[idxs, :] = decay * cur_n2 + (1 - decay) * n2_new", "= -1.0 / dm2 - np.sum(k_i[:, np.newaxis, :] * h, axis=2) mean_i_new =", "self.Su = Sunew[0, :, :, :] self.mu = munew[0, :, :] self.Suinv =", "cav_next_2 = self.x_up_2[idxs, :] + self.x_prev_2[idxs, :] cav_next_1 += (1 - alpha) *", "= np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = 2 * np.einsum('ndab,ndb->nda', dvcav, muhat) \\", "randind = np.random.permutation(N) X1 = X[randind[:5000], :] x_dist = cdist(X1, X1, 'euclidean') triu_ind", "parallel (bool, optional): Description decay (float, optional): Description Returns: TYPE: Description \"\"\" try:", "\"\"\" # compute the posterior approximation self.Suinv = self.Kuuinv + np.sum(self.t2, axis=0) self.SuinvMu", "Vmm, Kuuinv)), axis=0) S = 0.5 * S + dKuu_via_logZ dhyp = d_trace_MKzz_dhypers(", "- np.einsum('ab,dbc->dac', self.Kuuinv, np.einsum('dab,bc->dac', self.Su, self.Kuuinv)) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: x_train", "cav_m / cav_v # neg_idxs = np.where(frac_t2 < 0) # frac_t2[neg_idxs] = 0", "return grad_hyper, grad_cav def update_factor(self, n, alpha, grad_cav, extra_args, decay=0): \"\"\"Summary Args: n", "1.0 / variance_i # n2_ori = mean_i / variance_i # n1_damped = rho", "Su = self.Su mu = self.mu Suinv = 
self.Suinv p_i = self.KuuinvKuf[:, idxs].T", "SuinvMunew[0, :, :] else: # parallel update self.t1[n, :, :] = decay *", "self.KuuinvKuf[:, idxs].T[:, np.newaxis, :] k_i = self.Kfu[idxs, :] k_ii = self.Kff_diag[idxs][:, np.newaxis] gamma", "TODO: minibatch parallel idxs = np.arange(self.N) y = self.y_train[idxs, :] cav_m, cav_v, _,", "n1cav (TYPE): Description n2cav (TYPE): Description decay (float, optional): Description alpha (float, optional):", "np.einsum('na,dab->ndb', p_i, Su) wnSwn = np.sum(wnS * p_i[:, np.newaxis, :], axis=2) mwn =", "Args: x_train (TYPE): Description y_train (TYPE): Description no_pseudo (TYPE): Description lik (str, optional):", "self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 else: up_1, up_2", "cav_tm1_v, cav_tm1_1, cav_tm1_2, [n], decay=decay, alpha=alpha) self.update_factor_x_sequential( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1,", "\\ np.sum(self.muhat * np.linalg.solve(self.Suhat, self.muhat)) return phi_cavity def compute_phi(self, alpha=1.0): \"\"\"Summary Args: alpha", "mf, vf # def sample_f(self, inputs, no_samples=1): # \"\"\"Summary # Args: # inputs", "dlogZd_dmi2 = 1.0 / (variance_i/alpha - np.sum(k_i[:, np.newaxis, :] * h_si, axis=2)) dlogZd_dmi", "Description Raises: NotImplementedError: Description \"\"\" if mode == self.UP: cav_up_1 = self.x_prev_1 +", "grad_all def run_pep(self, train_idxs, no_sweeps, alpha, parallel, compute_energy=False, display_steps=10): if parallel: return self.run_pep_parallel(", "Returns: # TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.get_hypers() # emi_params =", "alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x(self.NEXT, alpha) idxs = np.arange(self.N -", "\"\"\"Summary Args: n (TYPE): Description x (TYPE): Description alpha (TYPE): Description Returns: TYPE:", "params = dict(sgp_params) # params.update(emi_params) # params.update(ssm_params) # return params class SGP_Layer_rank_one(object): 
\"\"\"Summary", "self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf, x_train) KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 / self.variances) T2u", "= self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Smm = Suhat + np.einsum('nda,ndb->ndab', muhat,", "np.zeros((N, Din)) self.x_up_2 = np.zeros((N, Din)) self.x_prior_1 = prior_mean / prior_var self.x_prior_2 =", ":] var_i_new = -1.0 / dm2 - np.sum(k_i[:, np.newaxis, :] * h, axis=2)", "grads['sf'] = 2*dhyp[0] + dsf grads['ls'] = 2*dhyp[1] + dls grads['zu'] = dhyp[2]", ":, np.newaxis, np.newaxis] gamma_si = gamma + hd1 beta_si = beta - hd2h", "cav_m_n, cav_v_n, _, _ = self.compute_cavity_x([ n], alpha) (mn, vn, extra_res) = \\", "return cav_up_1 / (cav_up_2 + 1e-16), 1.0 / (cav_up_2 + 1e-16), cav_up_1, cav_up_2", "[n], xn, alpha=alpha) logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper,", "def compute_transition_tilted(self, m_prop, v_prop, m_t, v_t, alpha): \"\"\"Summary Args: m_prop (TYPE): Description v_prop", "def inf_parallel(self, epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE): Description alpha (TYPE): Description", "compute grads wrt Ahat and Bhat dm_all = dm - 2 * dv", "* cur_n2 + frac_n2 self.x_next_1[idxs, :] = decay * cur_n1 + (1 -", "Kuu - np.einsum('ab,ndbc->ndac', Kuu, betacavKuu) signSu, logdetSu = np.linalg.slogdet(Su) signKuu, logdetKuu = np.linalg.slogdet(Kuu)", "Description Returns: TYPE: Description \"\"\" N = self.N scale_post = N * 1.0", "1.0 / batch_size / alpha # scale_logZ = 0 xb = self.x_train[idxs, :]", "cav_x2 cav_m = cav_v * cav_x1 return cav_m, cav_v, cav_x1, cav_x2 def update_factor_x(self,", "train_idxs, no_sweeps, alpha, compute_energy, display_steps) def run_pep_parallel(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size", "+= np.diag(JITTER * np.ones(x.shape[0])) kfu = compute_kernel(2 * self.ls, 2 * self.sf, x,", "- n2cav frac_n1 = new_n1 - n1cav if mode == self.NEXT: idxs =", "randind = 
np.random.permutation(N) centroids = x_train[randind[0:M], :] zu = centroids if N <", "x_prev_2 (TYPE): Description x_prior_1 (TYPE): Description x_prior_2 (TYPE): Description x_up_1 (TYPE): Description x_up_2", "= compute_energy and (k == no_sweeps-1) m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity(idxs, alpha) logZ,", "\"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Su) phi_posterior = 0.5 *", "\"\"\"Summary Returns: TYPE: Description \"\"\" logZ_posterior = 0 (sign, logdet) = np.linalg.slogdet(self.Suhat) phi_cavity", "(float, optional): Description Returns: TYPE: Description \"\"\" # compute the leave one out", "+= np.diag(JITTER * np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def update_posterior(self, x_train=None, new_hypers=False): \"\"\"Summary", "/ n1_damped # mean_new_parallel = var_new_parallel * n2_damped self.variances[idxs, :] = var_new_parallel self.means[idxs,", "alpha, grad_cav, extra_res, decay=decay) self.update_factor_x_sequential( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, [n],", "= 1.0 / (variance_i/alpha - np.sum(k_i[:, np.newaxis, :] * h_si, axis=2)) dlogZd_dmi =", "a = extra_args muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat = \\ a[0],", "inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0.5): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int,", ":] yb = self.y_train[idxs, :] # update model with new hypers self.update_hypers(params) #", "dvt = -0.5 / v_sum + 0.5 * m_diff**2 / v_sum**2 dvprop =", "axs[0].errorbar(self.zu[:, 0]+0.05, self.mu[0, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[0, :, :]))) # axs[1].errorbar(self.zu[:, 0]+0.05, self.mu[1, :],", "grads = {} grads['sf'] = 2*dhyp[0] + dsf grads['ls'] = 2*dhyp[1] + dls", "Base_SGPLVM, Base_SGPSSM from config import * class SGP_Layer(object): \"\"\"Summary Attributes: Din (TYPE): Description", "# Returns: # TYPE: Description # \"\"\" # sgp_params = 
self.sgp_layer.get_hypers() # lik_params", "decay * cur_t1 + (1 - decay) * tx1_new tx2_new = decay *", "epoch (TYPE): Description alpha (TYPE): Description decay (TYPE): Description Returns: TYPE: Description \"\"\"", "hyperparameters self.ls = np.zeros([Din, ]) self.sf = 0 def forward_prop_thru_post(self, x): \"\"\"Summary Args:", "# compute mu and Su for each layer self.update_posterior(x_train, new_hypers=True) def compute_cavity(self, idxs,", "n (TYPE): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" # compute", "# ssm_params = {'sn': self.sn} # params = dict(sgp_params) # params.update(emi_params) # params.update(ssm_params)", "optional): Description Returns: TYPE: Description \"\"\" # dict to hold hypers, inducing points", "# \"\"\" # self.sgp_layer.update_hypers(params) # self.emi_layer.update_hypers(params) # self.sn = params['sn'] # def init_hypers(self):", "/ self.variances) Vinv = self.Kuuinv + T2u self.Suinv = Vinv self.Su = np.linalg.inv(Vinv)", "+ dKfu_via_vi, axis=1) dsf2, dls, dzu = compute_kfu_derivatives( dKfu_via_logZ, kfu, np.exp(self.ls), sf2, x,", "decay) * tx2_new self.tx1[n, :] = tx1_new self.tx2[n, :] = tx2_new def get_posterior_x(self):", "\"\"\" # self.sgp_layer.update_hypers(params) # self.lik_layer.update_hypers(params) # def init_hypers(self): # \"\"\"Summary # Returns: #", "self.update_factor_x_sequential( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, [n + 1], decay=decay, alpha=alpha)", "= k_ii - np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i) return m_si_i, v_si_ii, [h, beta_si, gamma_si]", "Returns: TYPE: Description \"\"\" N = self.N M = self.M ls = np.exp(self.ls)", "= self.M Din = self.Din Dout = self.Dout params['ls' + key_suffix] = self.ls", "SGP_Layer_rank_one(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description Kuu (TYPE): Description Kuuinv", "np.einsum('ab,ndbc->ndac', Kuu, betacavKuu) signSu, logdetSu = np.linalg.slogdet(Su) signKuu, logdetKuu = 
np.linalg.slogdet(Kuu) Suinvm =", "Dout = self.Dout M = self.M zu = self.zu self.Kuu = compute_kernel(2 *", "i in range(batch_size): m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity( [idxs[i]], alpha) logZ, dm, dv,", "alpha=1.0, no_epochs=10, parallel=False, decay=0.5): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int, optional):", "cav_v, alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav", "temp1 + temp2 + temp3 dKuu_via_logZ = dKuu_via_mi + dKuu_via_vi dKfu_via_mi = dmiKuuinvMcav", "non real-valued data post_m = PCA_reduce(y_train, self.Din) post_m_mean = np.mean(post_m, axis=0) post_m_std =", "Returns: # TYPE: Description # \"\"\" # if not self.updated: # self.sgp_layer.update_posterior() #", "# return my, vy # def update_hypers(self, params): # \"\"\"Summary # Args: #", "is None: return self._forward_prop_deterministic_thru_post(mx) else: return self._forward_prop_random_thru_post_mm(mx, vx) def _forward_prop_deterministic_thru_post(self, x): \"\"\"Summary Args:", "post_m = PCA_reduce(y_train, self.Din) post_m_mean = np.mean(post_m, axis=0) post_m_std = np.std(post_m, axis=0) post_m", "np.log(2 * np.pi * v_sum) alpha_term = 0.5 * (1 - alpha) *", "cav_v frac_t1 = new_p1 - cav_m / cav_v # neg_idxs = np.where(frac_t2 <", "(TYPE): Description \"\"\" def __init__(self, x_train, y_train, no_pseudo, lik='Gaussian'): \"\"\"Summary Args: x_train (TYPE):", "output_size, no_pseudo): \"\"\"Summary Args: no_train (TYPE): Description input_size (TYPE): Description output_size (TYPE): Description", "# fs[:, :, k] = self.sgp_layer.sample(inputs) # return fs # def predict_y(self, inputs):", "* cur_n2 + (1 - decay) * n2_new elif mode == self.PREV: cur_n1", "Kuuinv, dAhat) grad_hyper = {} grad_input = {'mx': dmx, 'vx': dvx, 'mcav': dmcav,", "Kuuinv, mcav) dmiKuuinvMcav = dm[:, :, np.newaxis] * KuuinvMcav dKuu_via_mi = -np.einsum('nda,nb->ab', dmiKuuinvMcav,", "cur_n2 = self.x_prev_2[idxs, :] n1_new = (1 
- alpha) * cur_n1 + frac_n1", "lik_params = self.lik_layer.init_hypers() # init_params = dict(sgp_params) # init_params.update(lik_params) # return init_params #", "a[5], a[6] Kuuinv = self.Kuuinv # compute grads wrt Ahat and Bhat dAhat", "t2_frac if t1_new.shape[0] == 1: # TODO: do damping here? self.t1[n, :, :]", "cav_up_v, _, _ = self.compute_cavity_x(self.UP, alpha) if not self.gp_emi: # only do this", "self.updated = True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.lik_layer.output_probabilistic(mf,", "extra_args[0], extra_args[1], extra_args[2], extra_args[3] dmcav, dvcav = grad_cav['mcav'], grad_cav['vcav'] # perform Power-EP update", "def init_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description # \"\"\" # sgp_params", "(TYPE): Description Returns: TYPE: Description \"\"\" # merge info from output cav_up_m, cav_up_v,", ":] * dv[:, :, np.newaxis] temp1 = - np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi, p_idlogZ_dvi) temp2 =", "fs # def predict_y(self, inputs): # \"\"\"Summary # Args: # inputs (TYPE): Description", "= np.linalg.slogdet(self.Kuu) logZ_prior = self.Dout * 0.5 * logdet return logZ_prior def compute_phi_posterior(self):", "\"\"\"Summary Returns: TYPE: Description \"\"\" post_1 = self.t01 + self.tx1 post_2 = self.t02", "Description x (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" muhat, Suhat,", "= np.einsum('ab,ndb->nda', Kuuinv, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) -", "no_pseudo self.N = N = no_train # factor variables self.variances = np.zeros([N, Dout])", "* self.ls, 2 * self.sf, x, self.zu) mout = np.einsum('nm,ndm->nd', kfu, Ahat) Bkfukuf", "const_term + alpha_term dvt = -0.5 / v_sum + 0.5 * m_diff**2 /", "cav_m_n, cav_v_n, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( [n], alpha, grad_cav,", "damping here? 
self.t1[n, :, :] = t1_new self.t2[n, :, :, :] = t2_new", "variance_i # n1_damped = rho * n1_new + (1.0 - rho) * n1_ori", "Description cav_v (TYPE): Description decay (float, optional): Description Returns: TYPE: Description \"\"\" dmx", "gn * tn * wnScavSinvm term2c = 0.5 * tn * mwn**2 /", "logdet) = np.linalg.slogdet(self.Suhat) phi_cavity = 0.5 * np.sum(logdet) phi_cavity += 0.5 * \\", "kfu, kfu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = np.einsum('ab,ndb->nda', Kuuinv, dAhat)", "n2cav frac_n1 = new_n1 - n1cav if mode == self.NEXT: cur_n1 = self.x_next_1[idxs,", "cur_n1 + frac_n1 n2_new = (1 - alpha) * cur_n2 + frac_n2 self.x_prev_1[idxs,", "self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Suhat,", "vprop, cav_t_m, cav_t_v, alpha) grad_hyper, grad_cav = self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop, dvprop, extra_res,", "TYPE: Description \"\"\" logZ_posterior = 0 (sign, logdet) = np.linalg.slogdet(self.Suhat) phi_cavity = 0.5", "- Kuuinv psi0 = np.exp(2.0 * self.sf) psi1, psi2 = compute_psi_weave( 2 *", "(TYPE): Description x_prev_1 (TYPE): Description x_prev_2 (TYPE): Description x_prior_1 (TYPE): Description x_prior_2 (TYPE):", "self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( mn, vn, dmn, dvn, extra_res,", "cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x(self.NEXT, alpha) idxs = np.arange(self.N - 1) (mprop, vprop,", "alpha) * cur_t2 + frac_t2 tx1_new = decay * cur_t1 + (1 -", ":] tx1_new = (1 - alpha) * cur_t1 + frac_t1 tx2_new = (1", "= self.mu Lu = np.linalg.cholesky(Su) epsilon = np.random.randn(self.Dout, self.M) u_sample = mu +", ":, :] self.SuinvMu = SuinvMunew[0, :, :] else: # parallel update self.t1[n, :,", "TYPE: Description \"\"\" Kuuinv = self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) B =", "= np.einsum('nm,dm->nd', kfu, A) Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, 
kfu) vout = kff", "# TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.get_hypers() # lik_params = self.lik_layer.get_hypers()", "Description x_prev_1 (TYPE): Description x_prev_2 (TYPE): Description x_prior_1 (TYPE): Description x_prior_2 (TYPE): Description", "* Kuuinv - np.sum(np.einsum('ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Vmm, Kuuinv)), axis=0) S = 0.5 *", "alpha) * self.x_prev_1[idxs, :] cav_prev_2 += (1 - alpha) * self.x_prev_2[idxs, :] return", "idxs].T[:, np.newaxis, :] k_i = self.Kfu[idxs, :] k_ii = self.Kff_diag[idxs][:, np.newaxis] gamma =", "np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 / self.variances) T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf) T1u = np.einsum('bn,nd->db',", "np.newaxis] temp1 = - np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi, p_idlogZ_dvi) temp2 = np.transpose(temp1, [0, 1]) temp3", "1.0 / cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT: idxs = np.arange(0, self.N", "Returns: TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Kuu) logZ_prior = self.Dout * 0.5", "* self.ls, 2 * self.sf, mx, vx, self.zu) mout = np.einsum('nm,ndm->nd', psi1, Ahat)", "emi_params = self.emi_layer.get_hypers() # ssm_params = {'sn': self.sn} # params = dict(sgp_params) #", "k_i, beta_si, k_i) return m_si_i, v_si_ii, [h, beta_si, gamma_si] def update_factor(self, idxs, mcav,", "= M = no_pseudo self.N = N = no_train # factor variables self.variances", "* self.x_up_1 cav_up_2 = self.x_prev_2 + self.x_next_2 + \\ (1 - alpha) *", "= 1.0 / prior_var self.UP, self.PREV, self.NEXT = 'UP', 'PREV', 'NEXT' def inf_parallel(self,", "NotImplementedError('unknown mode') def update_factor_x_sequential( self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav, idxs,", ":] cav_next_2 = self.x_up_2[idxs, :] + self.x_prev_2[idxs, :] cav_next_1 += (1 - alpha)", "_, _ = self.compute_cavity_x(self.UP, alpha) if not self.gp_emi: # only do this once", "no_sweeps-1) for i in range(batch_size): m_cav, v_cav, prop_info = 
self.sgp_layer.compute_cavity( [idxs[i]], alpha) logZ,", "parallel: for n in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) xn = self.x_train[n,", "= kff + Bpsi2 return mout, vout # TODO def _forward_prop_random_thru_post_mm(self, mx, vx):", "(bool, optional): Description decay (int, optional): Description Returns: TYPE: Description \"\"\" try: for", "- decay) * n1_new self.x_prev_2[idxs, :] = decay * cur_n2 + (1 -", "p_i = self.KuuinvKuf[:, idxs].T h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2] kfu =", "= True # TODO: put this in config energy, grad_all = self.run_pep(idxs, no_ep_sweeps,", "a = extra_args muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat =", "\"\"\" N = self.N M = self.M ls = np.exp(self.ls) sf2 = np.exp(2", "KeyboardInterrupt ...' class SGPLVM(Base_SGPLVM): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer", "cav_next_2, 1.0 / cav_next_2, cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def compute_cavity_x_sequential(self, mode,", "dict() params['sf' + key_suffix] = sf params['ls' + key_suffix] = ls params['zu' +", "return fs # def predict_y(self, inputs): # \"\"\"Summary # Args: # inputs (TYPE):", "this once at the begining for gaussian emission lik if isinstance(self.emi_layer, Gauss_Layer) and", "= 2 * sf2 * dsf2 # compute the gradients Vmm = Su", "cav_next_2, cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def compute_transition_tilted(self, m_prop, v_prop, m_t, v_t,", "x_next_2 (TYPE): Description x_prev_1 (TYPE): Description x_prev_2 (TYPE): Description x_prior_1 (TYPE): Description x_prior_2", "Description \"\"\" N = self.N if mb_size >= N: idxs = np.arange(N) xb", "mf = np.einsum('nm,dm->nd', qfu, u_sample) vf = kff - np.dot(qfu, kfu.T) Lf =", "== self.PREV: idxs = np.arange(1, self.N) cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs,", "Su for each layer self.update_posterior(x_train, new_hypers=True) 
def compute_cavity(self, idxs, alpha): # deletion p_i", "...' class SGPLVM(Base_SGPLVM): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer (TYPE):", "mean_i = self.means[idxs, :] tn = 1.0 / variance_i gn = mean_i wnScav", "_ = self.compute_cavity_x([ n], alpha) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], cav_m_n,", "= self.sgp_layer.backprop_grads_lvm( m, v, dm, dv, extra_res, cav_m, cav_v, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha,", "self.variances) Vinv = self.Kuuinv + T2u self.Suinv = Vinv self.Su = np.linalg.inv(Vinv) self.mu", "scale_prior = 1 phi_prior = self.compute_phi_prior() phi_post = self.compute_phi_posterior() phi_cav = self.compute_phi_cavity() phi", "Description tx1 (TYPE): Description tx2 (TYPE): Description updated (bool): Description \"\"\" def __init__(self,", "except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' class SGPLVM(Base_SGPLVM): \"\"\"Summary Attributes: Din (TYPE): Description", "= dict() params['sf' + key_suffix] = sf params['ls' + key_suffix] = ls params['zu'", "Returns: TYPE: Description \"\"\" # compute the posterior approximation self.Suinv = self.Kuuinv +", "= grad_cav['vx'] new_m = cav_m + cav_v * dmx new_v = cav_v -", "Description x_prior_2 (TYPE): Description x_up_1 (TYPE): Description x_up_2 (TYPE): Description \"\"\" def __init__(self,", "def compute_cavity_x(self, n, alpha): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description Returns:", "self.Dcon_dyn, self.Din, self.M) if gp_emi: self.emi_layer = SGP_Layer( self.N, self.Din + self.Dcon_emi, self.Dout,", "* np.pi * v_sum) alpha_term = 0.5 * (1 - alpha) * np.log(2", "\"\"\" post_1 = self.t01 + self.tx1 post_2 = self.t02 + self.tx2 vx =", "self.sf, mx, vx, self.zu) mout = np.einsum('nm,ndm->nd', psi1, Ahat) Bhatpsi2 = np.einsum('ndab,nab->nd', Bhat,", "yb = self.y_train[idxs, :] for k in range(no_sweeps): if k % display_steps ==", "Args: mode (TYPE): Description dmcav (TYPE): 
Description dvcav (TYPE): Description mcav (TYPE): Description", "TYPE: Description \"\"\" Kuuinv = self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) Smm =", "# TODO return self.run_pep_sequential( train_idxs, no_sweeps, alpha, compute_energy, display_steps) def run_pep_parallel(self, idxs, no_sweeps,", "y_train (TYPE): Description no_pseudo (TYPE): Description lik (str, optional): Description Raises: NotImplementedError: Description", ":] t2_old = self.t2[n, :, :, :] t1_new = (1.0 - alpha) *", "Description Returns: TYPE: Description \"\"\" # prior factor cav_x1 = self.t01 + (1", "= PCA_reduce(y_train, self.Din) post_m_mean = np.mean(post_m, axis=0) post_m_std = np.std(post_m, axis=0) post_m =", "self.x_up_2 = np.zeros((N, Din)) self.x_prior_1 = prior_mean / prior_var self.x_prior_2 = 1.0 /", "Kuuinv - np.sum(np.einsum('ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Vmm, Kuuinv)), axis=0) S = 0.5 * S", "range(Din): ls[i] = np.log(d2imed + 1e-16) sf = np.log(np.array([0.5])) params = dict() params['sf'", "Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" if vx is None:", "* dlogZd_dmi[:, :, np.newaxis] hd2h = np.einsum('nda,ndb->ndab', h_si, h_si) * dlogZd_dmi2[:, :, np.newaxis,", "Description cav_m (TYPE): Description cav_v (TYPE): Description decay (float, optional): Description Returns: TYPE:", "self.Su = np.zeros([Dout, M, M]) self.SuinvMu = np.zeros([Dout, M, ]) self.Suinv = np.zeros([Dout,", "models! 
\"\"\" import sys import math import numpy as np import scipy.linalg as", "Suinv = self.Suinv p_i = self.KuuinvKuf[:, idxs].T h, beta_si, gamma_si = prop_info[0], prop_info[1],", "v_cav, dm_s, dm2_s, dv_s, xb, alpha, prop_info) lik_grad = self.lik_layer.backprop_grads( m_cav, v_cav, dm,", "self.update_factor_x( idxs, alpha, grad_cav, cav_m, cav_v, decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...'", "= psi0 + Bpsi2 - mout**2 return mout, vout def backprop_grads_lvm(self, m, v,", "(TYPE): Description decay (int, optional): Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat,", "for i in range(batch_size): m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity( [idxs[i]], alpha) logZ, dm,", "kuu and kuuinv ls = self.ls sf = self.sf Dout = self.Dout M", "+ KuuinvVcavKuuinvKufi) dKfu_via_logZ = np.sum(dKfu_via_mi + dKfu_via_vi, axis=1) dsf2, dls, dzu = compute_kfu_derivatives(", "method for non real-valued data post_m = PCA_reduce(y_train, self.Din) post_m_mean = np.mean(post_m, axis=0)", "extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, cav_m, cav_v, alpha=alpha) logZ, dm, dv = \\", "Description Raises: NotImplementedError: Description \"\"\" super(SGPSSM, self).__init__( y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var,", "display_steps=10): if parallel: return self.run_pep_parallel( train_idxs, no_sweeps, alpha, compute_energy, display_steps) else: # TODO", "self.muhat)) return phi_cavity def compute_phi(self, alpha=1.0): \"\"\"Summary Args: alpha (float, optional): Description Returns:", "no_sweeps, alpha, parallel, compute_energy=False, display_steps=10): if parallel: return self.run_pep_parallel( train_idxs, no_sweeps, alpha, compute_energy,", "axis=0) self.Su = np.linalg.inv(self.Suinv) self.mu = np.einsum('dab,db->da', self.Su, self.SuinvMu) def init_hypers(self, x_train=None, key_suffix=''):", "* dvx) new_p2 = 1.0 / new_v new_p1 = new_p2 * new_m frac_t2", "self.x_up_1[idxs, :] + 
self.x_prev_1[idxs, :] cav_next_2 = self.x_up_2[idxs, :] + self.x_prev_2[idxs, :] cav_next_1", "hidden_size, no_pseudo, lik, prior_mean, prior_var) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) # natural", "else: idxs = np.random.choice(N, mb_size, replace=False) xb = self.x_train[idxs, :] yb = self.y_train[idxs,", "Description no_pseudo (TYPE): Description \"\"\" self.Din = Din = input_size self.Dout = Dout", "(1 - alpha) * cur_n2 + frac_n2 self.x_prev_1[idxs, :] = decay * cur_n1", "compute grads wrt psi1 and psi2 dpsi1 = np.einsum('nd,ndm->nm', dm_all, Ahat) dpsi2 =", "Nb = idxs.shape[0] sf2 = np.exp(2 * self.sf) Dout = self.Dout Kuu =", "self.x_next_2[idxs, :] n1_new = (1 - alpha) * cur_n1 + frac_n1 n2_new =", ":] = tx2_new def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1 = self.t01", "alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( idxs, alpha, grad_cav, cav_m, cav_v,", "= centroids if N < 10000: X1 = np.copy(x_train) else: randind = np.random.permutation(N)", "cav_t_1, cav_t_2, [n + 1], decay=decay, alpha=alpha) def inference(self, alpha=1.0, no_epochs=10, parallel=True, decay=0):", "return logZ, dmprop, dvprop, dmt, dvt def update_factor_x( self, mode, dmcav, dvcav, mcav,", "Description \"\"\" def __init__(self, no_train, input_size, output_size, no_pseudo): \"\"\"Summary Args: no_train (TYPE): Description", "term2c = 0.5 * tn * mwn**2 / oneminuswnSwn term2d = -0.5 /", "(TYPE): Description alpha (TYPE): Description decay (TYPE): Description Returns: TYPE: Description \"\"\" #", "mcav, vcav, dm, dm2, dv, alpha, prop_info): h, beta_si, gamma_si = prop_info[0], prop_info[1],", "psi1, psi2 = compute_psi_weave( 2 * self.ls, 2 * self.sf, mx, vx, self.zu)", "Suinvhat = \\ extra_args[0], extra_args[1], extra_args[2], extra_args[3] dmcav, dvcav = grad_cav['mcav'], grad_cav['vcav'] #", "\"\"\" def __init__(self, y_train, hidden_size, no_pseudo, 
lik='Gaussian', prior_mean=0, prior_var=1, x_control=None, gp_emi=False, control_to_emi=True): \"\"\"Summary", "= self.x_train[idxs, :] yb = self.y_train[idxs, :] energy = {} grad_all = {}", "= new_n1 - n1cav if mode == self.NEXT: idxs = np.arange(0, self.N -", "None: return self._forward_prop_deterministic_thru_post(mx) else: return self._forward_prop_random_thru_post_mm(mx, vx) def _forward_prop_deterministic_thru_post(self, x): \"\"\"Summary Args: x", "# sgp_params = self.sgp_layer.init_hypers() # lik_params = self.lik_layer.init_hypers() # init_params = dict(sgp_params) #", "math import numpy as np import scipy.linalg as npalg from scipy import special", "x_prev_1 (TYPE): Description x_prev_2 (TYPE): Description x_prior_1 (TYPE): Description x_prior_2 (TYPE): Description x_up_1", "optional): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" if vx is", "self.Dout Kuu = self.Kuu Kuuinv = self.Kuuinv Su = self.Su mu = self.mu", "cav_up_1[0, :] += self.x_prior_1 cav_up_2[0, :] += self.x_prior_2 return cav_up_1 / (cav_up_2 +", "(TYPE): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" # compute the", "gn = mean_i wnScav = np.einsum('na,ndab->ndb', p_i, Sucav) wnScavwn = np.einsum('ndb,nb->nd', wnScav, p_i)", "if not parallel: for n in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) cav_m_n,", ":] = mean_new_parallel def backprop_grads_reg(self, idxs, m, v, dm, dm2, dv, x, alpha,", "beta_si, gamma_si] def update_factor(self, idxs, mcav, vcav, dm, dm2, dv, alpha, prop_info): h,", "dm_s = scale_logZ * dm dv_s = scale_logZ * dv dm2_s = scale_logZ", "= self.M zu = self.zu self.Kuu = compute_kernel(2 * ls, 2 * sf,", "the hyperparameters self.ls = np.zeros([Din, ]) self.sf = 0 def forward_prop_thru_post(self, x): \"\"\"Summary", "= m_diff / v_sum dmprop = m_diff / v_sum return logZ, dmprop, dvprop,", "self.tx2[n, :] tx1_new = (1 - alpha) * cur_t1 + frac_t1 tx2_new =", "sgp_contrib, grads class 
SGPR_rank_one(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer", "cav_tm1_2, [n], decay=decay, alpha=alpha) self.update_factor_x_sequential( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, [n", "self.x_up_2 = up_2 else: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 =", "posterior approximation self.Suinv = self.Kuuinv + np.sum(self.t2, axis=0) self.SuinvMu = np.sum(self.t1, axis=0) self.Su", "parallel: self.inf_parallel(e, alpha, decay) else: self.inf_sequential(e, alpha, decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt", "N / Nb * np.sum(term2a + term2b + term2c + term2d) sgp_contrib =", "cav_prev_1 += (1 - alpha) * self.x_prev_1[idxs, :] cav_prev_2 += (1 - alpha)", "t2_old + t2_frac if t1_new.shape[0] == 1: # TODO: do damping here? self.t1[n,", "alpha, scale_logZ) energy = sgp_contrib + lik_contrib grad_all = {} for key in", "self.ls sf = self.sf Dout = self.Dout M = self.M zu = self.zu", "* KuuinvMcav dKuu_via_mi = -np.einsum('nda,nb->ab', dmiKuuinvMcav, p_i) VcavKuuinvKufi = np.einsum('ndab,nb->nda', Sucav, p_i) KuuinvVcavKuuinvKufi", "Kuuinv (TYPE): Description ls (TYPE): Description M (TYPE): Description mu (TYPE): Description N", "* tx1_new tx2_new = decay * cur_t2 + (1 - decay) * tx2_new", "self.updated: # self.sgp_layer.update_posterior() # self.updated = True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) #", "= np.exp(2 * self.sf) kfu = compute_kernel(2 * self.ls, 2 * self.sf, x,", "self.sgp_layer.update_posterior() # self.updated = True # K = no_samples # fs = np.zeros((inputs.shape[0],", "\"\"\"Summary Args: alpha (float, optional): Description no_epochs (int, optional): Description parallel (bool, optional):", "layer self.update_posterior(x_train, new_hypers=True) def compute_cavity(self, idxs, alpha): # deletion p_i = self.KuuinvKuf[:, idxs].T[:,", "logZ = exp_term + const_term + alpha_term dvt = -0.5 / v_sum +", "self.Suinv = 
Suinvnew[0, :, :, :] self.SuinvMu = SuinvMunew[0, :, :] else: #", "self.zu) mout = np.einsum('nm,dm->nd', kfu, A) Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu) vout", "cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x_sequential(self.PREV, [n + 1], alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1,", "elif mode == self.NEXT: cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :] cav_next_2 =", "= prior_mean / prior_var self.x_prior_2 = 1.0 / prior_var self.UP, self.PREV, self.NEXT =", "super(SGPSSM, self).__init__( y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var, x_control, gp_emi, control_to_emi) self.dyn_layer =", "+ np.sum(mu * Suinvm)) variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] tn", "idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2,", "(1 - alpha) * self.x_prev_1[idxs, :] cav_prev_2 += (1 - alpha) * self.x_prev_2[idxs,", "vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( [n], cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt,", ":, :]))) # axs[1].errorbar(self.zu[:, 0]+0.05, self.mu[1, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[1, :, :]))) # axs[0].set_title('n", "dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) def update_hypers(self, params): \"\"\"Summary Args: params (TYPE):", "optional): Description Returns: TYPE: Description \"\"\" dmx = grad_cav['mx'] dvx = grad_cav['vx'] new_m", "from kernels import * from lik_layers import Gauss_Layer, Probit_Layer, Gauss_Emis from base_models import", "x_train (None, optional): Description key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" #", "- SuinvMuhat t1_old = self.t1[n, :, :] t2_old = self.t2[n, :, :, :]", "KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' 
def compute_cavity_x(self, mode, alpha): \"\"\"Summary Args: mode (TYPE):", "Description alpha (TYPE): Description grad_cav (TYPE): Description cav_m (TYPE): Description cav_v (TYPE): Description", "for each layer self.update_posterior() class SGPR(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE):", "Vinv = self.Kuuinv + T2u self.Suinv = Vinv self.Su = np.linalg.inv(Vinv) self.mu =", "self.tx1 = np.zeros((self.N, self.Din)) self.tx2 = np.zeros((self.N, self.Din)) self.t01 = prior_mean / prior_var", "Description \"\"\" a = extra_args muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat =", "n[0]) # plt.show() def sample(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE:", "self.compute_cavity_x_sequential(self.PREV, [n + 1], alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x_sequential(self.NEXT, [n],", "= np.arange(0, self.N - 1) cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :] cav_next_2", "Description idxs (TYPE): Description decay (float, optional): Description alpha (float, optional): Description Returns:", "x_train (TYPE): Description y_train (TYPE): Description no_pseudo (TYPE): Description lik (str, optional): Description", "energy = {} grad_all = {} for k in range(no_sweeps): # if k", "Raises: NotImplementedError: Description \"\"\" super(SGPSSM, self).__init__( y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var, x_control,", "= self.N M = self.M Din = self.Din Dout = self.Dout if x_train", "self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res) else: # parallel update for entire dataset #", "zu, zu) self.Kuu += np.diag(JITTER * np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def update_posterior(self,", "dm2, dv, x, alpha, prop_info): N = self.N Nb = idxs.shape[0] sf2 =", "epoch: %d / %d' % (k, no_sweeps) find_log_lik = compute_energy and (k ==", "Returns: TYPE: Description \"\"\" Su = self.Su mu = self.mu Lu = np.linalg.cholesky(Su)", "key_suffix] self.sf = 
params['sf' + key_suffix] self.zu = params['zu' + key_suffix] # update", "# plt.show() def sample(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description", "VcavKuuinvKufi) p_idlogZ_dvi = p_i[:, np.newaxis, :] * dv[:, :, np.newaxis] temp1 = -", "= self.emi_layer.get_hypers() # ssm_params = {'sn': self.sn} # params = dict(sgp_params) # params.update(emi_params)", "self.PREV: cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :] n1_new = (1 -", "= self.x_train[idxs, :] yb = self.y_train[idxs, :] # update model with new hypers", "np.random.randn(self.Dout, self.M) u_sample = mu + np.einsum('dab,db->da', Lu, epsilon) kff = compute_kernel(2 *", "= 2 * np.einsum('ndab,ndb->nda', dvcav, muhat) \\ + np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper =", "vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay)", ")) sf = np.log(np.array([1])) zu = np.tile(np.linspace(-1, 1, M).reshape((M, 1)), (1, Din)) else:", ":], axis=2) oneminuswnSwn = 1 - alpha * tn * wnSwn term2a =", "= \\ a[0], a[1], a[2], a[3], a[4], a[5], a[6] Kuuinv = self.Kuuinv #", "np.einsum('ndab,nb->nda', beta_si, k_i) m_si_i = np.einsum('na,nda->nd', k_i, gamma_si) v_si_ii = k_ii - np.einsum('na,ndab,nb->nd',", "mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # return mf, vf # def sample_f(self, inputs, no_samples=1):", "SuinvMuhat) return muhat, Suhat, SuinvMuhat, Suinvhat def update_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\"", "* n1_new + (1.0 - rho) * n1_ori # n2_damped = rho *", "dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) if find_log_lik: N = self.N lik_contrib", "TYPE: Description \"\"\" N = self.N scale_post = N * 1.0 / alpha", "vy # def update_hypers(self, params): # \"\"\"Summary # Args: # params (TYPE): Description", "* self.sf) Dout = self.Dout Kuu = self.Kuu Kuuinv = self.Kuuinv Su =", "Suhat, dmcav) inner 
= np.einsum('nda,ndb->ndab', dmcav, dmcav) - 2 * dvcav Sunew =", "np.einsum('ndab,nb->nda', Sucav, p_i) KuuinvVcavKuuinvKufi = np.einsum('ab,ndb->nda', Kuuinv, VcavKuuinvKufi) p_idlogZ_dvi = p_i[:, np.newaxis, :]", "at the begining for gaussian emission lik if isinstance(self.emi_layer, Gauss_Layer) and epoch ==", "np.einsum('ab,ndb->nda', Kuuinv, mcav) dmiKuuinvMcav = dm[:, :, np.newaxis] * KuuinvMcav dKuu_via_mi = -np.einsum('nda,nb->ab',", "Bhatpsi2 - mout**2 extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat,", "== 0: print 'epoch %d/%d' % (e, no_epochs) if parallel: self.inf_parallel(e, alpha, decay)", "+= 0.5 * \\ np.sum(self.mu * np.linalg.solve(self.Su, self.mu)) return phi_posterior def compute_phi_cavity(self): \"\"\"Summary", "psi0 + Bhatpsi2 - mout**2 extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1,", "Args: n (TYPE): Description mx (TYPE): Description vx (None, optional): Description alpha (float,", "x, self.zu) mout = np.einsum('nm,ndm->nd', kfu, Ahat) Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu)", "= compute_kernel(2 * self.ls, 2 * self.sf, x, x) kff += np.diag(JITTER *", "cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT: idxs = np.arange(0,", "TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.get_hypers() # lik_params = self.lik_layer.get_hypers() #", "n1_damped # mean_new_parallel = var_new_parallel * n2_damped self.variances[idxs, :] = var_new_parallel self.means[idxs, :]", "# params.update(emi_params) # params.update(ssm_params) # return params class SGP_Layer_rank_one(object): \"\"\"Summary Attributes: Din (TYPE):", "(1 - alpha)) mean_new_parallel = mean_div_var_i_new * var_new_parallel # if alpha == 1:", "self.ls = np.zeros([Din, ]) self.sf = 0 def compute_phi_prior(self): \"\"\"Summary Returns: TYPE: Description", "frac_t2 tx1_new = decay * cur_t1 + (1 - decay) * tx1_new tx2_new", "x_train is None: ls = np.log(np.ones((Din, )) + 0.1 * np.random.rand(Din, )) sf", "\"\"\" 
# sgp_params = self.sgp_layer.init_hypers() # lik_params = self.lik_layer.init_hypers() # init_params = dict(sgp_params)", "= np.einsum('nm,dm->nd', psi1, A) Bpsi2 = np.einsum('dab,nab->nd', B, psi2) vout = psi0 +", "Description \"\"\" def __init__(self, x_train, y_train, no_pseudo, lik='Gaussian'): \"\"\"Summary Args: x_train (TYPE): Description", "mx, vx, self.zu) mout = np.einsum('nm,dm->nd', psi1, A) Bpsi2 = np.einsum('dab,nab->nd', B, psi2)", "hidden_size (TYPE): Description no_pseudo (TYPE): Description lik (str, optional): Description prior_mean (int, optional):", "Description Returns: TYPE: Description \"\"\" if vx is None: return self._forward_prop_deterministic_thru_cav(n, mx, alpha)", "m_diff / v_sum dmprop = m_diff / v_sum return logZ, dmprop, dvprop, dmt,", "Description Kuu (TYPE): Description Kuuinv (TYPE): Description ls (TYPE): Description M (TYPE): Description", "tx1_new = decay * cur_t1 + (1 - decay) * tx1_new tx2_new =", "cav_tm1_2 = \\ self.compute_cavity_x_sequential(self.NEXT, [n], alpha) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( [n],", "no_train, input_size, output_size, no_pseudo): \"\"\"Summary Args: no_train (TYPE): Description input_size (TYPE): Description output_size", "self.Dout params['ls' + key_suffix] = self.ls params['sf' + key_suffix] = self.sf params_zu_i =", "mode == self.NEXT: idxs = np.arange(0, self.N - 1) cur_n1 = self.x_next_1[idxs, :]", "- alpha) * self.x_up_1 cav_up_2 = self.x_prev_2 + self.x_next_2 + \\ (1 -", "= new_p2 - 1.0 / cav_v frac_t1 = new_p1 - cav_m / cav_v", "np.einsum( 'ndab,ndbc->ndac', Suhat, np.einsum('ndab,ndbc->ndac', inner, Suhat)) Suinvnew = np.linalg.inv(Sunew) SuinvMunew = np.einsum('ndab,ndb->nda', Suinvnew,", "= self.Dout M = self.M zu = self.zu self.Kuu = compute_kernel(2 * ls,", "= np.linalg.inv(self.Kuu) def compute_cavity(self, n, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description alpha (float,", "(TYPE): Description x_prior_1 (TYPE): Description x_prior_2 (TYPE): 
Description x_up_1 (TYPE): Description x_up_2 (TYPE):", "N * 1.0 / alpha scale_prior = 1 phi_prior = self.compute_phi_prior() phi_post =", "Returns: TYPE: Description \"\"\" # update kuu and kuuinv ls = self.ls sf", "= self.x_next_2 + self.x_prev_2 + self.x_up_2 post_1[0, :] += self.x_prior_1 post_2[0, :] +=", "cav_v_n, _, _ = self.compute_cavity_x([ n], alpha) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav(", "* n2_new elif mode == self.PREV: idxs = np.arange(1, self.N) cur_n1 = self.x_prev_1[idxs,", "TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.get_hypers() # emi_params = self.emi_layer.get_hypers() #", "mcav, vcav, n1cav, n2cav, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description dmcav (TYPE):", "(TYPE): Description ls (TYPE): Description M (TYPE): Description mu (TYPE): Description N (TYPE):", "(1 - decay) * tx1_new tx2_new = decay * cur_t2 + (1 -", "+ dsf grads['ls'] = 2*dhyp[1] + dls grads['zu'] = dhyp[2] + dzu return", "# params = dict(sgp_params) # params.update(emi_params) # params.update(ssm_params) # return params class SGP_Layer_rank_one(object):", "N for key in grad_all.keys(): grad_all[key] /= N return energy, grad_all def run_pep_sequential(self,", "Raises: NotImplementedError: Description \"\"\" super(SGPR_rank_one, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer_rank_one(self.N, self.Din,", "psi2 = compute_psi_weave( 2 * self.ls, 2 * self.sf, mx, vx, self.zu) mout", "def forward_prop_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv", "cav_prev_1, cav_prev_2 elif mode == self.NEXT: cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :]", "Args: params (TYPE): Description mb_size (TYPE): Description alpha (float, optional): Description prop_mode (TYPE,", "n1cav, n2cav, idxs, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description dmcav (TYPE): Description", "tx1_new tx2_new = decay * cur_t2 + (1 - decay) * 
tx2_new self.tx1[n,", "alpha): # deletion p_i = self.KuuinvKuf[:, idxs].T[:, np.newaxis, :] k_i = self.Kfu[idxs, :]", "n in range(0, self.N - 1): # deal with the dynamics factors here", ":] mean_i = self.means[idxs, :] tn = 1.0 / variance_i gn = mean_i", "(TYPE): Description mx (TYPE): Description vx (TYPE): Description alpha (float, optional): Description Returns:", "self.mu) Smm = self.Su + np.einsum('da,db->dab', self.mu, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv,", "\"\"\" def __init__(self, no_train, input_size, output_size, no_pseudo): \"\"\"Summary Args: no_train (TYPE): Description input_size", "* np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def compute_cavity(self, n, alpha=1.0): \"\"\"Summary Args: n", "if mode == self.NEXT: cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :] n1_new", "\"\"\" new_m = mcav + vcav * dmcav new_v = vcav - vcav**2", "/ prior_var self.UP, self.PREV, self.NEXT = 'UP', 'PREV', 'NEXT' def inf_parallel(self, epoch, alpha,", "idxs = np.random.choice(N, mb_size, replace=False) xb = self.x_train[idxs, :] yb = self.y_train[idxs, :]", "+ temp3 dKuu_via_logZ = dKuu_via_mi + dKuu_via_vi dKfu_via_mi = dmiKuuinvMcav dKfu_via_vi = 2", "# self.updated = True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # return mf, vf", "/ cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT: cav_next_1 =", "dls * np.exp(self.ls) dsf2 += np.sum(dv) dsf = 2 * sf2 * dsf2", "sgp_contrib + lik_contrib grad_all = {} for key in sgp_grad.keys(): grad_all[key] = sgp_grad[key]", "(mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( [n], cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop,", "dKfu_via_logZ, kfu, np.exp(self.ls), sf2, x, self.zu) dls = dls * np.exp(self.ls) dsf2 +=", "* np.sum(logdet) phi_cavity += 0.5 * \\ np.sum(self.muhat * np.linalg.solve(self.Suhat, self.muhat)) return phi_cavity", "0 xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] energy = {} grad_all", "TODO: 
minibatch parallel idxs = np.arange(self.N) y = self.y_train[idxs, :] x = self.x_train[idxs,", "axis=0) post_m = (post_m - post_m_mean) / post_m_std post_v = 0.1 * np.ones_like(post_m)", "* dvcav) new_n2 = 1.0 / new_v new_n1 = new_n2 * new_m frac_n2", "= np.einsum('nm,dm->nd', qfu, u_sample) vf = kff - np.dot(qfu, kfu.T) Lf = np.linalg.cholesky(vf)", "m (TYPE): Description v (TYPE): Description dm (TYPE): Description dv (TYPE): Description extra_args", "np.einsum('na,ndb->ab', p_i, p_idlogZ_dvi) dKuu_via_vi = temp1 + temp2 + temp3 dKuu_via_logZ = dKuu_via_mi", "* gamma, axis=2)) hd1 = h_si * dlogZd_dmi[:, :, np.newaxis] hd2h = np.einsum('nda,ndb->ndab',", "self.Dout) f_sample = mf + np.einsum('ab,bd->ad', Lf, epsilon) return f_sample def compute_kuu(self): \"\"\"Summary", "Suinvm = np.einsum('dab,db->da', Suinv, mu) term1 = 0.5 * (np.sum(logdetSu) - Dout *", "Description \"\"\" sn2 = np.exp(2 * self.sn) v_sum = v_t + v_prop +", "Args: n (TYPE): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" #", "* cur_n2 + (1 - decay) * n2_new elif mode == self.PREV: idxs", "# compute the posterior approximation self.Suinv = self.Kuuinv + np.sum(self.t2, axis=0) self.SuinvMu =", "Args: no_train (TYPE): Description input_size (TYPE): Description output_size (TYPE): Description no_pseudo (TYPE): Description", ":] cav_v = 1.0 / cav_x2 cav_m = cav_v * cav_x1 return cav_m,", "# return init_params # def get_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description", "self.Kuu Kuuinv = self.Kuuinv Su = self.Su mu = self.mu Suinv = self.Suinv", "for key in grad_all.keys(): grad_all[key] /= N return energy, grad_all def run_pep_sequential(self, idxs,", "else: return self._forward_prop_random_thru_cav_mm(n, mx, vx, alpha) def _forward_prop_deterministic_thru_cav(self, n, x, alpha): \"\"\"Summary Args:", "dmcav) inner = np.einsum('nda,ndb->ndab', dmcav, dmcav) - 2 * dvcav Sunew = Suhat", "= np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu) vout = kff + Bkfukuf 
extra_res = [muhat,", "decay=decay) self.update_factor_x_sequential( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, [n], decay=decay, alpha=alpha) self.update_factor_x_sequential(", ":] = decay * cur_n1 + (1 - decay) * n1_new self.x_prev_2[idxs, :]", "[h, beta_si, gamma_si] def update_factor(self, idxs, mcav, vcav, dm, dm2, dv, alpha, prop_info):", "+ lik_contrib grad_all = {} for key in sgp_grad.keys(): grad_all[key] = sgp_grad[key] for", "alpha) * cur_t1 + frac_t1 tx2_new = (1 - alpha) * cur_t2 +", "post_2 * post_m self.tx1 = post_1 - self.t01 self.tx2 = post_2 - self.t02", "self.x_up_1 = up_1 self.x_up_2 = up_2 for n in range(0, self.N - 1):", "(TYPE): Description x_next_2 (TYPE): Description x_prev_1 (TYPE): Description x_prev_2 (TYPE): Description x_prior_1 (TYPE):", "idxs (TYPE): Description decay (float, optional): Description alpha (float, optional): Description Returns: TYPE:", "/ v_sum**2 dmt = m_diff / v_sum dmprop = m_diff / v_sum return", "psi1, dpsi2, psi2, ls, sf2, mx, vx, zu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat,", "x, self.zu) mout = np.einsum('nm,dm->nd', kfu, A) Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu)", "v_sum return logZ, dmprop, dvprop, dmt, dvt def update_factor_x( self, mode, dmcav, dvcav,", "def backprop_grads_reg(self, idxs, m, v, dm, dm2, dv, x, alpha, prop_info): N =", "Args: # inputs (TYPE): Description # no_samples (int, optional): Description # Returns: #", "= SGP_Layer(self.N, self.Din, self.Dout, self.M) # natural params for latent variables self.tx1 =", "\"\"\"Summary # Returns: # TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.init_hypers() #", "xn = self.x_train[n, :].reshape([1, self.Din]) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], xn,", "/ variance_i * (1 - alpha)) mean_new_parallel = mean_div_var_i_new * var_new_parallel # if", "X1 = X[randind[:5000], :] x_dist = cdist(X1, X1, 'euclidean') triu_ind = np.triu_indices(N) ls", "a[5], 
a[6], a[7], a[8] # compute grads wrt Ahat and Bhat dm_all =", "# TODO: minibatch parallel idxs = np.arange(self.N) y = self.y_train[idxs, :] cav_m, cav_v,", "Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha) Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv,", "self.Kuuinv - np.einsum('ab,dbc->dac', self.Kuuinv, np.einsum('dab,bc->dac', self.Su, self.Kuuinv)) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args:", "(None, optional): Description Returns: TYPE: Description \"\"\" if vx is None: return self._forward_prop_deterministic_thru_post(mx)", "vcav (TYPE): Description n1cav (TYPE): Description n2cav (TYPE): Description decay (float, optional): Description", "the dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x(self.PREV, alpha) cav_tm1_m,", "m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity(idxs, alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav,", "kernels import * from lik_layers import Gauss_Layer, Probit_Layer, Gauss_Emis from base_models import Base_SGPR,", "no_epochs) if not parallel: for n in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout])", "return params def update_hypers(self, params, x_train, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix", "Description t01 (TYPE): Description t02 (TYPE): Description tx1 (TYPE): Description tx2 (TYPE): Description", "as plt import time import pdb from scipy.cluster.vq import kmeans2 from utils import", "+ 1e-16), cav_up_1, cav_up_2 elif mode == self.PREV: cav_prev_1 = self.x_up_1[idxs, :] +", "None: Kfu = compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu) KuuinvKuf = np.dot(self.Kuuinv, Kfu.T) self.Kfu =", "# fs = np.zeros((inputs.shape[0], self.Dout, K)) # # TODO: remove for loop here", "alpha) idxs = np.arange(self.N - 1) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( idxs,", "oneminuswnSwn = 1 - alpha * tn * wnSwn term2a = 0.5 *", "self.sf, x, x) kff += 
np.diag(JITTER * np.ones(x.shape[0])) kfu = compute_kernel(2 * self.ls,", "grads wrt Ahat and Bhat dAhat = np.einsum('nd,nm->ndm', dm, kfu) dBhat = np.einsum('nd,na,nb->ndab',", "(float, optional): Description Returns: TYPE: Description \"\"\" N = self.N M = self.M", "\"\"\" post_1 = self.x_next_1 + self.x_prev_1 + self.x_up_1 post_2 = self.x_next_2 + self.x_prev_2", "if k % display_steps == 0: print 'PEP, epoch: %d / %d' %", "m_t - m_prop exp_term = -0.5 * m_diff**2 / v_sum const_term = -0.5", "self.sf) Dout = self.Dout Kuu = self.Kuu Kuuinv = self.Kuuinv Su = self.Su", "alpha (float, optional): Description Returns: TYPE: Description \"\"\" N = self.N M =", "[n], cav_m_n, cav_v_n, alpha=alpha) logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha)", "for e in range(no_epochs): if e % 50 == 0: print 'epoch %d/%d'", "* self.x_next_2[idxs, :] return cav_next_1 / cav_next_2, 1.0 / cav_next_2, cav_next_1, cav_next_2 else:", "def compute_phi_prior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Kuu) logZ_prior =", "temp2 = np.transpose(temp1, [0, 1]) temp3 = np.einsum('na,ndb->ab', p_i, p_idlogZ_dvi) dKuu_via_vi = temp1", ":] # compute cavity covariance betacavKuu = np.einsum('ndab,bc->ndac', beta_si, Kuu) mcav = np.einsum('ab,ndb->nda',", "self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', self.Su,", "- alpha) * cur_n2 + frac_n2 self.x_prev_1[idxs, :] = decay * cur_n1 +", "\\ self.compute_cavity_x_sequential(self.PREV, [n + 1], alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x_sequential(self.NEXT,", "= np.einsum('ndab,nb->nda', Sucav, p_i) KuuinvVcavKuuinvKufi = np.einsum('ab,ndb->nda', Kuuinv, VcavKuuinvKufi) p_idlogZ_dvi = p_i[:, np.newaxis,", "self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( idxs, alpha, grad_cav, cav_m, cav_v, decay=decay)", "def sample_f(self, 
inputs, no_samples=1): # \"\"\"Summary # Args: # inputs (TYPE): Description #", "post_m_std post_v = 0.1 * np.ones_like(post_m) post_2 = 1.0 / post_v post_1 =", "* np.ones_like(post_m) post_2 = 1.0 / post_v post_1 = post_2 * post_m self.tx1", "= self.N if mb_size >= N: idxs = np.arange(N) xb = self.x_train yb", "Description mcav (TYPE): Description vcav (TYPE): Description n1cav (TYPE): Description n2cav (TYPE): Description", "mx, vx # def predict_f(self, inputs): # \"\"\"Summary # Args: # inputs (TYPE):", "def _forward_prop_deterministic_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv", "Description sf (int): Description Splusmm (TYPE): Description Su (TYPE): Description Suinv (TYPE): Description", "= np.einsum('dab,db->da', self.Su, T1u) self.gamma = np.einsum('ab,db->da', self.Kuuinv, self.mu) self.beta = self.Kuuinv -", "(1 - alpha) * cur_t2 + frac_t2 tx1_new = decay * cur_t1 +", "= self.lik_layer.init_hypers() # init_params = dict(sgp_params) # init_params.update(lik_params) # return init_params # def", "SGP_Layer(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description Kuu (TYPE): Description Kuuinv", "Suinvnew - Suinvhat t1_frac = SuinvMunew - SuinvMuhat t1_old = self.t1[n, :, :]", "Returns: TYPE: Description \"\"\" a = extra_args muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat,", "epsilon) kff = compute_kernel(2 * self.ls, 2 * self.sf, x, x) kff +=", "cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, decay=decay, alpha=alpha) self.update_factor_x( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1,", "self.mu = np.zeros([Dout, M, ]) self.Su = np.zeros([Dout, M, M]) self.SuinvMu = np.zeros([Dout,", "sgp_params = self.sgp_layer.get_hypers() # emi_params = self.emi_layer.get_hypers() # ssm_params = {'sn': self.sn} #", "* cur_t1 + frac_t1 tx2_new = (1 - alpha) * cur_t2 + frac_t2", "N (TYPE): Description sf (int): Description Splusmm (TYPE): Description Su (TYPE): Description Suinv", "\"\"\" a 
= extra_args muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat = \\", "= self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.lik_layer.output_probabilistic(mf, vf) # return my, vy #", "np.zeros((inputs.shape[0], self.Dout, K)) # # TODO: remove for loop here # for k", "(TYPE): Description Kuu (TYPE): Description Kuuinv (TYPE): Description ls (TYPE): Description M (TYPE):", "cav_m, cav_v, _, _ = self.compute_cavity_x(idxs, alpha) (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav(", "self.N = N = no_train # factor variables self.t1 = np.zeros([N, Dout, M])", "self.sgp_layer.update_hypers(params) # self.lik_layer.update_hypers(params) # def init_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description", "- n2cav frac_n1 = new_n1 - n1cav if mode == self.NEXT: cur_n1 =", "2 * self.sf, mx, vx, self.zu) mout = np.einsum('nm,ndm->nd', psi1, Ahat) Bhatpsi2 =", "post_2 - self.t02 def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0): \"\"\"Summary Args: alpha (float,", "params = dict(sgp_params) # params.update(lik_params) # return params class SGPSSM(Base_SGPSSM): \"\"\"Summary Attributes: Din", "np.sum(self.muhat * np.linalg.solve(self.Suhat, self.muhat)) return phi_cavity def compute_phi(self, alpha=1.0): \"\"\"Summary Args: alpha (float,", "self.y_train[n, :].reshape([1, self.Dout]) cav_m_n, cav_v_n, _, _ = self.compute_cavity_x([ n], alpha) (mn, vn,", "- 2 * dvcav) new_n2 = 1.0 / new_v new_n1 = new_n2 *", "self.Su mu = self.mu Suinv = self.Suinv p_i = self.KuuinvKuf[:, idxs].T h, beta_si,", "d2imed = np.median(x_dist[triu_ind]) for i in range(Din): ls[i] = np.log(d2imed + 1e-16) sf", "Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha) Kuuinv", "vout, extra_res def _forward_prop_random_thru_cav_mm(self, n, mx, vx, alpha): \"\"\"Summary Args: n (TYPE): Description", "* self.sf, x, x) kff += np.diag(JITTER * np.ones(x.shape[0])) kfu = compute_kernel(2 *", "if parallel: 
self.inf_parallel(e, alpha, decay) else: self.inf_sequential(e, alpha, decay) except KeyboardInterrupt: print 'Caught", "Vmm = Su + np.einsum('da,db->dab', mu, mu) S = self.Dout * Kuuinv -", "/ post_2 mx = post_1 / post_2 return mx, vx def get_posterior_y(self): \"\"\"Summary", "mx, vx, alpha) def _forward_prop_deterministic_thru_cav(self, n, x, alpha): \"\"\"Summary Args: n (TYPE): Description", "should reuse base models! \"\"\" import sys import math import numpy as np", "[n], alpha, grad_cav, extra_res) else: # parallel update for entire dataset # TODO:", "dv, extra_res, cav_m, cav_v, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( idxs,", "/= N for key in grad_all.keys(): grad_all[key] /= N return energy, grad_all def", "Returns: TYPE: Description \"\"\" N = self.N scale_post = N * 1.0 /", "alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 else: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m,", "Description Returns: TYPE: Description \"\"\" N = self.N if mb_size >= N: idxs", "% (e, no_epochs) if not parallel: for n in range(self.N): yn = self.y_train[n,", "v_si_ii, [h, beta_si, gamma_si] def update_factor(self, idxs, mcav, vcav, dm, dm2, dv, alpha,", "alpha xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] for k in range(no_sweeps):", "+ Bpsi2 - mout**2 return mout, vout def backprop_grads_lvm(self, m, v, dm, dv,", "(1 / var_i_new + 1 / variance_i * (1 - alpha)) mean_div_var_i_new =", "np.einsum('nd,ndab->nab', dv, Bhat) dsf2, dls, dzu, dmx, dvx = compute_psi_derivatives( dpsi1, psi1, dpsi2,", "find_log_lik = compute_energy and (k == no_sweeps-1) m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity(idxs, alpha)", ":] = decay * cur_n2 + (1 - decay) * n2_new else: raise", "new hypers self.update_hypers(params) # run power-EP and compute grads no_ep_sweeps = 10 #", "/ v_sum + 0.5 * m_diff**2 / v_sum**2 dmt = m_diff / v_sum", "optional): Description Returns: TYPE: Description \"\"\" self.ls = 
params['ls' + key_suffix] self.sf =", "dvcav} return grad_hyper, grad_cav def update_factor(self, n, alpha, grad_cav, extra_args, decay=0): \"\"\"Summary Args:", "+ key_suffix] = self.sf params_zu_i = self.zu params['zu' + key_suffix] = self.zu return", "self.t02 def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0): \"\"\"Summary Args: alpha (float, optional): Description", ":] (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, x, alpha=alpha) logZ, dm, dv", "self.tx2 = np.zeros((self.N, self.Din)) self.t01 = prior_mean / prior_var self.t02 = 1.0 /", "n2_new + (1.0 - rho) * n2_ori # var_new_parallel = 1.0 / n1_damped", "mx, alpha) else: return self._forward_prop_random_thru_cav_mm(n, mx, vx, alpha) def _forward_prop_deterministic_thru_cav(self, n, x, alpha):", "1e-16), cav_up_1, cav_up_2 elif mode == self.PREV: idxs = np.arange(1, self.N) cav_prev_1 =", "cav_v**2 * (dmx**2 - 2 * dvx) new_p2 = 1.0 / new_v new_p1", "M = no_pseudo self.N = N = no_train # factor variables self.variances =", "return self.run_pep_sequential( train_idxs, no_sweeps, alpha, compute_energy, display_steps) def run_pep_parallel(self, idxs, no_sweeps, alpha, compute_energy,", "from scipy import special from scipy.optimize import minimize import matplotlib.pyplot as plt import", "Returns: TYPE: Description \"\"\" try: for e in range(no_epochs): if e % 50", "self.x_prev_2[idxs, :] n1_new = (1 - alpha) * cur_n1 + frac_n1 n2_new =", "[n + 1], decay=decay, alpha=alpha) def inference(self, alpha=1.0, no_epochs=10, parallel=True, decay=0): \"\"\"Summary Args:", "self.Dcon_emi, self.Dout, self.M) # natural params for latent variables N = self.N Din", "t1_frac t2_new = (1.0 - alpha) * t2_old + t2_frac if t1_new.shape[0] ==", "- alpha) * self.tx2[n, :] cav_v = 1.0 / cav_x2 cav_m = cav_v", "0.5 * S + dKuu_via_logZ dhyp = d_trace_MKzz_dhypers( 2*self.ls, 2*self.sf, self.zu, S, Kuu", "= np.zeros([N, Dout, M, M]) # TODO self.mu = np.zeros([Dout, M, ]) self.Su", "/ cav_next_2, 
cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def compute_cavity_x_sequential(self, mode, idxs, alpha):", "* self.ls, 2 * self.sf, mx, vx, self.zu) mout = np.einsum('nm,dm->nd', psi1, A)", "optional): Description decay (float, optional): Description Returns: TYPE: Description \"\"\" try: for e", "tx2_new self.tx1[n, :] = tx1_new self.tx2[n, :] = tx2_new def get_posterior_x(self): \"\"\"Summary Returns:", "/ v_sum + 0.5 * m_diff**2 / v_sum**2 dvprop = -0.5 / v_sum", "= np.log(np.ones((Din, )) + 0.1 * np.random.rand(Din, )) sf = np.log(np.array([1])) zu =", "NotImplementedError: Description \"\"\" new_m = mcav + vcav * dmcav new_v = vcav", "= np.median(x_dist[triu_ind]) for i in range(Din): ls[i] = np.log(d2imed + 1e-16) sf =", "= self.x_next_2[idxs, :] n1_new = (1 - alpha) * cur_n1 + frac_n1 n2_new", "self.Suinv p_i = self.KuuinvKuf[:, idxs].T h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2] kfu", "xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] energy = {} grad_all =", "Returns: TYPE: Description \"\"\" self.ls = params['ls' + key_suffix] self.sf = params['sf' +", "# prior factor cav_x1 = self.t01 + (1 - alpha) * self.tx1[n, :]", "(TYPE): Description # no_samples (int, optional): Description # Returns: # TYPE: Description #", "\"\"\" N = self.N if mb_size >= N: idxs = np.arange(N) xb =", "M, ]) self.Su = np.zeros([Dout, M, M]) self.SuinvMu = np.zeros([Dout, M, ]) self.Suinv", "mout, vout def sample(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description", "(TYPE): Description no_pseudo (TYPE): Description lik (str, optional): Description Raises: NotImplementedError: Description \"\"\"", "Attributes: Din (TYPE): Description Dout (TYPE): Description emi_layer (TYPE): Description lik (TYPE): Description", "* sf, zu, zu) self.Kuu += np.diag(JITTER * np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu)", "gamma_si] def update_factor(self, idxs, mcav, vcav, dm, dm2, dv, alpha, prop_info): h, beta_si,", 
"# rho = 0.5 # n1_new = 1.0 / var_new_parallel # n2_new =", "Description # \"\"\" # sgp_params = self.sgp_layer.init_hypers() # lik_params = self.emi_layer.init_hypers() # ssm_params", "= \\ self.sgp_layer.forward_prop_thru_cav( [n], xn, alpha=alpha) logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn,", "self.Din)) self.tx2 = np.zeros((self.N, self.Din)) self.t01 = prior_mean / prior_var self.t02 = 1.0", "cur_n2 + frac_n2 self.x_prev_1[idxs, :] = decay * cur_n1 + (1 - decay)", "return params def get_hypers(self, key_suffix=''): \"\"\"Summary Args: key_suffix (str, optional): Description Returns: TYPE:", "Description \"\"\" post_1 = self.x_next_1 + self.x_prev_1 + self.x_up_1 post_2 = self.x_next_2 +", "prior_var self.t02 = 1.0 / prior_var # TODO: alternatitve method for non real-valued", "compute_cavity(self, idxs, alpha): # deletion p_i = self.KuuinvKuf[:, idxs].T[:, np.newaxis, :] k_i =", "TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Kuu) logZ_prior = self.Dout * 0.5 *", "dsf2 # compute the gradients Vmm = Su + np.einsum('da,db->dab', mu, mu) S", "= self.means[idxs, :] var_i_new = -1.0 / dm2 - np.sum(k_i[:, np.newaxis, :] *", "self.lik_layer.compute_log_Z( m_cav, v_cav, yb, alpha, compute_dm2=True) # dm *= 0 # dm2 *=", "idxs = np.arange(1, self.N) cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2 =", "else: # parallel update self.t1[n, :, :] = decay * t1_old + (1", "= up_2 for n in range(0, self.N - 1): # deal with the", "+ Bpsi2 return mout, vout # TODO def _forward_prop_random_thru_post_mm(self, mx, vx): \"\"\"Summary Args:", "# \"\"\"Summary # Args: # inputs (TYPE): Description # Returns: # TYPE: Description", "+ key_suffix] = self.zu return params def update_hypers(self, params, x_train, key_suffix=''): \"\"\"Summary Args:", "np.newaxis, :], axis=2) mwn = np.sum(mu * p_i[:, np.newaxis, :], axis=2) oneminuswnSwn =", "self.zu Kuuinv = self.Kuuinv a = extra_args muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1,", "# 
sgp_params = self.sgp_layer.init_hypers() # lik_params = self.emi_layer.init_hypers() # ssm_params = {'sn': np.log(0.001)}", "= self.zu Kuuinv = self.Kuuinv a = extra_args muhat, Suhat, SuinvMuhat, Suinvhat, Smm,", "np.std(post_m, axis=0) post_m = (post_m - post_m_mean) / post_m_std post_v = 0.1 *", "/ (1 / var_i_new + 1 / variance_i * (1 - alpha)) mean_div_var_i_new", "self.tx2 vx = 1.0 / post_2 mx = post_1 / post_2 return mx,", "= [muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat] return mout, vout, extra_res def", "n, mx, vx, alpha): \"\"\"Summary Args: n (TYPE): Description mx (TYPE): Description vx", "* dlogZd_dmi2[:, :, np.newaxis, np.newaxis] gamma_si = gamma + hd1 beta_si = beta", "np.linalg.cholesky(Su) epsilon = np.random.randn(self.Dout, self.M) u_sample = mu + np.einsum('dab,db->da', Lu, epsilon) kff", "self.Suinv - alpha * t2n SuinvMuhat = self.SuinvMu - alpha * t1n Suhat", "1) cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :] cav_next_2 = self.x_up_2[idxs, :] +", "approximation if new_hypers and x_train is not None: Kfu = compute_kernel(2*self.ls, 2*self.sf, x_train,", "self.N Nb = idxs.shape[0] sf2 = np.exp(2 * self.sf) Dout = self.Dout Kuu", "\"\"\"Summary Args: mode (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description Raises: NotImplementedError:", "ls (TYPE): Description M (TYPE): Description mu (TYPE): Description N (TYPE): Description sf", "inputs): # \"\"\"Summary # Args: # inputs (TYPE): Description # Returns: # TYPE:", "* cur_n2 + frac_n2 self.x_prev_1[idxs, :] = decay * cur_n1 + (1 -", "* Suinvm, axis=2) wnS = np.einsum('na,dab->ndb', p_i, Su) wnSwn = np.sum(wnS * p_i[:,", "np.arange(N) xb = self.x_train yb = self.y_train else: idxs = np.random.choice(N, mb_size, replace=False)", "self.x_next_2[idxs, :] = decay * cur_n2 + (1 - decay) * n2_new elif", "(TYPE): Description SuinvMu (TYPE): Description t1 (TYPE): Description t2 (TYPE): Description zu (TYPE):", "def _forward_prop_deterministic_thru_cav(self, n, x, 
alpha): \"\"\"Summary Args: n (TYPE): Description x (TYPE): Description", "vcav**2 * (dmcav**2 - 2 * dvcav) new_n2 = 1.0 / new_v new_n1", "+ scale_post * phi_post + scale_cav * phi_cav return phi def forward_prop_thru_cav(self, n,", "= self.variances[idxs, :] mean_i = self.means[idxs, :] dlogZd_dmi2 = 1.0 / (variance_i/alpha -", "SGP_Layer(self.N, self.Din, self.Dout, self.M) # natural params for latent variables self.tx1 = np.zeros((self.N,", "in lik_grad.keys(): grad_all[key] = lik_grad[key] energy /= N for key in grad_all.keys(): grad_all[key]", "n (TYPE): Description mx (TYPE): Description vx (None, optional): Description alpha (float, optional):", "alpha) * cur_n2 + frac_n2 self.x_next_1[idxs, :] = decay * cur_n1 + (1", "self.mu = munew[0, :, :] self.Suinv = Suinvnew[0, :, :, :] self.SuinvMu =", "- term1 - term2 KuuinvMcav = np.einsum('ab,ndb->nda', Kuuinv, mcav) dmiKuuinvMcav = dm[:, :,", "Din)) self.x_prior_1 = prior_mean / prior_var self.x_prior_2 = 1.0 / prior_var self.UP, self.PREV,", "alpha (float, optional): Description no_epochs (int, optional): Description parallel (bool, optional): Description decay", "* np.log(2 * np.pi * sn2) - 0.5 * np.log(alpha) logZ = exp_term", "key_suffix] = zu return params def get_hypers(self, key_suffix=''): \"\"\"Summary Args: key_suffix (str, optional):", "self.compute_kuu() # compute mu and Su for each layer self.update_posterior(x_train, new_hypers=True) def compute_cavity(self,", "= self.zu return params def update_hypers(self, params, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description", "cav_m = cav_v * cav_x1 return cav_m, cav_v, cav_x1, cav_x2 def update_factor_x(self, n,", "* new_m frac_t2 = new_p2 - 1.0 / cav_v frac_t1 = new_p1 -", "Description Returns: TYPE: Description \"\"\" self.ls = params['ls' + key_suffix] self.sf = params['sf'", "Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\" if mode == self.UP: cav_up_1", "self.M) # natural params for latent variables N 
= self.N Din = self.Din", "init_params.update(lik_params) # return init_params # def get_hypers(self): # \"\"\"Summary # Returns: # TYPE:", "\"\"\"Summary Args: no_train (TYPE): Description input_size (TYPE): Description output_size (TYPE): Description no_pseudo (TYPE):", "np.dot(qfu, kfu.T) Lf = np.linalg.cholesky(vf) epsilon = np.random.randn(x.shape[0], self.Dout) f_sample = mf +", "scale_logZ * dv dm2_s = scale_logZ * dm2 sgp_contrib, sgp_grad = self.sgp_layer.backprop_grads_reg( idxs,", "self.sgp_layer.backprop_grads_reg( mn, vn, dmn, dvn, extra_res, xn, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res)", "1.0 / cav_x2 cav_m = cav_v * cav_x1 return cav_m, cav_v, cav_x1, cav_x2", "\"\"\" # merge info from output cav_up_m, cav_up_v, _, _ = self.compute_cavity_x(self.UP, alpha)", "Description x_prior_1 (TYPE): Description x_prior_2 (TYPE): Description x_up_1 (TYPE): Description x_up_2 (TYPE): Description", "self.Kuuinv = np.linalg.inv(self.Kuu) def compute_cavity(self, n, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description alpha", "= True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.emi_layer.output_probabilistic(mf, vf)", "grad_cav, extra_args, decay=0): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description grad_cav (TYPE):", "self.zu params['zu' + key_suffix] = self.zu return params def update_hypers(self, params, key_suffix=''): \"\"\"Summary", "dv, extra_args, x, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v (TYPE): Description dm", "xn, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res) else: # parallel update for entire", "dmt, dvt def update_factor_x( self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav, decay=0.0,", "grad_all.keys(): grad_all[key] /= N return energy, grad_all def run_pep_sequential(self, idxs, no_sweeps, alpha, compute_energy,", "alpha=1.0): \"\"\"Summary Args: alpha (float, optional): Description Returns: TYPE: Description \"\"\" N =", 
"temp3 dKuu_via_logZ = dKuu_via_mi + dKuu_via_vi dKfu_via_mi = dmiKuuinvMcav dKfu_via_vi = 2 *", "optional): Description alpha (float, optional): Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\"", "cdist(X1, X1, 'euclidean') triu_ind = np.triu_indices(N) ls = np.zeros((Din, )) d2imed = np.median(x_dist[triu_ind])", "points, Kuuinv, Kuu and its gradients self.zu = np.zeros([M, Din]) self.Kuu = np.zeros([M,", "p_i, Su) wnSwn = np.sum(wnS * p_i[:, np.newaxis, :], axis=2) mwn = np.sum(mu", "= np.linalg.inv(Suinvhat) muhat = np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat) return muhat, Suhat, SuinvMuhat, Suinvhat def", "dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) if find_log_lik: N = self.N lik_contrib =", "self.sgp_layer = SGP_Layer_rank_one(self.N, self.Din, self.Dout, self.M) def objective_function(self, params, mb_size, alpha=1.0, prop_mode=PROP_MM): \"\"\"Summary", "TYPE: Description \"\"\" post_1 = self.x_next_1 + self.x_prev_1 + self.x_up_1 post_2 = self.x_next_2", "1.0 / new_v new_p1 = new_p2 * new_m frac_t2 = new_p2 - 1.0", "vx) return my, vy, vyn # def predict_f(self, inputs): # \"\"\"Summary # Args:", "t1n = self.t1[n, :, :] t2n = self.t2[n, :, :, :] Suinvhat =", "v_cav, yb, alpha, compute_dm2=True) # dm *= 0 # dm2 *= 0 #", "Description mx (TYPE): Description vx (TYPE): Description alpha (float, optional): Description Returns: TYPE:", "dmt, dvt = \\ self.compute_transition_tilted( mprop, vprop, cav_t_m, cav_t_v, alpha) grad_hyper, grad_cav =", "input_size self.Dout = Dout = output_size self.M = M = no_pseudo self.N =", "ls = np.exp(self.ls) sf2 = np.exp(2 * self.sf) zu = self.zu Kuuinv =", "np.einsum('nm,ndm->nd', kfu, Ahat) Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu) vout = kff +", "# \"\"\"Summary # Returns: # TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.init_hypers()", "n2_new = mean_new_parallel / var_new_parallel # n1_ori = 1.0 / variance_i # n2_ori", "alpha 
* tn * wnSwn term2a = 0.5 * alpha * tn**2 *", "self.Kfu = Kfu self.KuuinvKuf = KuuinvKuf self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf, x_train) KuuinvKuf_div_var =", ":].reshape([1, self.Din]) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], xn, alpha=alpha) logZn, dmn,", "= self.x_train[n, :].reshape([1, self.Din]) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], xn, alpha=alpha)", "self.M Din = self.Din Dout = self.Dout params['ls' + key_suffix] = self.ls params['sf'", "Description \"\"\" Kuuinv = self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) B = np.einsum(", "(k, no_sweeps) find_log_lik = compute_energy and (k == no_sweeps-1) for i in range(batch_size):", "Description \"\"\" params = {} M = self.M Din = self.Din Dout =", "np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_input = {'mx': dmx, 'vx': dvx, 'mcav':", "M]) # variables for the hyperparameters self.ls = np.zeros([Din, ]) self.sf = 0", "dm / dm2 var_new_parallel = 1 / (1 / var_i_new + 1 /", "self.x_prev_2[idxs, :] cav_next_1 += (1 - alpha) * self.x_next_1[idxs, :] cav_next_2 += (1", "decay * t1_old + (1 - decay) * t1_new self.t2[n, :, :, :]", "new_n2 * new_m frac_n2 = new_n2 - n2cav frac_n1 = new_n1 - n1cav", "= {'mx': dmx, 'vx': dvx, 'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_input def", "Su + np.einsum('da,db->dab', mu, mu) S = self.Dout * Kuuinv - np.sum(np.einsum('ab,dbc->dac', Kuuinv,", "a[1], a[2], a[3], a[4], a[5], a[6] Kuuinv = self.Kuuinv # compute grads wrt", "fs = np.zeros((inputs.shape[0], self.Dout, K)) # # TODO: remove for loop here #", "p_i = self.KuuinvKuf[:, idxs].T[:, np.newaxis, :] k_i = self.Kfu[idxs, :] k_ii = self.Kff_diag[idxs][:,", "self.compute_cavity_x(self.PREV, alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x(self.NEXT, alpha) idxs = np.arange(self.N", "Returns: TYPE: Description \"\"\" if vx is None: return self._forward_prop_deterministic_thru_cav(n, mx, alpha) 
else:", "= decay * t1_old + (1 - decay) * t1_new self.t2[n, :, :,", "= np.einsum('na,nda->nd', k_i, gamma_si) v_si_ii = k_ii - np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i) return", "dmiKuuinvMcav dKfu_via_vi = 2 * dv[:, :, np.newaxis] * (-p_i[:, np.newaxis, :] +", "1.0 / variance_i gn = mean_i wnScav = np.einsum('na,ndab->ndb', p_i, Sucav) wnScavwn =", "- decay) * tx2_new self.tx1[n, :] = tx1_new self.tx2[n, :] = tx2_new def", "self.dyn_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1,", "v_cav, prop_info = self.sgp_layer.compute_cavity(idxs, alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav,", "\"\"\" super(SGPLVM, self).__init__(y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout,", "+= (1 - alpha) * self.x_prev_2[idxs, :] return cav_prev_1 / cav_prev_2, 1.0 /", "Bpsi2 return mout, vout # TODO def _forward_prop_random_thru_post_mm(self, mx, vx): \"\"\"Summary Args: mx", "0.5 * m_diff**2 / v_sum**2 dmt = m_diff / v_sum dmprop = m_diff", "Bpsi2 return mout, vout def sample(self, x): \"\"\"Summary Args: x (TYPE): Description Returns:", "new_v new_p1 = new_p2 * new_m frac_t2 = new_p2 - 1.0 / cav_v", "= np.einsum('nm,ndm->nd', psi1, Ahat) Bhatpsi2 = np.einsum('ndab,nab->nd', Bhat, psi2) vout = psi0 +", "else: raise NotImplementedError('unknown mode') def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1 =", "= dict(sgp_params) # params.update(emi_params) # params.update(ssm_params) # return params class SGP_Layer_rank_one(object): \"\"\"Summary Attributes:", "and its gradients self.zu = np.zeros([M, Din]) self.Kuu = np.zeros([M, M]) self.Kuuinv =", "\"\"\" if mode == self.UP: cav_up_1 = self.x_prev_1 + self.x_next_1 + \\ (1", "= 1 - alpha * tn * wnSwn term2a = 0.5 * alpha", "alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description dmcav 
(TYPE): Description dvcav (TYPE): Description mcav", "Args: m (TYPE): Description v (TYPE): Description dm (TYPE): Description dv (TYPE): Description", "self.Kuu = compute_kernel(2 * ls, 2 * sf, zu, zu) self.Kuu += np.diag(JITTER", "alpha) if not self.gp_emi: # only do this once at the begining for", "wnScavSinvm term2c = 0.5 * tn * mwn**2 / oneminuswnSwn term2d = -0.5", "KuuinvVcavKuuinvKufi) dKfu_via_logZ = np.sum(dKfu_via_mi + dKfu_via_vi, axis=1) dsf2, dls, dzu = compute_kfu_derivatives( dKfu_via_logZ,", "and Bhat dAhat = np.einsum('nd,nm->ndm', dm, kfu) dBhat = np.einsum('nd,na,nb->ndab', dv, kfu, kfu)", "v_prop (TYPE): Description m_t (TYPE): Description v_t (TYPE): Description alpha (TYPE): Description Returns:", "cav_v - cav_v**2 * (dmx**2 - 2 * dvx) new_p2 = 1.0 /", "predict_f(self, inputs): # \"\"\"Summary # Args: # inputs (TYPE): Description # Returns: #", "Description SuinvMu (TYPE): Description t1 (TYPE): Description t2 (TYPE): Description zu (TYPE): Description", "M = self.M Din = self.Din Dout = self.Dout params['ls' + key_suffix] =", "optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR_rank_one, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer =", "(TYPE): Description dv (TYPE): Description extra_args (TYPE): Description x (TYPE): Description alpha (float,", "Sunew[0, :, :, :] self.mu = munew[0, :, :] self.Suinv = Suinvnew[0, :,", "Ahat, Bhat = \\ a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]", "= np.einsum('ab,db->da', Kuuinv, self.mu) Smm = self.Su + np.einsum('da,db->dab', self.mu, self.mu) B =", "(np.sum(logdetSu) - Dout * logdetKuu + np.sum(mu * Suinvm)) variance_i = self.variances[idxs, :]", "SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha) Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat)", "* t2_new self.update_posterior() # axs[0].errorbar(self.zu[:, 0]+0.05, self.mu[0, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[0, :, :]))) #", "post_1 / post_2 return mx, vx # def 
predict_f(self, inputs): # \"\"\"Summary #", "x, self.zu) qfu = np.dot(kfu, self.Kuuinv) mf = np.einsum('nm,dm->nd', qfu, u_sample) vf =", "up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 else:", "if vx is None: return self._forward_prop_deterministic_thru_cav(n, mx, alpha) else: return self._forward_prop_random_thru_cav_mm(n, mx, vx,", "SuinvMuhat, Suinvhat def update_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" # compute the posterior", "self.N) cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :] n1_new = (1 -", ":, np.newaxis] * (-p_i[:, np.newaxis, :] + KuuinvVcavKuuinvKufi) dKfu_via_logZ = np.sum(dKfu_via_mi + dKfu_via_vi,", "prior_var (int, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPSSM, self).__init__( y_train, hidden_size, no_pseudo,", "def inf_sequential(self, epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE): Description alpha (TYPE): Description", "/ new_v new_n1 = new_n2 * new_m frac_n2 = new_n2 - n2cav frac_n1", "self.Kuuinv) mf = np.einsum('nm,dm->nd', qfu, u_sample) vf = kff - np.dot(qfu, kfu.T) Lf", "= {} M = self.M Din = self.Din Dout = self.Dout params['ls' +", "self).__init__(y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) #", "extra_res, decay=decay) self.update_factor_x_sequential( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, [n], decay=decay, alpha=alpha)", "Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = \\ extra_args[0], extra_args[1], extra_args[2],", "self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( idxs, alpha, grad_cav,", "np.einsum('ab,dbc->dac', self.Kuuinv, np.einsum('dab,bc->dac', self.Su, self.Kuuinv)) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: 
x_train (None,", "tx2 (TYPE): Description updated (bool): Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian',", "Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description sn (int): Description", "alpha=alpha) self.dyn_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x_sequential( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v,", "np.einsum('ndab,bc->ndac', Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2 * self.sf) psi1, psi2 =", "1.0 / cav_next_2, cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def compute_cavity_x_sequential(self, mode, idxs,", "np.zeros((N, Din)) self.x_prev_2 = np.zeros((N, Din)) self.x_next_1 = np.zeros((N, Din)) self.x_next_2 = np.zeros((N,", "(TYPE): Description n2cav (TYPE): Description decay (float, optional): Description alpha (float, optional): Description", "Args: alpha (float, optional): Description Returns: TYPE: Description \"\"\" N = self.N scale_post", "self.Din = Din = input_size self.Dout = Dout = output_size self.M = M", "(TYPE): Description mb_size (TYPE): Description alpha (float, optional): Description prop_mode (TYPE, optional): Description", "k in range(no_sweeps): if k % display_steps == 0: print 'PEP, epoch: %d", "Description t02 (TYPE): Description tx1 (TYPE): Description tx2 (TYPE): Description updated (bool): Description", "new_hypers=True) def compute_cavity(self, idxs, alpha): # deletion p_i = self.KuuinvKuf[:, idxs].T[:, np.newaxis, :]", "= self.N M = self.M ls = np.exp(self.ls) sf2 = np.exp(2 * self.sf)", "# mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # return mf, vf # def sample_f(self, inputs,", "grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( m, v, dm, dv, extra_res, x, alpha=alpha) self.sgp_layer.update_factor( idxs,", "cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x_sequential(self.NEXT, [n], alpha) (mprop, vprop, extra_res) =", "up_1 
self.x_up_2 = up_2 else: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1", "= self.SuinvMu - alpha * t1n Suhat = np.linalg.inv(Suinvhat) muhat = np.einsum('ndab,ndb->nda', Suhat,", "+ v_prop + sn2 / alpha m_diff = m_t - m_prop exp_term =", "vx) def _forward_prop_deterministic_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\"", ":, :] = t1_new self.t2[n, :, :, :] = t2_new # TODO: update", "+ self.x_prev_2[idxs, :] cav_next_1 += (1 - alpha) * self.x_next_1[idxs, :] cav_next_2 +=", "self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( m, v, dm, dv, extra_res,", "out moments t1n = self.t1[n, :, :] t2n = self.t2[n, :, :, :]", ":] return cav_prev_1 / cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2 elif mode ==", "= m_diff / v_sum return logZ, dmprop, dvprop, dmt, dvt def update_factor_x( self,", "* dmcav new_v = vcav - vcav**2 * (dmcav**2 - 2 * dvcav)", "Kuuinv = self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) Smm = self.Su + np.einsum('da,db->dab',", "self.ls = params['ls' + key_suffix] self.sf = params['sf' + key_suffix] self.zu = params['zu'", "(TYPE): Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1, x_control=None, gp_emi=False,", "tx2_new = (1 - alpha) * cur_t2 + frac_t2 tx1_new = decay *", "kfu, Ahat, Bhat = \\ a[0], a[1], a[2], a[3], a[4], a[5], a[6] Kuuinv", "mx, vx, self.zu) mout = np.einsum('nm,ndm->nd', psi1, Ahat) Bhatpsi2 = np.einsum('ndab,nab->nd', Bhat, psi2)", "'Caught KeyboardInterrupt ...' 
class SGPLVM(Base_SGPLVM): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description", "cur_t2 + frac_t2 tx1_new = decay * cur_t1 + (1 - decay) *", "np.zeros((N, Din)) self.x_up_1 = np.zeros((N, Din)) self.x_up_2 = np.zeros((N, Din)) self.x_prior_1 = prior_mean", "cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( self.NEXT, grad_cav['mx'], grad_cav['vx'],", "axis=0) post_m_std = np.std(post_m, axis=0) post_m = (post_m - post_m_mean) / post_m_std post_v", "= (mean_i_new / var_i_new + mean_i / variance_i * (1 - alpha)) mean_new_parallel", "y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer_rank_one(self.N, self.Din, self.Dout, self.M) def objective_function(self, params, mb_size,", "self.M) def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0.5): \"\"\"Summary Args: alpha (float, optional): Description", "* n2_damped self.variances[idxs, :] = var_new_parallel self.means[idxs, :] = mean_new_parallel def backprop_grads_reg(self, idxs,", "compute_energy, display_steps): batch_size = idxs.shape[0] scale_logZ = - self.N * 1.0 / batch_size", "variables for the hyperparameters self.ls = np.zeros([Din, ]) self.sf = 0 def compute_phi_prior(self):", "self.ls, 2 * self.sf, x, self.zu) mout = np.einsum('nm,ndm->nd', kfu, Ahat) Bkfukuf =", "= self.gamma beta = self.beta h_si = p_i - np.einsum('dab,nb->nda', beta, k_i) variance_i", "- np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i) return m_si_i, v_si_ii, [h, beta_si, gamma_si] def update_factor(self,", "k_i = self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] var_i_new", "input_size, output_size, no_pseudo): \"\"\"Summary Args: no_train (TYPE): Description input_size (TYPE): Description output_size (TYPE):", "(bool, optional): Description decay (float, optional): Description Returns: TYPE: Description \"\"\" try: for", "self.x_next_1[idxs, :] cav_prev_2 = self.x_up_2[idxs, :] + 
self.x_next_2[idxs, :] cav_prev_1 += (1 -", "decay (TYPE): Description Returns: TYPE: Description \"\"\" # merge info from output cav_up_m,", "optional): Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = \\ extra_args[0],", "frac_t2[neg_idxs] = 0 cur_t1 = self.tx1[n, :] cur_t2 = self.tx2[n, :] tx1_new =", "extra_args muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat = \\ a[0],", "# TODO: alternatitve method for non real-valued data post_m = PCA_reduce(y_train, self.Din) post_m_mean", "KuuinvMcav = np.einsum('ab,ndb->nda', Kuuinv, mcav) dmiKuuinvMcav = dm[:, :, np.newaxis] * KuuinvMcav dKuu_via_mi", "= compute_kfu_derivatives( dKfu_via_logZ, kfu, np.exp(self.ls), sf2, x, self.zu) dls = dls * np.exp(self.ls)", "Kuuinv psi0 = np.exp(2 * self.sf) psi1, psi2 = compute_psi_weave( 2 * self.ls,", "run power-EP and compute grads no_ep_sweeps = 10 # TODO: put this in", "mprop, vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( [n], alpha, grad_cav, extra_res,", "np.einsum('dab,bc->dac', Vmm, Kuuinv)), axis=0) S = 0.5 * S + dKuu_via_logZ dhyp =", "# self.updated = True # K = no_samples # fs = np.zeros((inputs.shape[0], self.Dout,", "dmn, dvn, extra_res, cav_m_n, cav_v_n, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x(", "= np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 / self.variances) T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf) T1u =", "mean_i / variance_i * (1 - alpha)) mean_new_parallel = mean_div_var_i_new * var_new_parallel #", "Description N (TYPE): Description sgp_layer (TYPE): Description t01 (TYPE): Description t02 (TYPE): Description", "cav_t_2 = \\ self.compute_cavity_x(self.PREV, alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x(self.NEXT, alpha)", "new_v = vcav - vcav**2 * (dmcav**2 - 2 * dvcav) new_n2 =", "cav_t_v, cav_t_1, cav_t_2 = \\ 
self.compute_cavity_x(self.PREV, alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\", "idxs (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\"", "kff = np.exp(2 * self.sf) kfu = compute_kernel(2 * self.ls, 2 * self.sf,", "\"\"\" self.Din = Din = input_size self.Dout = Dout = output_size self.M =", "centroids, label = kmeans2(x_train, M, minit='points') else: randind = np.random.permutation(N) centroids = x_train[randind[0:M],", "= np.arange(self.N - 1) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( idxs, cav_tm1_m, cav_tm1_v,", "np.where(frac_t2 < 0) # frac_t2[neg_idxs] = 0 cur_t1 = self.tx1[n, :] cur_t2 =", "(TYPE): Description emi_layer (TYPE): Description lik (TYPE): Description M (TYPE): Description N (TYPE):", "= compute_psi_derivatives( dpsi1, psi1, dpsi2, psi2, ls, sf2, mx, vx, zu) dvcav =", "dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb, alpha, compute_dm2=True) # dm *=", "# def update_hypers(self, params): # \"\"\"Summary # Args: # params (TYPE): Description #", "x_up_2 (TYPE): Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1, x_control=None,", "a[4], a[5], a[6] Kuuinv = self.Kuuinv # compute grads wrt Ahat and Bhat", "get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1 = self.t01 + self.tx1 post_2 =", "if parallel: return self.run_pep_parallel( train_idxs, no_sweeps, alpha, compute_energy, display_steps) else: # TODO return", "run_pep_sequential(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size = idxs.shape[0] scale_logZ = - self.N", "- np.sum(k_i[:, np.newaxis, :] * h_si, axis=2)) dlogZd_dmi = -dlogZd_dmi2 * (mean_i -", "decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description dmcav (TYPE): Description dvcav (TYPE): Description", "# Args: # inputs (TYPE): Description # no_samples (int, optional): Description # Returns:", 
"sgp_grad[key] for key in lik_grad.keys(): grad_all[key] = lik_grad[key] energy /= N for key", "compute_energy and (k == no_sweeps-1) for i in range(batch_size): m_cav, v_cav, prop_info =", "Kuu, betacavKuu) signSu, logdetSu = np.linalg.slogdet(Su) signKuu, logdetKuu = np.linalg.slogdet(Kuu) Suinvm = np.einsum('dab,db->da',", "t1_old = self.t1[n, :, :] t2_old = self.t2[n, :, :, :] t1_new =", "try: for e in range(no_epochs): if e % 50 == 0: print 'epoch", "T1u = np.einsum('bn,nd->db', self.KuuinvKuf, self.means / self.variances) Vinv = self.Kuuinv + T2u self.Suinv", "None: return self._forward_prop_deterministic_thru_cav(n, mx, alpha) else: return self._forward_prop_random_thru_cav_mm(n, mx, vx, alpha) def _forward_prop_deterministic_thru_cav(self,", "0.5 * np.sum(logdet) phi_posterior += 0.5 * \\ np.sum(self.mu * np.linalg.solve(self.Su, self.mu)) return", "10 # TODO: put this in config parallel = True # TODO: put", "batch_size / alpha xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] for k", "Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha) Kuuinv =", "= np.zeros((N, Din)) self.x_up_2 = np.zeros((N, Din)) self.x_prior_1 = prior_mean / prior_var self.x_prior_2", "idxs.shape[0] scale_logZ = - self.N * 1.0 / batch_size / alpha xb =", "(1.0 - rho) * n1_ori # n2_damped = rho * n2_new + (1.0", "xn, alpha=alpha) logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav", "self._forward_prop_deterministic_thru_post(mx) else: return self._forward_prop_random_thru_post_mm(mx, vx) def _forward_prop_deterministic_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description", "vx (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat,", "2 * self.ls, 2 * self.sf, mx, vx, self.zu) mout = np.einsum('nm,dm->nd', psi1,", "self.Su mu = self.mu Lu = np.linalg.cholesky(Su) epsilon = np.random.randn(self.Dout, self.M) u_sample =", "in 
range(no_sweeps): if k % display_steps == 0: print 'PEP, epoch: %d /", "True # K = no_samples # fs = np.zeros((inputs.shape[0], self.Dout, K)) # #", "dzu return sgp_contrib, grads class SGPR_rank_one(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE):", "1 / variance_i * (1 - alpha)) mean_div_var_i_new = (mean_i_new / var_i_new +", "-0.5 / v_sum + 0.5 * m_diff**2 / v_sum**2 dmt = m_diff /", "compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu) KuuinvKuf = np.dot(self.Kuuinv, Kfu.T) self.Kfu = Kfu self.KuuinvKuf =", "self.sgp_layer.forward_prop_thru_cav( idxs, cav_m, cav_v, alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y,", "dm_all = dm - 2 * dv * m dAhat = np.einsum('nd,nm->ndm', dm_all,", "* p_i[:, np.newaxis, :], axis=2) oneminuswnSwn = 1 - alpha * tn *", "mout, vout, extra_res def forward_prop_thru_post(self, mx, vx=None): \"\"\"Summary Args: mx (TYPE): Description vx", "= KuuinvKuf self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf, x_train) KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 /", "# compute cavity covariance betacavKuu = np.einsum('ndab,bc->ndac', beta_si, Kuu) mcav = np.einsum('ab,ndb->nda', Kuu,", "from scipy.optimize import minimize import matplotlib.pyplot as plt import time import pdb from", ":, :, :] = t2_new # TODO: update posterior self.Su = Sunew[0, :,", "# parallel update for entire dataset # TODO: minibatch parallel idxs = np.arange(self.N)", "+ key_suffix] = zu return params def get_hypers(self, key_suffix=''): \"\"\"Summary Args: key_suffix (str,", "= np.linalg.inv(self.Kuu) def update_posterior(self, x_train=None, new_hypers=False): \"\"\"Summary Returns: TYPE: Description \"\"\" # compute", "/ var_i_new + 1 / variance_i * (1 - alpha)) mean_div_var_i_new = (mean_i_new", "- 1, self.Din + self.Dcon_dyn, self.Din, self.M) if gp_emi: self.emi_layer = SGP_Layer( self.N,", "\"\"\"Summary Args: x_train (None, optional): Description key_suffix (str, optional): Description 
Returns: TYPE: Description", "* np.ones(self.M))) grads = {} grads['sf'] = 2*dhyp[0] + dsf grads['ls'] = 2*dhyp[1]", "alpha (float, optional): Description Returns: TYPE: Description \"\"\" N = self.N scale_post =", "vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.lik_layer.output_probabilistic(mf, vf) # return my, vy", "Kuuinv kff = np.exp(2 * self.sf) kfu = compute_kernel(2 * self.ls, 2 *", "print 'Caught KeyboardInterrupt ...' class SGPLVM(Base_SGPLVM): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE):", "not parallel: for n in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) cav_m_n, cav_v_n,", "# n2_damped = rho * n2_new + (1.0 - rho) * n2_ori #", "A) Bpsi2 = np.einsum('dab,nab->nd', B, psi2) vout = psi0 + Bpsi2 - mout**2", "np.random.permutation(N) centroids = x_train[randind[0:M], :] zu = centroids if N < 10000: X1", "self.N Din = self.Din self.x_prev_1 = np.zeros((N, Din)) self.x_prev_2 = np.zeros((N, Din)) self.x_next_1", "key_suffix] = ls params['zu' + key_suffix] = zu return params def get_hypers(self, key_suffix=''):", "self.Kff_diag[idxs][:, np.newaxis] gamma = self.gamma beta = self.beta h_si = p_i - np.einsum('dab,nb->nda',", "vout # TODO def _forward_prop_random_thru_post_mm(self, mx, vx): \"\"\"Summary Args: mx (TYPE): Description vx", "def update_factor(self, n, alpha, grad_cav, extra_args, decay=0): \"\"\"Summary Args: n (TYPE): Description alpha", "cav_t_2 = \\ self.compute_cavity_x_sequential(self.PREV, [n + 1], alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 =", "t1_frac = SuinvMunew - SuinvMuhat t1_old = self.t1[n, :, :] t2_old = self.t2[n,", "N: idxs = np.arange(N) xb = self.x_train yb = self.y_train else: idxs =", "= decay * cur_n2 + (1 - decay) * n2_new elif mode ==", "= self.sgp_layer.backprop_grads_reg( idxs, m_cav, v_cav, dm_s, dm2_s, dv_s, xb, alpha, prop_info) lik_grad =", "= self.N Nb = idxs.shape[0] sf2 = np.exp(2 * self.sf) Dout = self.Dout", "idxs].T h, beta_si, gamma_si = 
prop_info[0], prop_info[1], prop_info[2] kfu = self.Kfu[idxs, :] variance_i", "self.sgp_layer.get_hypers() # emi_params = self.emi_layer.get_hypers() # ssm_params = {'sn': self.sn} # params =", "0 cur_t1 = self.tx1[n, :] cur_t2 = self.tx2[n, :] tx1_new = (1 -", "* np.sum(term2a + term2b + term2c + term2d) sgp_contrib = - term1 -", "= dict(sgp_params) # params.update(lik_params) # return params class SGPSSM(Base_SGPSSM): \"\"\"Summary Attributes: Din (TYPE):", "= \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( mn, vn, dmn,", "(TYPE): Description vcav (TYPE): Description n1cav (TYPE): Description n2cav (TYPE): Description idxs (TYPE):", "* dv dm2_s = scale_logZ * dm2 sgp_contrib, sgp_grad = self.sgp_layer.backprop_grads_reg( idxs, m_cav,", "SuinvMuhat, Suinvhat, kfu, Ahat, Bhat = \\ a[0], a[1], a[2], a[3], a[4], a[5],", "- n1cav if mode == self.NEXT: idxs = np.arange(0, self.N - 1) cur_n1", "new_n2 - n2cav frac_n1 = new_n1 - n1cav if mode == self.NEXT: cur_n1", "alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( mn, vn, dmn, dvn, extra_res, cav_m_n, cav_v_n, alpha=alpha)", "tx2_new def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1 = self.t01 + self.tx1", "# mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.lik_layer.output_probabilistic(mf, vf) # return", "M, M]) self.Splusmm = np.zeros([Dout, M, M]) # numpy variable for inducing points,", "Description lik (str, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR, self).__init__(x_train, y_train, no_pseudo,", "(float, optional): Description Returns: TYPE: Description \"\"\" dmx = grad_cav['mx'] dvx = grad_cav['vx']", "dpsi2 = np.einsum('nd,ndab->nab', dv, Bhat) dsf2, dls, dzu, dmx, dvx = compute_psi_derivatives( dpsi1,", "def update_hypers(self, params, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix (str, optional): Description", 
"scale_logZ = 0 xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] energy =", "sgp_grad.keys(): grad_all[key] = sgp_grad[key] for key in lik_grad.keys(): grad_all[key] = lik_grad[key] energy /=", "term2c + term2d) sgp_contrib = - term1 - term2 KuuinvMcav = np.einsum('ab,ndb->nda', Kuuinv,", "(TYPE): Description mcav (TYPE): Description vcav (TYPE): Description n1cav (TYPE): Description n2cav (TYPE):", "(TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description sn (int): Description updated (bool):", "up_2 # deal with the dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 =", "m_cav, v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) def update_hypers(self, params): \"\"\"Summary", "np.linalg.solve(self.Suhat, self.muhat)) return phi_cavity def compute_phi(self, alpha=1.0): \"\"\"Summary Args: alpha (float, optional): Description", "TODO: remove for loop here # for k in range(K): # fs[:, :,", "Description tx2 (TYPE): Description updated (bool): Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo,", "Bpsi2 = np.einsum('dab,nab->nd', B, psi2) vout = psi0 + Bpsi2 - mout**2 return", "= np.arange(1, self.N) cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2 = self.x_up_2[idxs,", "self.means[idxs, :] # compute cavity covariance betacavKuu = np.einsum('ndab,bc->ndac', beta_si, Kuu) mcav =", "psi1, psi2, Ahat, Bhat = \\ a[0], a[1], a[2], a[3], a[4], a[5], a[6],", "self.emi_layer.get_hypers() # ssm_params = {'sn': self.sn} # params = dict(sgp_params) # params.update(emi_params) #", "= output_size self.M = M = no_pseudo self.N = N = no_train #", "vx, zu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = 2 * np.einsum('ndab,ndb->nda',", "else: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 =", "zu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = 2 * np.einsum('ndab,ndb->nda', 
dvcav,", "dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( mn,", "centroids if N < 10000: X1 = np.copy(x_train) else: randind = np.random.permutation(N) X1", "dmcav, dvcav, mcav, vcav, n1cav, n2cav, idxs, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE):", "self.means[idxs, :] = mean_new_parallel def backprop_grads_reg(self, idxs, m, v, dm, dm2, dv, x,", ":, :] t2n = self.t2[n, :, :, :] Suinvhat = self.Suinv - alpha", "- n1cav if mode == self.NEXT: cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs,", "not None: Kfu = compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu) KuuinvKuf = np.dot(self.Kuuinv, Kfu.T) self.Kfu", "dKuu_via_vi dKfu_via_mi = dmiKuuinvMcav dKfu_via_vi = 2 * dv[:, :, np.newaxis] * (-p_i[:,", "self.x_up_2 post_1[0, :] += self.x_prior_1 post_2[0, :] += self.x_prior_2 vx = 1.0 /", "key_suffix] self.zu = params['zu' + key_suffix] # update Kuu given new hypers self.compute_kuu()", "Kuuinv = self.Kuuinv # compute grads wrt Ahat and Bhat dAhat = np.einsum('nd,nm->ndm',", "50 == 0: print 'epoch %d/%d' % (e, no_epochs) if parallel: self.inf_parallel(e, alpha,", "TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha) Kuuinv = self.Kuuinv", "Description m_t (TYPE): Description v_t (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description", "self.mu = np.einsum('dab,db->da', self.Su, T1u) self.gamma = np.einsum('ab,db->da', self.Kuuinv, self.mu) self.beta = self.Kuuinv", "idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size = idxs.shape[0] scale_logZ = - self.N *", "gn**2 * wnScavwn term2b = - gn * tn * wnScavSinvm term2c =", "\"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description grad_cav (TYPE): Description extra_args (TYPE):", "dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) if find_log_lik: N = self.N lik_contrib = scale_logZ", "key_suffix=''): \"\"\"Summary 
Args: key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" params =", "def compute_phi(self, alpha=1.0): \"\"\"Summary Args: alpha (float, optional): Description Returns: TYPE: Description \"\"\"", "prior_var (int, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPLVM, self).__init__(y_train, hidden_size, no_pseudo, lik,", "1.0 / n1_damped # mean_new_parallel = var_new_parallel * n2_damped self.variances[idxs, :] = var_new_parallel", "params['sf' + key_suffix] = sf params['ls' + key_suffix] = ls params['zu' + key_suffix]", "Description Returns: TYPE: Description \"\"\" a = extra_args muhat, Suhat, SuinvMuhat, Suinvhat, kfu,", "x, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v (TYPE): Description dm (TYPE): Description", "* p_i[:, np.newaxis, :], axis=2) mwn = np.sum(mu * p_i[:, np.newaxis, :], axis=2)", "- 0.5 * np.log(alpha) logZ = exp_term + const_term + alpha_term dvt =", "np.sum(k_i[:, np.newaxis, :] * h_si, axis=2)) dlogZd_dmi = -dlogZd_dmi2 * (mean_i - np.sum(k_i[:,", "dm2_s = scale_logZ * dm2 sgp_contrib, sgp_grad = self.sgp_layer.backprop_grads_reg( idxs, m_cav, v_cav, dm_s,", "+ self.x_next_2 + \\ (1 - alpha) * self.x_up_2 cav_up_1[0, :] += self.x_prior_1", "cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT: idxs = np.arange(0, self.N - 1)", "parameters of q(U) N = self.N M = self.M Din = self.Din Dout", "self.ls, 2 * self.sf, x, self.zu) mout = np.einsum('nm,dm->nd', kfu, A) Bpsi2 =", "np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv kff = np.exp(2 * self.sf) kfu = compute_kernel(2", "= no_pseudo self.N = N = no_train # factor variables self.t1 = np.zeros([N,", "Description grad_cav (TYPE): Description cav_m (TYPE): Description cav_v (TYPE): Description decay (float, optional):", "prop_info[0], prop_info[1], prop_info[2] k_i = self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i =", "(int, optional): Description parallel (bool, optional): Description decay (float, optional): 
Description Returns: TYPE:", "v_sum dmprop = m_diff / v_sum return logZ, dmprop, dvprop, dmt, dvt def", "np.einsum('nm,ndm->nd', psi1, Ahat) Bhatpsi2 = np.einsum('ndab,nab->nd', Bhat, psi2) vout = psi0 + Bhatpsi2", "Description v_t (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" sn2 =", "* m_diff**2 / v_sum**2 dvprop = -0.5 / v_sum + 0.5 * m_diff**2", "Suhat)) Suinvnew = np.linalg.inv(Sunew) SuinvMunew = np.einsum('ndab,ndb->nda', Suinvnew, munew) t2_frac = Suinvnew -", "= np.arange(1, self.N) cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :] n1_new =", "vf # def sample_f(self, inputs, no_samples=1): # \"\"\"Summary # Args: # inputs (TYPE):", "+ np.sum(self.t2, axis=0) self.SuinvMu = np.sum(self.t1, axis=0) self.Su = np.linalg.inv(self.Suinv) self.mu = np.einsum('dab,db->da',", "+ hd1 beta_si = beta - hd2h # projection h = p_i -", "Description decay (float, optional): Description Returns: TYPE: Description \"\"\" try: for e in", "# self.sgp_layer.update_posterior() # self.updated = True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my,", "n2cav (TYPE): Description decay (float, optional): Description alpha (float, optional): Description Returns: TYPE:", "vcav, n1cav, n2cav, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description dmcav (TYPE): Description", "Description mb_size (TYPE): Description alpha (float, optional): Description prop_mode (TYPE, optional): Description Returns:", "label = kmeans2(x_train, M, minit='points') else: randind = np.random.permutation(N) centroids = x_train[randind[0:M], :]", "self.x_next_1 + \\ (1 - alpha) * self.x_up_1 cav_up_2 = self.x_prev_2 + self.x_next_2", "Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Su) phi_posterior = 0.5 * np.sum(logdet) phi_posterior +=", "\"\"\" # if not self.updated: # self.sgp_layer.update_posterior() # self.updated = True # K", "= self.x_train yb = self.y_train else: idxs = np.random.choice(N, mb_size, replace=False) xb 
=", "if find_log_lik: N = self.N lik_contrib = scale_logZ * np.sum(logZ) dm_s = scale_logZ", "self.KuuinvKuf = KuuinvKuf self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf, x_train) KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf, 1.0", "= idxs.shape[0] scale_logZ = - self.N * 1.0 / batch_size / alpha xb", "v, dm, dv, extra_res, x, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) except", "* t2_old + (1 - decay) * t2_new self.update_posterior() # axs[0].errorbar(self.zu[:, 0]+0.05, self.mu[0,", "return muhat, Suhat, SuinvMuhat, Suinvhat def update_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" #", "t1_old + t1_frac t2_new = (1.0 - alpha) * t2_old + t2_frac if", "beta_si, Kuu) mcav = np.einsum('ab,ndb->nda', Kuu, gamma_si) Sucav = Kuu - np.einsum('ab,ndbc->ndac', Kuu,", "compute_phi_cavity(self): \"\"\"Summary Returns: TYPE: Description \"\"\" logZ_posterior = 0 (sign, logdet) = np.linalg.slogdet(self.Suhat)", "- self.t01 self.tx2 = post_2 - self.t02 def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0):", "# def sample_f(self, inputs, no_samples=1): # \"\"\"Summary # Args: # inputs (TYPE): Description", "v (TYPE): Description dm (TYPE): Description dv (TYPE): Description extra_args (TYPE): Description x", "self.variances = np.zeros([N, Dout]) self.variances.fill(1e20) self.means = np.zeros([N, Dout]) # pep variables self.gamma", "* np.exp(self.ls) dsf2 += np.sum(dv) dsf = 2 * sf2 * dsf2 #", "= np.random.choice(N, mb_size, replace=False) xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] #", "Description Returns: TYPE: Description \"\"\" sn2 = np.exp(2 * self.sn) v_sum = v_t", "0: print 'PEP, epoch: %d / %d' % (k, no_sweeps) find_log_lik = compute_energy", "* cur_n1 + frac_n1 n2_new = (1 - alpha) * cur_n2 + frac_n2", "alpha) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], cav_m_n, cav_v_n, alpha=alpha) logZn, dmn,", "* tn * mwn**2 / oneminuswnSwn term2d 
= -0.5 / alpha * np.log(oneminuswnSwn)", "minibatch parallel idxs = np.arange(self.N) y = self.y_train[idxs, :] x = self.x_train[idxs, :]", "(TYPE): Description grad_cav (TYPE): Description extra_args (TYPE): Description decay (int, optional): Description Returns:", "K = no_samples # fs = np.zeros((inputs.shape[0], self.Dout, K)) # # TODO: remove", "Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = \\ extra_args[0], extra_args[1],", ":] * h, axis=2) mean_i_new = mcav - dm / dm2 var_new_parallel =", "self.ls, 2 * self.sf, mx, vx, self.zu) mout = np.einsum('nm,dm->nd', psi1, A) Bpsi2", "except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' def compute_cavity_x(self, n, alpha): \"\"\"Summary Args: n", ":] = decay * cur_n2 + (1 - decay) * n2_new elif mode", "(m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, x, alpha=alpha) logZ, dm, dv =", "Din)) self.x_up_1 = np.zeros((N, Din)) self.x_up_2 = np.zeros((N, Din)) self.x_prior_1 = prior_mean /", "alpha, parallel, compute_energy=True) return energy, grad_all def run_pep(self, train_idxs, no_sweeps, alpha, parallel, compute_energy=False,", "= self.tx1[n, :] cur_t2 = self.tx2[n, :] tx1_new = (1 - alpha) *", "import scipy.linalg as npalg from scipy import special from scipy.optimize import minimize import", "- mout**2 extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat]", "self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, decay=decay, alpha=alpha) self.update_factor_x( self.PREV, dmt, dvt,", "elif mode == self.NEXT: idxs = np.arange(0, self.N - 1) cav_next_1 = self.x_up_1[idxs,", "Description # Returns: # TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params) # self.emi_layer.update_hypers(params) #", "+ (1 - decay) * n1_new self.x_next_2[idxs, :] = decay * cur_n2 +", "- dm / dm2 var_new_parallel = 1 / (1 / var_i_new + 1", "\"\"\" dmx = grad_cav['mx'] dvx = grad_cav['vx'] new_m = cav_m + cav_v 
*", "= \\ self.compute_cavity_x(self.NEXT, alpha) idxs = np.arange(self.N - 1) (mprop, vprop, extra_res) =", "self.sgp_layer.update_posterior(None, new_hypers=False) def update_hypers(self, params): \"\"\"Summary Args: params (TYPE): Description \"\"\" self.sgp_layer.update_hypers(params, self.x_train)", "Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2 * self.sf) psi1, psi2 = compute_psi_weave(", "triu_ind = np.triu_indices(N) ls = np.zeros((Din, )) d2imed = np.median(x_dist[triu_ind]) for i in", "grad_cav (TYPE): Description cav_m (TYPE): Description cav_v (TYPE): Description decay (float, optional): Description", "# merge info from output cav_up_m, cav_up_v, _, _ = self.compute_cavity_x(self.UP, alpha) if", "* S + dKuu_via_logZ dhyp = d_trace_MKzz_dhypers( 2*self.ls, 2*self.sf, self.zu, S, Kuu -", "= {} grad_input = {'mx': dmx, 'vx': dvx, 'mcav': dmcav, 'vcav': dvcav} return", "rho) * n2_ori # var_new_parallel = 1.0 / n1_damped # mean_new_parallel = var_new_parallel", "= self.N lik_contrib = scale_logZ * np.sum(logZ) dm_s = scale_logZ * dm dv_s", "= 1.0 / post_2 mx = post_1 / post_2 return mx, vx def", "decay) * tx1_new tx2_new = decay * cur_t2 + (1 - decay) *", "Description Raises: NotImplementedError: Description \"\"\" super(SGPLVM, self).__init__(y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var) self.sgp_layer", "and (k == no_sweeps-1) m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity(idxs, alpha) logZ, dm, dv,", "n (TYPE): Description mx (TYPE): Description vx (TYPE): Description alpha (TYPE): Description Returns:", "compute_kernel(2 * ls, 2 * sf, zu, zu) self.Kuu += np.diag(JITTER * np.ones((M,", "Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\" if mode == self.UP: cav_up_1 =", "NotImplementedError: Description \"\"\" super(SGPSSM, self).__init__( y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var, x_control, gp_emi,", "\\ np.sum(self.mu * np.linalg.solve(self.Su, self.mu)) return phi_posterior def 
compute_phi_cavity(self): \"\"\"Summary Returns: TYPE: Description", "up_1 self.x_up_2 = up_2 for n in range(0, self.N - 1): # deal", "* post_m self.tx1 = post_1 - self.t01 self.tx2 = post_2 - self.t02 def", ":] += self.x_prior_1 cav_up_2[0, :] += self.x_prior_2 return cav_up_1 / (cav_up_2 + 1e-16),", "= np.sum(wnScav * Suinvm, axis=2) wnS = np.einsum('na,dab->ndb', p_i, Su) wnSwn = np.sum(wnS", "[0, 1]) temp3 = np.einsum('na,ndb->ab', p_i, p_idlogZ_dvi) dKuu_via_vi = temp1 + temp2 +", "mode == self.PREV: idxs = np.arange(1, self.N) cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs,", "tx1 (TYPE): Description tx2 (TYPE): Description updated (bool): Description \"\"\" def __init__(self, y_train,", "for each layer self.update_posterior(x_train, new_hypers=True) def compute_cavity(self, idxs, alpha): # deletion p_i =", "self.lik_layer.output_probabilistic(mf, vf) # return my, vy # def update_hypers(self, params): # \"\"\"Summary #", "optional): Description Returns: TYPE: Description \"\"\" if vx is None: return self._forward_prop_deterministic_thru_cav(n, mx,", "self.beta = self.Kuuinv - np.einsum('ab,dbc->dac', self.Kuuinv, np.einsum('dab,bc->dac', self.Su, self.Kuuinv)) def init_hypers(self, x_train=None, key_suffix=''):", "idxs, cav_m, cav_v, alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha)", "np.einsum('ab,db->da', self.Kuuinv, self.mu) self.beta = self.Kuuinv - np.einsum('ab,dbc->dac', self.Kuuinv, np.einsum('dab,bc->dac', self.Su, self.Kuuinv)) def", "= np.arange(N) xb = self.x_train yb = self.y_train else: idxs = np.random.choice(N, mb_size,", "frac_n1 n2_new = (1 - alpha) * cur_n2 + frac_n2 self.x_next_1[idxs, :] =", "cav_prev_2 elif mode == self.NEXT: idxs = np.arange(0, self.N - 1) cav_next_1 =", "self.mu)) return phi_posterior def compute_phi_cavity(self): \"\"\"Summary Returns: TYPE: Description \"\"\" logZ_posterior = 0", "= \\ extra_args[0], extra_args[1], extra_args[2], extra_args[3] dmcav, dvcav = grad_cav['mcav'], 
grad_cav['vcav'] # perform", "dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, decay=decay, alpha=alpha) def inf_sequential(self, epoch, alpha, decay): \"\"\"Summary", "key_suffix] = self.zu return params def update_hypers(self, params, x_train, key_suffix=''): \"\"\"Summary Args: params", "Description \"\"\" Kuuinv = self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) Smm = self.Su", "key_suffix] = self.ls params['sf' + key_suffix] = self.sf params_zu_i = self.zu params['zu' +", "= self.variances[idxs, :] mean_i = self.means[idxs, :] tn = 1.0 / variance_i gn", "+ dKuu_via_logZ dhyp = d_trace_MKzz_dhypers( 2*self.ls, 2*self.sf, self.zu, S, Kuu - np.diag(JITTER *", "= np.random.randn(self.Dout, self.M) u_sample = mu + np.einsum('dab,db->da', Lu, epsilon) kff = compute_kernel(2", "% (e, no_epochs) if parallel: self.inf_parallel(e, alpha, decay) else: self.inf_sequential(e, alpha, decay) except", "= SGP_Layer( self.N - 1, self.Din + self.Dcon_dyn, self.Din, self.M) if gp_emi: self.emi_layer", "0 def forward_prop_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\"", "'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv kff = np.exp(2 * self.sf) kfu", "np.linalg.slogdet(self.Suhat) phi_cavity = 0.5 * np.sum(logdet) phi_cavity += 0.5 * \\ np.sum(self.muhat *", "Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat] return mout, vout, extra_res def _forward_prop_random_thru_cav_mm(self, n,", "dm, kfu) dBhat = np.einsum('nd,na,nb->ndab', dv, kfu, kfu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat,", "vx, self.zu) mout = np.einsum('nm,dm->nd', psi1, A) Bpsi2 = np.einsum('dab,nab->nd', B, psi2) vout", "dAhat) grad_hyper = {} grad_input = {'mx': dmx, 'vx': dvx, 'mcav': dmcav, 'vcav':", "params['sn'] # def init_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description # \"\"\"", "= np.exp(2 * self.sf) Dout = self.Dout Kuu = self.Kuu Kuuinv = self.Kuuinv", "cav_prev_2 elif mode == self.NEXT: cav_next_1 = 
self.x_up_1[idxs, :] + self.x_prev_1[idxs, :] cav_next_2", "self.compute_cavity_x(idxs, alpha) (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, cav_m, cav_v, alpha=alpha) logZ,", "m_t, v_t, alpha): \"\"\"Summary Args: m_prop (TYPE): Description v_prop (TYPE): Description m_t (TYPE):", "TODO: put this in config parallel = True # TODO: put this in", "* np.pi * sn2) - 0.5 * np.log(alpha) logZ = exp_term + const_term", "cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 else: up_1, up_2 =", "Returns: TYPE: Description \"\"\" N = self.N if mb_size >= N: idxs =", "vy, vyn # def predict_f(self, inputs): # \"\"\"Summary # Args: # inputs (TYPE):", ":, :] = decay * t2_old + (1 - decay) * t2_new self.update_posterior()", "n, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description alpha (float, optional): Description Returns: TYPE:", "B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2.0", "* ls, 2 * sf, zu, zu) self.Kuu += np.diag(JITTER * np.ones((M, )))", "self.x_prev_1[idxs, :] cav_next_2 = self.x_up_2[idxs, :] + self.x_prev_2[idxs, :] cav_next_1 += (1 -", "cav_v * dmx new_v = cav_v - cav_v**2 * (dmx**2 - 2 *", "Returns: TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Su) phi_posterior = 0.5 * np.sum(logdet)", "m_diff**2 / v_sum**2 dvprop = -0.5 / v_sum + 0.5 * m_diff**2 /", "output cav_up_m, cav_up_v, _, _ = self.compute_cavity_x(self.UP, alpha) if not self.gp_emi: # only", "SuinvMunew = np.einsum('ndab,ndb->nda', Suinvnew, munew) t2_frac = Suinvnew - Suinvhat t1_frac = SuinvMunew", "Kuuinv, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) - Kuuinv kff", "self.zu return params def update_hypers(self, params, x_train, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description", "* (np.sum(logdetSu) - Dout * logdetKuu + np.sum(mu * Suinvm)) variance_i = self.variances[idxs,", "Description x_up_1 (TYPE): Description x_up_2 (TYPE): 
Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo,", "sf2 = np.exp(2 * self.sf) Dout = self.Dout Kuu = self.Kuu Kuuinv =", "idxs, cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt, dvt = \\ self.compute_transition_tilted( mprop,", "* gn**2 * wnScavwn term2b = - gn * tn * wnScavSinvm term2c", "grad_hyper = {} grad_cav = {'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_cav def", "hd1 beta_si = beta - hd2h # projection h = p_i - np.einsum('ndab,nb->nda',", "v_cav, dm, dv, alpha, scale_logZ) energy = sgp_contrib + lik_contrib grad_all = {}", "return my, vy, vyn # def predict_f(self, inputs): # \"\"\"Summary # Args: #", "var_new_parallel = 1 / (1 / var_i_new + 1 / variance_i * (1", "variables self.tx1 = np.zeros((self.N, self.Din)) self.tx2 = np.zeros((self.N, self.Din)) self.t01 = prior_mean /", "lik_grad.keys(): grad_all[key] = lik_grad[key] energy /= N for key in grad_all.keys(): grad_all[key] /=", "decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' 
def compute_cavity_x(self, mode, alpha): \"\"\"Summary Args:", "Description sgp_layer (TYPE): Description t01 (TYPE): Description t02 (TYPE): Description tx1 (TYPE): Description", "\"\"\"Summary Args: mode (TYPE): Description idxs (TYPE): Description alpha (TYPE): Description Returns: TYPE:", "# return fs # def predict_y(self, inputs): # \"\"\"Summary # Args: # inputs", "key_suffix] = self.zu return params def update_hypers(self, params, key_suffix=''): \"\"\"Summary Args: params (TYPE):", "wnSwn = np.sum(wnS * p_i[:, np.newaxis, :], axis=2) mwn = np.sum(mu * p_i[:,", "self.Kuuinv = np.zeros([M, M]) # variables for the hyperparameters self.ls = np.zeros([Din, ])", "[n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( [n], alpha, grad_cav, cav_m_n, cav_v_n, decay=decay) else:", "phi_posterior += 0.5 * \\ np.sum(self.mu * np.linalg.solve(self.Su, self.mu)) return phi_posterior def compute_phi_cavity(self):", "dv[:, :, np.newaxis] temp1 = - np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi, p_idlogZ_dvi) temp2 = np.transpose(temp1, [0,", "m_si_i = np.einsum('na,nda->nd', k_i, gamma_si) v_si_ii = k_ii - np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i)", "1], decay=decay, alpha=alpha) def inference(self, alpha=1.0, no_epochs=10, parallel=True, decay=0): \"\"\"Summary Args: alpha (float,", "utils import * from kernels import * from lik_layers import Gauss_Layer, Probit_Layer, Gauss_Emis", "Description Kuuinv (TYPE): Description ls (TYPE): Description M (TYPE): Description mu (TYPE): Description", "- alpha) * t1_old + t1_frac t2_new = (1.0 - alpha) * t2_old", "KuuinvKuf self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf, x_train) KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 / self.variances)", "beta_si, k_i) return m_si_i, v_si_ii, [h, beta_si, gamma_si] def update_factor(self, idxs, mcav, vcav,", "self.dyn_layer = SGP_Layer( self.N - 1, self.Din + self.Dcon_dyn, self.Din, self.M) if gp_emi:", "= p_i - np.einsum('dab,nb->nda', beta, k_i) 
variance_i = self.variances[idxs, :] mean_i = self.means[idxs,", "# params.update(lik_params) # return params class SGPSSM(Base_SGPSSM): \"\"\"Summary Attributes: Din (TYPE): Description Dout", "psi1, Ahat) Bhatpsi2 = np.einsum('ndab,nab->nd', Bhat, psi2) vout = psi0 + Bhatpsi2 -", "= dm - 2 * dv * m dAhat = np.einsum('nd,nm->ndm', dm_all, psi1)", "* (1 - alpha)) mean_new_parallel = mean_div_var_i_new * var_new_parallel # if alpha ==", "Args: x (TYPE): Description Returns: TYPE: Description \"\"\" Su = self.Su mu =", "self.Kuu += np.diag(JITTER * np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def compute_cavity(self, n, alpha=1.0):", "self.t2 = np.zeros([N, Dout, M, M]) # TODO self.mu = np.zeros([Dout, M, ])", "dzu = compute_kfu_derivatives( dKfu_via_logZ, kfu, np.exp(self.ls), sf2, x, self.zu) dls = dls *", "self.Dout if x_train is None: ls = np.log(np.ones((Din, )) + 0.1 * np.random.rand(Din,", "np.arange(1, self.N) cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :] n1_new = (1", "alpha, decay) else: self.inf_sequential(e, alpha, decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' 
def", "+= 1e-16 # dv *= 0 self.sgp_layer.update_factor( idxs, m_cav, v_cav, dm, dm2, dv,", "new_v = cav_v - cav_v**2 * (dmx**2 - 2 * dvx) new_p2 =", "+ alpha_term dvt = -0.5 / v_sum + 0.5 * m_diff**2 / v_sum**2", "self.Dout = Dout = output_size self.M = M = no_pseudo self.N = N", "of q(U) N = self.N M = self.M Din = self.Din Dout =", "# if alpha == 1: # rho = 0.5 # n1_new = 1.0", "Description decay (int, optional): Description Returns: TYPE: Description \"\"\" try: for e in", "= compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu) KuuinvKuf = np.dot(self.Kuuinv, Kfu.T) self.Kfu = Kfu self.KuuinvKuf", "prior_mean, prior_var) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) # natural params for latent", "import minimize import matplotlib.pyplot as plt import time import pdb from scipy.cluster.vq import", "Description Raises: NotImplementedError: Description \"\"\" new_m = mcav + vcav * dmcav new_v", "+ key_suffix] self.zu = params['zu' + key_suffix] # update Kuu given new hypers", "(bool): Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1): \"\"\"Summary Args:", "cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, [n], decay=decay, alpha=alpha) self.update_factor_x_sequential( self.PREV, dmt, dvt, cav_t_m, cav_t_v,", "self.compute_cavity_x(self.UP, alpha) if not self.gp_emi: # only do this once at the begining", "= np.zeros([Dout, M, M]) self.SuinvMu = np.zeros([Dout, M, ]) self.Suinv = np.zeros([Dout, M,", "alpha_term = 0.5 * (1 - alpha) * np.log(2 * np.pi * sn2)", "\\ a[0], a[1], a[2], a[3], a[4], a[5], a[6] Kuuinv = self.Kuuinv # compute", "cur_t2 + (1 - decay) * tx2_new self.tx1[n, :] = tx1_new self.tx2[n, :]", "/ prior_var # TODO: alternatitve method for non real-valued data post_m = PCA_reduce(y_train,", "lik_layer (TYPE): Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description t01", "Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Smm = 
Suhat + np.einsum('nda,ndb->ndab',", "# \"\"\" # self.sgp_layer.update_hypers(params) # self.lik_layer.update_hypers(params) # def init_hypers(self): # \"\"\"Summary # Returns:", "new_hypers=False) if find_log_lik: N = self.N lik_contrib = scale_logZ * np.sum(logZ) dm_s =", "self.Su = np.linalg.inv(Vinv) self.mu = np.einsum('dab,db->da', self.Su, T1u) self.gamma = np.einsum('ab,db->da', self.Kuuinv, self.mu)", "x_control=None, gp_emi=False, control_to_emi=True): \"\"\"Summary Args: y_train (TYPE): Description hidden_size (TYPE): Description no_pseudo (TYPE):", "energy, grad_all def run_pep(self, train_idxs, no_sweeps, alpha, parallel, compute_energy=False, display_steps=10): if parallel: return", "and Bhat dm_all = dm - 2 * dv * m dAhat =", "alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( [n], alpha, grad_cav, cav_m_n, cav_v_n,", "+ 0.1 * np.random.rand(Din, )) sf = np.log(np.array([1])) zu = np.tile(np.linspace(-1, 1, M).reshape((M,", "up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 #", "= \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( m, v, dm,", "extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat] return mout,", "new_hypers and x_train is not None: Kfu = compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu) KuuinvKuf", "self.N - 1, self.Din + self.Dcon_dyn, self.Din, self.M) if gp_emi: self.emi_layer = SGP_Layer(", "# update Kuu given new hypers self.compute_kuu() # compute mu and Su for", "no_pseudo (TYPE): Description lik (str, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR_rank_one, self).__init__(x_train,", "np.einsum('ndab,ndbc->ndac', inner, Suhat)) Suinvnew = np.linalg.inv(Sunew) SuinvMunew = np.einsum('ndab,ndb->nda', Suinvnew, munew) t2_frac =", "self.SuinvMu) def init_hypers(self, x_train=None, key_suffix=''): 
\"\"\"Summary Args: x_train (None, optional): Description key_suffix (str,", "grad_all = self.run_pep(idxs, no_ep_sweeps, alpha, parallel, compute_energy=True) return energy, grad_all def run_pep(self, train_idxs,", "self.t01 self.tx2 = post_2 - self.t02 def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0): \"\"\"Summary", "hypers self.update_hypers(params) # run power-EP and compute grads no_ep_sweeps = 10 # TODO:", "for latent variables N = self.N Din = self.Din self.x_prev_1 = np.zeros((N, Din))", "self.Su, self.SuinvMu) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: x_train (None, optional): Description key_suffix", "(int, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPSSM, self).__init__( y_train, hidden_size, no_pseudo, lik,", "Gauss_Emis from base_models import Base_SGPR, Base_SGPLVM, Base_SGPSSM from config import * class SGP_Layer(object):", "grad_cav['mcav'], grad_cav['vcav'] # perform Power-EP update munew = muhat + np.einsum('ndab,ndb->nda', Suhat, dmcav)", "- m_prop exp_term = -0.5 * m_diff**2 / v_sum const_term = -0.5 *", "Args: alpha (float, optional): Description no_epochs (int, optional): Description parallel (bool, optional): Description", "self.mu Suinv = self.Suinv p_i = self.KuuinvKuf[:, idxs].T h, beta_si, gamma_si = prop_info[0],", "alpha, grad_cav, extra_args, decay=0): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description grad_cav", "= self.Dout * 0.5 * logdet return logZ_prior def compute_phi_posterior(self): \"\"\"Summary Returns: TYPE:", "-0.5 * m_diff**2 / v_sum const_term = -0.5 * np.log(2 * np.pi *", "Kuuinv, dAhat) grad_hyper = {} grad_cav = {'mcav': dmcav, 'vcav': dvcav} return grad_hyper,", "psi2 dpsi1 = np.einsum('nd,ndm->nm', dm_all, Ahat) dpsi2 = np.einsum('nd,ndab->nab', dv, Bhat) dsf2, dls,", "extra_res def forward_prop_thru_post(self, mx, vx=None): \"\"\"Summary Args: mx (TYPE): Description vx (None, optional):", "h = p_i - 
np.einsum('ndab,nb->nda', beta_si, k_i) m_si_i = np.einsum('na,nda->nd', k_i, gamma_si) v_si_ii", "= beta - hd2h # projection h = p_i - np.einsum('ndab,nb->nda', beta_si, k_i)", "mode (TYPE): Description idxs (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description Raises:", "self.emi_layer = SGP_Layer( self.N, self.Din + self.Dcon_emi, self.Dout, self.M) # natural params for", "# params (TYPE): Description # Returns: # TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params)", "self.compute_cavity(n, alpha) Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Bhat = np.einsum(", "e in range(no_epochs): if e % 50 == 0: print 'epoch %d/%d' %", "[muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat] return mout, vout, extra_res", "1.0 / post_2 mx = post_1 / post_2 return mx, vx # def", "\"\"\" # self.sgp_layer.update_hypers(params) # self.emi_layer.update_hypers(params) # self.sn = params['sn'] # def init_hypers(self): #", "Raises: NotImplementedError: Description \"\"\" super(SGPR, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer(self.N, self.Din,", "Suinvhat = self.compute_cavity(n, alpha) Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Bhat", "alpha) grad_hyper, grad_cav = self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha)", "* n2_new else: raise NotImplementedError('unknown mode') def update_factor_x_sequential( self, mode, dmcav, dvcav, mcav,", "* sf2 * dsf2 # compute the gradients Vmm = Su + np.einsum('da,db->dab',", "= {'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_cav def update_factor(self, n, alpha, grad_cav,", "latent variables N = self.N Din = self.Din self.x_prev_1 = np.zeros((N, Din)) self.x_prev_2", "new_p1 = new_p2 * new_m frac_t2 = new_p2 - 1.0 / cav_v frac_t1", "dv, alpha, prop_info): h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2] k_i = self.Kfu[idxs,", "np.diag(JITTER 
* np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def compute_cavity(self, n, alpha=1.0): \"\"\"Summary Args:", ":] t2n = self.t2[n, :, :, :] Suinvhat = self.Suinv - alpha *", "Kuuinv, np.einsum('dab,bc->dac', Vmm, Kuuinv)), axis=0) S = 0.5 * S + dKuu_via_logZ dhyp", "self.N if mb_size >= N: idxs = np.arange(N) xb = self.x_train yb =", "(TYPE): Description x_prior_2 (TYPE): Description x_up_1 (TYPE): Description x_up_2 (TYPE): Description \"\"\" def", "idxs, x, alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper,", "return grad_hyper, grad_input def backprop_grads_reg(self, m, v, dm, dv, extra_args, x, alpha=1.0): \"\"\"Summary", "# compute mu and Su for each layer self.update_posterior() class SGPR(Base_SGPR): \"\"\"Summary Attributes:", "kfu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper", "'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_input def backprop_grads_reg(self, m, v, dm, dv,", "Din = self.Din Dout = self.Dout params['ls' + key_suffix] = self.ls params['sf' +", "begining for gaussian emission lik if isinstance(self.emi_layer, Gauss_Layer) and epoch == 0: up_1,", "(sign, logdet) = np.linalg.slogdet(self.Kuu) logZ_prior = self.Dout * 0.5 * logdet return logZ_prior", "alpha) * self.x_prev_2[idxs, :] return cav_prev_1 / cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2", "/ post_m_std post_v = 0.1 * np.ones_like(post_m) post_2 = 1.0 / post_v post_1", "wnScavSinvm = np.sum(wnScav * Suinvm, axis=2) wnS = np.einsum('na,dab->ndb', p_i, Su) wnSwn =", "predict_y(self, inputs): # \"\"\"Summary # Args: # inputs (TYPE): Description # Returns: #", "alpha (float, optional): Description Returns: TYPE: Description \"\"\" a = extra_args muhat, Suhat,", "class SGPR(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer (TYPE): Description", "epsilon = np.random.randn(x.shape[0], self.Dout) f_sample = mf + 
np.einsum('ab,bd->ad', Lf, epsilon) return f_sample", "= (1.0 - alpha) * t2_old + t2_frac if t1_new.shape[0] == 1: #", "np.zeros((self.N, self.Din)) self.t01 = prior_mean / prior_var self.t02 = 1.0 / prior_var #", "compute the gradients Vmm = Su + np.einsum('da,db->dab', mu, mu) S = self.Dout", "- decay) * t2_new self.update_posterior() # axs[0].errorbar(self.zu[:, 0]+0.05, self.mu[0, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[0, :,", "))) self.Kuuinv = np.linalg.inv(self.Kuu) def compute_cavity(self, n, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description", "np.tile(np.linspace(-1, 1, M).reshape((M, 1)), (1, Din)) else: if N < 10000: centroids, label", "self.sf = 0 def compute_phi_prior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet) =", "def update_factor(self, idxs, mcav, vcav, dm, dm2, dv, alpha, prop_info): h, beta_si, gamma_si", "alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...'", "decay) else: self.inf_sequential(e, alpha, decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' 
def compute_cavity_x(self,", "self.emi_layer.init_hypers() # ssm_params = {'sn': np.log(0.001)} # init_params = dict(sgp_params) # init_params.update(lik_params) #", "= sgp_contrib + lik_contrib grad_all = {} for key in sgp_grad.keys(): grad_all[key] =", "return params class SGPSSM(Base_SGPSSM): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description emi_layer", "# \"\"\" # sgp_params = self.sgp_layer.get_hypers() # emi_params = self.emi_layer.get_hypers() # ssm_params =", "mode == self.PREV: cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :] n1_new =", "kfu, Ahat) Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu) vout = kff + Bkfukuf", "self.lik_layer.get_hypers() # params = dict(sgp_params) # params.update(lik_params) # return params class SGPSSM(Base_SGPSSM): \"\"\"Summary", "= self.emi_layer.output_probabilistic(mf, vf) # return my, vy # def update_hypers(self, params): # \"\"\"Summary", "\"\"\"Summary Returns: TYPE: Description \"\"\" post_1 = self.x_next_1 + self.x_prev_1 + self.x_up_1 post_2", "= np.einsum('nd,nm->ndm', dm, kfu) dBhat = np.einsum('nd,na,nb->ndab', dv, kfu, kfu) dvcav = np.einsum('ab,ndbc,ce->ndae',", "/ cav_v # neg_idxs = np.where(frac_t2 < 0) # frac_t2[neg_idxs] = 0 cur_t1", "0.5 * tn * mwn**2 / oneminuswnSwn term2d = -0.5 / alpha *", ":] + self.x_prev_1[idxs, :] cav_next_2 = self.x_up_2[idxs, :] + self.x_prev_2[idxs, :] cav_next_1 +=", "self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 for n in", "return mout, vout def sample(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE:", "prop_info[1], prop_info[2] k_i = self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i = self.means[idxs,", "import pdb from scipy.cluster.vq import kmeans2 from utils import * from kernels import", "dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = 2 * np.einsum('ndab,ndb->nda', dvcav, muhat)", "self.y_train[idxs, :] cav_m, cav_v, _, _ = 
self.compute_cavity_x(idxs, alpha) (m, v, extra_res) =", "mprop, vprop, cav_t_m, cav_t_v, alpha) grad_hyper, grad_cav = self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop, dvprop,", "SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat] return mout, vout, extra_res def forward_prop_thru_post(self,", "compute_kernel(2 * self.ls, 2 * self.sf, x, x) kff += np.diag(JITTER * np.ones(x.shape[0]))", "+ \\ (1 - alpha) * self.x_up_2 cav_up_1[0, :] += self.x_prior_1 cav_up_2[0, :]", "= np.zeros([Dout, M, ]) self.Suinv = np.zeros([Dout, M, M]) self.Splusmm = np.zeros([Dout, M,", "= self.Din self.x_prev_1 = np.zeros((N, Din)) self.x_prev_2 = np.zeros((N, Din)) self.x_next_1 = np.zeros((N,", "- 2 * dv * m dAhat = np.einsum('nd,nm->ndm', dm_all, psi1) dBhat =", "Description Returns: TYPE: Description \"\"\" # merge info from output cav_up_m, cav_up_v, _,", "Args: params (TYPE): Description key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" self.ls", "self.Din]) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], xn, alpha=alpha) logZn, dmn, dvn", "dmcav new_v = vcav - vcav**2 * (dmcav**2 - 2 * dvcav) new_n2", "parallel (bool, optional): Description decay (int, optional): Description Returns: TYPE: Description \"\"\" try:", "* np.log(oneminuswnSwn) term2 = N / Nb * np.sum(term2a + term2b + term2c", "self.run_pep_parallel( train_idxs, no_sweeps, alpha, compute_energy, display_steps) else: # TODO return self.run_pep_sequential( train_idxs, no_sweeps,", "self.sgp_layer.forward_prop_thru_cav( [n], xn, alpha=alpha) logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha)", "params (TYPE): Description # Returns: # TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params) #", "<filename>geepee/pep_models.py \"\"\"Summary # TODO: this should reuse base models! 
\"\"\" import sys import", "= grad_cav['mx'] dvx = grad_cav['vx'] new_m = cav_m + cav_v * dmx new_v", "prior_mean, prior_var, x_control, gp_emi, control_to_emi) self.dyn_layer = SGP_Layer( self.N - 1, self.Din +", "Bhat) dsf2, dls, dzu, dmx, dvx = compute_psi_derivatives( dpsi1, psi1, dpsi2, psi2, ls,", "- 1): # deal with the dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2", "alpha) * self.x_up_2 cav_up_1[0, :] += self.x_prior_1 cav_up_2[0, :] += self.x_prior_2 return cav_up_1", "# inputs (TYPE): Description # Returns: # TYPE: Description # \"\"\" # if", "compute_dm2=True) self.sgp_layer.update_factor( [idxs[i]], m_cav, v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) def", "x) kff += np.diag(JITTER * np.ones(x.shape[0])) kfu = compute_kernel(2 * self.ls, 2 *", "\"\"\"Summary Args: y_train (TYPE): Description hidden_size (TYPE): Description no_pseudo (TYPE): Description lik (str,", "dict to hold hypers, inducing points and parameters of q(U) N = self.N", "dKfu_via_vi = 2 * dv[:, :, np.newaxis] * (-p_i[:, np.newaxis, :] + KuuinvVcavKuuinvKufi)", "NotImplementedError: Description \"\"\" if mode == self.UP: cav_up_1 = self.x_prev_1 + self.x_next_1 +", "M = self.M ls = np.exp(self.ls) sf2 = np.exp(2 * self.sf) zu =", "Kuuinv, np.einsum('ndab,bc->ndac', Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2 * self.sf) psi1, psi2", "cav_prev_1, cav_prev_2 elif mode == self.NEXT: idxs = np.arange(0, self.N - 1) cav_next_1", "sf2 = np.exp(2 * self.sf) zu = self.zu Kuuinv = self.Kuuinv a =", "Suinvhat = self.Suinv - alpha * t2n SuinvMuhat = self.SuinvMu - alpha *", "alpha): \"\"\"Summary Args: m_prop (TYPE): Description v_prop (TYPE): Description m_t (TYPE): Description v_t", "import matplotlib.pyplot as plt import time import pdb from scipy.cluster.vq import kmeans2 from", "Sunew = Suhat - np.einsum( 'ndab,ndbc->ndac', Suhat, np.einsum('ndab,ndbc->ndac', inner, Suhat)) Suinvnew = np.linalg.inv(Sunew)", "output_size (TYPE): Description 
no_pseudo (TYPE): Description \"\"\" self.Din = Din = input_size self.Dout", "extra_res, decay=decay) self.update_factor_x( [n], alpha, grad_cav, cav_m_n, cav_v_n, decay=decay) else: # parallel update", "* 1.0 / batch_size / alpha # scale_logZ = 0 xb = self.x_train[idxs,", "- np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi, p_idlogZ_dvi) temp2 = np.transpose(temp1, [0, 1]) temp3 = np.einsum('na,ndb->ab', p_i,", "np.einsum('dab,na,nb->nd', B, kfu, kfu) vout = kff + Bpsi2 return mout, vout def", "Gauss_Layer, Probit_Layer, Gauss_Emis from base_models import Base_SGPR, Base_SGPLVM, Base_SGPSSM from config import *", "optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPSSM, self).__init__( y_train, hidden_size, no_pseudo, lik, prior_mean,", "(TYPE): Description x (TYPE): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\"", "extra_args, decay=0): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description grad_cav (TYPE): Description", "(TYPE): Description key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" self.ls = params['ls'", "* n2_ori # var_new_parallel = 1.0 / n1_damped # mean_new_parallel = var_new_parallel *", "optional): Description prop_mode (TYPE, optional): Description Returns: TYPE: Description \"\"\" N = self.N", "= params['ls' + key_suffix] self.sf = params['sf' + key_suffix] self.zu = params['zu' +", "+ self.x_up_1 post_2 = self.x_next_2 + self.x_prev_2 + self.x_up_2 post_1[0, :] += self.x_prior_1", "Bhat = \\ a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8] #", "here? 
self.t1[n, :, :] = t1_new self.t2[n, :, :, :] = t2_new #", "# update model with new hypers self.update_hypers(params) # run power-EP and compute grads", "%d' % n[0]) # plt.show() def sample(self, x): \"\"\"Summary Args: x (TYPE): Description", "self.M Din = self.Din Dout = self.Dout if x_train is None: ls =", "variables self.variances = np.zeros([N, Dout]) self.variances.fill(1e20) self.means = np.zeros([N, Dout]) # pep variables", "= self.y_train[n, :].reshape([1, self.Dout]) xn = self.x_train[n, :].reshape([1, self.Din]) (mn, vn, extra_res) =", "self.t02 = 1.0 / prior_var # TODO: alternatitve method for non real-valued data", "t1 (TYPE): Description t2 (TYPE): Description zu (TYPE): Description \"\"\" def __init__(self, no_train,", "dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, [n + 1], decay=decay, alpha=alpha) def inference(self, alpha=1.0,", "self.update_posterior() # axs[0].errorbar(self.zu[:, 0]+0.05, self.mu[0, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[0, :, :]))) # axs[1].errorbar(self.zu[:, 0]+0.05,", "== no_sweeps-1) for i in range(batch_size): m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity( [idxs[i]], alpha)", ":] += self.x_prior_2 vx = 1.0 / post_2 mx = post_1 / post_2", "1.0 / var_new_parallel # n2_new = mean_new_parallel / var_new_parallel # n1_ori = 1.0", "muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Smm, Kuuinv)) - Kuuinv psi0 =", "gp_emi: self.emi_layer = SGP_Layer( self.N, self.Din + self.Dcon_emi, self.Dout, self.M) # natural params", "sgp_layer (TYPE): Description sn (int): Description updated (bool): Description x_next_1 (TYPE): Description x_next_2", "+ (1 - alpha) * self.tx2[n, :] cav_v = 1.0 / cav_x2 cav_m", "0 # dm2 *= 0 # dm2 += 1e-16 # dv *= 0", "= \\ self.dyn_layer.forward_prop_thru_cav( [n], cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt, dvt =", "minit='points') else: randind = np.random.permutation(N) centroids = x_train[randind[0:M], :] zu = centroids if", "Description Dout (TYPE): 
Description emi_layer (TYPE): Description lik (TYPE): Description M (TYPE): Description", "* n1_ori # n2_damped = rho * n2_new + (1.0 - rho) *", "self.Kuu = np.zeros([M, M]) self.Kuuinv = np.zeros([M, M]) # variables for the hyperparameters", "vout = psi0 + Bpsi2 - mout**2 return mout, vout def backprop_grads_lvm(self, m,", ":] cur_n2 = self.x_next_2[idxs, :] n1_new = (1 - alpha) * cur_n1 +", "decay * t2_old + (1 - decay) * t2_new self.update_posterior() # axs[0].errorbar(self.zu[:, 0]+0.05,", "muhat, Suhat, SuinvMuhat, Suinvhat def update_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" # compute", "+= self.x_prior_1 post_2[0, :] += self.x_prior_2 vx = 1.0 / post_2 mx =", "init_params # def get_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description # \"\"\"", "scale_logZ * np.sum(logZ) dm_s = scale_logZ * dm dv_s = scale_logZ * dv", "Description prior_var (int, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPSSM, self).__init__( y_train, hidden_size,", "compute the posterior approximation self.Suinv = self.Kuuinv + np.sum(self.t2, axis=0) self.SuinvMu = np.sum(self.t1,", "npalg from scipy import special from scipy.optimize import minimize import matplotlib.pyplot as plt", "Args: n (TYPE): Description x (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description", "np.newaxis, :] * h_si, axis=2)) dlogZd_dmi = -dlogZd_dmi2 * (mean_i - np.sum(k_i[:, np.newaxis,", "self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] var_i_new = -1.0", "compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu) qfu = np.dot(kfu, self.Kuuinv) mf", "gamma_si = prop_info[0], prop_info[1], prop_info[2] k_i = self.Kfu[idxs, :] variance_i = self.variances[idxs, :]", "\"\"\" # prior factor cav_x1 = self.t01 + (1 - alpha) * self.tx1[n,", "range(batch_size): m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity( [idxs[i]], alpha) logZ, dm, dv, dm2 =", "# Returns: # TYPE: Description # \"\"\" # 
self.sgp_layer.update_hypers(params) # self.lik_layer.update_hypers(params) # def", "= np.zeros((N, Din)) self.x_prev_2 = np.zeros((N, Din)) self.x_next_1 = np.zeros((N, Din)) self.x_next_2 =", "vy = self.lik_layer.output_probabilistic(mf, vf) # return my, vy # def update_hypers(self, params): #", "mprop, vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( idxs, alpha, grad_cav, extra_res,", "def sample(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\" Su", "return params class SGP_Layer_rank_one(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description Kuu", "cav_up_1 / (cav_up_2 + 1e-16), 1.0 / (cav_up_2 + 1e-16), cav_up_1, cav_up_2 elif", "if not self.gp_emi: # only do this once at the begining for gaussian", "(TYPE): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" N = self.N", "signSu, logdetSu = np.linalg.slogdet(Su) signKuu, logdetKuu = np.linalg.slogdet(Kuu) Suinvm = np.einsum('dab,db->da', Suinv, mu)", "cav_x1 = self.t01 + (1 - alpha) * self.tx1[n, :] cav_x2 = self.t02", "params['sf' + key_suffix] self.zu = params['zu' + key_suffix] # update Kuu given new", "+ frac_n1 n2_new = (1 - alpha) * cur_n2 + frac_n2 self.x_next_1[idxs, :]", "if mode == self.UP: cav_up_1 = self.x_prev_1 + self.x_next_1 + \\ (1 -", "mx, vx, zu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = 2 *", "alpha * tn**2 * gn**2 * wnScavwn term2b = - gn * tn", "Dout]) # pep variables self.gamma = np.zeros([Dout, M]) self.beta = np.zeros([Dout, M, M])", "y = self.y_train[idxs, :] x = self.x_train[idxs, :] (m, v, extra_res) = \\", "dict(sgp_params) # params.update(emi_params) # params.update(ssm_params) # return params class SGP_Layer_rank_one(object): \"\"\"Summary Attributes: Din", "= [muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat] return mout, vout,", "np.exp(2 * self.sf) zu = self.zu Kuuinv = self.Kuuinv a = 
extra_args muhat,", "KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' class SGPLVM(Base_SGPLVM): \"\"\"Summary Attributes: Din (TYPE): Description Dout", "n2_new else: raise NotImplementedError('unknown mode') def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1", "alpha, compute_dm2=True) self.sgp_layer.update_factor( [idxs[i]], m_cav, v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False)", "\"\"\" self.ls = params['ls' + key_suffix] self.sf = params['sf' + key_suffix] self.zu =", "SGPSSM(Base_SGPSSM): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description emi_layer (TYPE): Description lik", "(TYPE): Description x_up_2 (TYPE): Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0,", "dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( mn,", "\\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( m, v, dm, dv,", "self.variances[idxs, :] mean_i = self.means[idxs, :] # compute cavity covariance betacavKuu = np.einsum('ndab,bc->ndac',", "Kfu = compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu) KuuinvKuf = np.dot(self.Kuuinv, Kfu.T) self.Kfu = Kfu", "= np.tile(np.linspace(-1, 1, M).reshape((M, 1)), (1, Din)) else: if N < 10000: centroids,", "Description output_size (TYPE): Description no_pseudo (TYPE): Description \"\"\" self.Din = Din = input_size", "self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv kff =", "= 2 * dv[:, :, np.newaxis] * (-p_i[:, np.newaxis, :] + KuuinvVcavKuuinvKufi) dKfu_via_logZ", "* self.ls, 2 * self.sf, x, x) kff += np.diag(JITTER * np.ones(x.shape[0])) kfu", "muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) - Kuuinv kff =", "dpsi1, psi1, dpsi2, psi2, ls, sf2, mx, vx, zu) dvcav = 
np.einsum('ab,ndbc,ce->ndae', Kuuinv,", "= self.lik_layer.get_hypers() # params = dict(sgp_params) # params.update(lik_params) # return params class SGPSSM(Base_SGPSSM):", "np.linalg.inv(self.Kuu) def compute_cavity(self, n, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description alpha (float, optional):", "Description n1cav (TYPE): Description n2cav (TYPE): Description decay (float, optional): Description alpha (float,", "TYPE: Description \"\"\" mx, vx = self.get_posterior_x() my, vy, vyn = self.emi_layer.output_probabilistic(mx, vx)", "2 * sf2 * dsf2 # compute the gradients Vmm = Su +", "info from output cav_up_m, cav_up_v, _, _ = self.compute_cavity_x(self.UP, alpha) if not self.gp_emi:", "%d/%d' % (e, no_epochs) if parallel: self.inf_parallel(e, alpha, decay) else: self.inf_sequential(e, alpha, decay)", "= compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu) mout = np.einsum('nm,ndm->nd', kfu,", "backprop_grads_reg(self, idxs, m, v, dm, dm2, dv, x, alpha, prop_info): N = self.N", "psi2, Ahat, Bhat = \\ a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7],", "TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = \\ extra_args[0], extra_args[1], extra_args[2], extra_args[3]", "np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu) vout = kff + Bkfukuf extra_res = [muhat, Suhat,", "x, self.zu) dls = dls * np.exp(self.ls) dsf2 += np.sum(dv) dsf = 2", "mean_new_parallel / var_new_parallel # n1_ori = 1.0 / variance_i # n2_ori = mean_i", "dv, extra_args, mx, vx, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v (TYPE): Description", "Description no_epochs (int, optional): Description parallel (bool, optional): Description decay (float, optional): Description", "A) Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu) vout = kff + Bpsi2 return", "self.update_posterior(x_train, new_hypers=True) def compute_cavity(self, idxs, alpha): # deletion p_i = self.KuuinvKuf[:, idxs].T[:, np.newaxis,", "# pep variables self.gamma = np.zeros([Dout, M]) self.beta = np.zeros([Dout, M, M]) #", 
"(TYPE): Description Dout (TYPE): Description emi_layer (TYPE): Description lik (TYPE): Description M (TYPE):", "+ np.einsum('nda,ndb->ndab', muhat, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Smm, Kuuinv)) -", "= 0 (sign, logdet) = np.linalg.slogdet(self.Suhat) phi_cavity = 0.5 * np.sum(logdet) phi_cavity +=", "np.einsum('dab,nab->nd', B, psi2) vout = psi0 + Bpsi2 - mout**2 return mout, vout", "import special from scipy.optimize import minimize import matplotlib.pyplot as plt import time import", "hd1 = h_si * dlogZd_dmi[:, :, np.newaxis] hd2h = np.einsum('nda,ndb->ndab', h_si, h_si) *", "for k in range(no_sweeps): if k % display_steps == 0: print 'PEP, epoch:", "{} grad_all = {} for k in range(no_sweeps): # if k % display_steps", "= kff - np.dot(qfu, kfu.T) Lf = np.linalg.cholesky(vf) epsilon = np.random.randn(x.shape[0], self.Dout) f_sample", "Description idxs (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description Raises: NotImplementedError: Description", "Returns: # TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.get_hypers() # lik_params =", "n2_new elif mode == self.PREV: idxs = np.arange(1, self.N) cur_n1 = self.x_prev_1[idxs, :]", "Suinvhat, Smm, psi1, psi2, Ahat, Bhat] return mout, vout, extra_res def forward_prop_thru_post(self, mx,", "grad_cav = {'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_cav def update_factor(self, n, alpha,", "p_i, Sucav) wnScavwn = np.einsum('ndb,nb->nd', wnScav, p_i) wnScavSinvm = np.sum(wnScav * Suinvm, axis=2)", "M]) self.t2 = np.zeros([N, Dout, M, M]) # TODO self.mu = np.zeros([Dout, M,", "tn * wnScavSinvm term2c = 0.5 * tn * mwn**2 / oneminuswnSwn term2d", "* n1_new self.x_prev_2[idxs, :] = decay * cur_n2 + (1 - decay) *", "self.mu Lu = np.linalg.cholesky(Su) epsilon = np.random.randn(self.Dout, self.M) u_sample = mu + np.einsum('dab,db->da',", "(1.0 - alpha) * t2_old + t2_frac if t1_new.shape[0] == 1: # TODO:", "cav_m, cav_v, alpha=alpha) 
self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( idxs, alpha, grad_cav,", "grad_cav, extra_res, decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' class SGPLVM(Base_SGPLVM): \"\"\"Summary Attributes:", "= cdist(X1, X1, 'euclidean') triu_ind = np.triu_indices(N) ls = np.zeros((Din, )) d2imed =", "(sign, logdet) = np.linalg.slogdet(self.Suhat) phi_cavity = 0.5 * np.sum(logdet) phi_cavity += 0.5 *", "import kmeans2 from utils import * from kernels import * from lik_layers import", "Description Returns: TYPE: Description \"\"\" if vx is None: return self._forward_prop_deterministic_thru_post(mx) else: return", "alpha) (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, cav_m, cav_v, alpha=alpha) logZ, dm,", "prop_info[2] k_i = self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :]", "self.zu) mout = np.einsum('nm,ndm->nd', psi1, Ahat) Bhatpsi2 = np.einsum('ndab,nab->nd', Bhat, psi2) vout =", "NotImplementedError('unknown mode') def compute_transition_tilted(self, m_prop, v_prop, m_t, v_t, alpha): \"\"\"Summary Args: m_prop (TYPE):", "params, x_train, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix (str, optional): Description Returns:", "self.t1[n, :, :] t2n = self.t2[n, :, :, :] Suinvhat = self.Suinv -", "Description \"\"\" N = self.N M = self.M ls = np.exp(self.ls) sf2 =", "= self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) Smm = self.Su + np.einsum('da,db->dab', self.mu,", "TYPE: Description \"\"\" self.ls = params['ls' + key_suffix] self.sf = params['sf' + key_suffix]", "dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x_sequential(", "np.newaxis] gamma_si = gamma + hd1 beta_si = beta - hd2h # projection", "self.Dout * Kuuinv - np.sum(np.einsum('ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Vmm, Kuuinv)), 
axis=0) S = 0.5", "term1 - term2 KuuinvMcav = np.einsum('ab,ndb->nda', Kuuinv, mcav) dmiKuuinvMcav = dm[:, :, np.newaxis]", "= np.dot(self.Kuuinv, Kfu.T) self.Kfu = Kfu self.KuuinvKuf = KuuinvKuf self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf,", "var_i_new = -1.0 / dm2 - np.sum(k_i[:, np.newaxis, :] * h, axis=2) mean_i_new", "Dout = self.Dout Kuu = self.Kuu Kuuinv = self.Kuuinv Su = self.Su mu", "Returns: TYPE: Description \"\"\" Kuuinv = self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) Smm", "lik) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0.5):", "self.x_next_1[idxs, :] = decay * cur_n1 + (1 - decay) * n1_new self.x_next_2[idxs,", "if vx is None: return self._forward_prop_deterministic_thru_post(mx) else: return self._forward_prop_random_thru_post_mm(mx, vx) def _forward_prop_deterministic_thru_post(self, x):", "# parallel update self.t1[n, :, :] = decay * t1_old + (1 -", "params def update_hypers(self, params, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix (str, optional):", "'NEXT' def inf_parallel(self, epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE): Description alpha (TYPE):", "np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv kff = np.exp(2 * self.sf)", "x_train is not None: Kfu = compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu) KuuinvKuf = np.dot(self.Kuuinv,", "TODO def _forward_prop_random_thru_post_mm(self, mx, vx): \"\"\"Summary Args: mx (TYPE): Description vx (TYPE): Description", "gamma_si = prop_info[0], prop_info[1], prop_info[2] kfu = self.Kfu[idxs, :] variance_i = self.variances[idxs, :]", "u_sample = mu + np.einsum('dab,db->da', Lu, epsilon) kff = compute_kernel(2 * self.ls, 2", "power-EP and compute grads no_ep_sweeps = 10 # TODO: put this in config", "- np.einsum('ndab,nb->nda', beta_si, k_i) m_si_i = np.einsum('na,nda->nd', k_i, gamma_si) v_si_ii = k_ii -", 
"x_train (TYPE): Description \"\"\" def __init__(self, x_train, y_train, no_pseudo, lik='Gaussian'): \"\"\"Summary Args: x_train", "X1, 'euclidean') triu_ind = np.triu_indices(N) ls = np.zeros((Din, )) d2imed = np.median(x_dist[triu_ind]) for", "(TYPE): Description updated (bool): Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0,", "backprop_grads_reg(self, m, v, dm, dv, extra_args, x, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description", "t01 (TYPE): Description t02 (TYPE): Description tx1 (TYPE): Description tx2 (TYPE): Description updated", ":] = var_new_parallel self.means[idxs, :] = mean_new_parallel def backprop_grads_reg(self, idxs, m, v, dm,", "dBhat = np.einsum('nd,na,nb->ndab', dv, kfu, kfu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav", "x_dist = cdist(X1, X1, 'euclidean') triu_ind = np.triu_indices(N) ls = np.zeros((Din, )) d2imed", "optional): Description Returns: TYPE: Description \"\"\" N = self.N M = self.M ls", "* self.ls, 2 * self.sf, x, self.zu) qfu = np.dot(kfu, self.Kuuinv) mf =", "Description alpha (float, optional): Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\" new_m", "prop_mode (TYPE, optional): Description Returns: TYPE: Description \"\"\" N = self.N if mb_size", "Returns: TYPE: Description \"\"\" post_1 = self.t01 + self.tx1 post_2 = self.t02 +", "alpha) * cur_n1 + frac_n1 n2_new = (1 - alpha) * cur_n2 +", "self.Kuuinv + np.sum(self.t2, axis=0) self.SuinvMu = np.sum(self.t1, axis=0) self.Su = np.linalg.inv(self.Suinv) self.mu =", "self.Dout]) xn = self.x_train[n, :].reshape([1, self.Din]) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n],", "1.0 / prior_var self.UP, self.PREV, self.NEXT = 'UP', 'PREV', 'NEXT' def inf_parallel(self, epoch,", "print 'Caught KeyboardInterrupt ...' 
def compute_cavity_x(self, n, alpha): \"\"\"Summary Args: n (TYPE): Description", "Description N (TYPE): Description sgp_layer (TYPE): Description sn (int): Description updated (bool): Description", "dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) def update_hypers(self, params): \"\"\"Summary Args: params", "Sucav) wnScavwn = np.einsum('ndb,nb->nd', wnScav, p_i) wnScavSinvm = np.sum(wnScav * Suinvm, axis=2) wnS", "alpha, decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' def compute_cavity_x(self, mode, alpha): \"\"\"Summary", "posterior self.Su = Sunew[0, :, :, :] self.mu = munew[0, :, :] self.Suinv", "2 * np.einsum('ndab,ndb->nda', dvcav, muhat) \\ + np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {}", "(1 - decay) * n2_new elif mode == self.PREV: idxs = np.arange(1, self.N)", "cav_t_m, cav_t_v, cav_t_1, cav_t_2, [n + 1], decay=decay, alpha=alpha) def inference(self, alpha=1.0, no_epochs=10,", "self.M ls = np.exp(self.ls) sf2 = np.exp(2 * self.sf) zu = self.zu Kuuinv", "- decay) * n2_new elif mode == self.PREV: idxs = np.arange(1, self.N) cur_n1", "self.y_train[idxs, :] for k in range(no_sweeps): if k % display_steps == 0: print", "np.sum(np.einsum('ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Vmm, Kuuinv)), axis=0) S = 0.5 * S + dKuu_via_logZ", "in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) cav_m_n, cav_v_n, _, _ = self.compute_cavity_x([", "(1 - decay) * t2_new self.update_posterior() # axs[0].errorbar(self.zu[:, 0]+0.05, self.mu[0, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[0,", "= new_n2 * new_m frac_n2 = new_n2 - n2cav frac_n1 = new_n1 -", "np.sum(self.t1, axis=0) self.Su = np.linalg.inv(self.Suinv) self.mu = np.einsum('dab,db->da', self.Su, self.SuinvMu) def init_hypers(self, x_train=None,", "compute_cavity_x(self, n, alpha): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description Returns: TYPE:", ":, :] t1_new = (1.0 - alpha) * t1_old + t1_frac t2_new =", 
"v_sum) alpha_term = 0.5 * (1 - alpha) * np.log(2 * np.pi *", "np.zeros([Dout, M, ]) self.Su = np.zeros([Dout, M, M]) self.SuinvMu = np.zeros([Dout, M, ])", "np import scipy.linalg as npalg from scipy import special from scipy.optimize import minimize", "def _forward_prop_random_thru_cav_mm(self, n, mx, vx, alpha): \"\"\"Summary Args: n (TYPE): Description mx (TYPE):", "grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, decay=decay, alpha=alpha) self.update_factor_x( self.PREV, dmt, dvt, cav_t_m, cav_t_v,", "alpha (TYPE): Description Returns: TYPE: Description \"\"\" # prior factor cav_x1 = self.t01", "self.Kuuinv)) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: x_train (None, optional): Description key_suffix (str,", "= scale_logZ * dm dv_s = scale_logZ * dv dm2_s = scale_logZ *", "in grad_all.keys(): grad_all[key] /= N return energy, grad_all def run_pep_sequential(self, idxs, no_sweeps, alpha,", "pdb from scipy.cluster.vq import kmeans2 from utils import * from kernels import *", "/ (variance_i/alpha - np.sum(k_i[:, np.newaxis, :] * h_si, axis=2)) dlogZd_dmi = -dlogZd_dmi2 *", "(TYPE): Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description updated (bool):", "= decay * t2_old + (1 - decay) * t2_new self.update_posterior() # axs[0].errorbar(self.zu[:,", "dataset # TODO: minibatch parallel idxs = np.arange(self.N) y = self.y_train[idxs, :] cav_m,", "alpha, compute_energy, display_steps): batch_size = idxs.shape[0] scale_logZ = - self.N * 1.0 /", "from lik_layers import Gauss_Layer, Probit_Layer, Gauss_Emis from base_models import Base_SGPR, Base_SGPLVM, Base_SGPSSM from", "self.sf, mx, vx, self.zu) mout = np.einsum('nm,dm->nd', psi1, A) Bpsi2 = np.einsum('dab,nab->nd', B,", "self.sgp_layer.backprop_grads_reg( m, v, dm, dv, extra_res, x, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res,", "N = no_train # factor variables self.variances = np.zeros([N, Dout]) 
self.variances.fill(1e20) self.means =", "# variables for the hyperparameters self.ls = np.zeros([Din, ]) self.sf = 0 def", "idxs = np.arange(0, self.N - 1) cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :]", "= self.get_posterior_x() my, vy, vyn = self.emi_layer.output_probabilistic(mx, vx) return my, vy, vyn #", "np.pi * sn2) - 0.5 * np.log(alpha) logZ = exp_term + const_term +", "cav_t_v, alpha) grad_hyper, grad_cav = self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v,", "(TYPE): Description decay (float, optional): Description alpha (float, optional): Description Returns: TYPE: Description", "(TYPE): Description dvcav (TYPE): Description mcav (TYPE): Description vcav (TYPE): Description n1cav (TYPE):", "grad_all[key] = sgp_grad[key] for key in lik_grad.keys(): grad_all[key] = lik_grad[key] energy /= N", "Description x_next_1 (TYPE): Description x_next_2 (TYPE): Description x_prev_1 (TYPE): Description x_prev_2 (TYPE): Description", "alpha): \"\"\"Summary Args: mode (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description Raises:", "batch_size = idxs.shape[0] scale_logZ = - self.N * 1.0 / batch_size / alpha", "= -0.5 / v_sum + 0.5 * m_diff**2 / v_sum**2 dvprop = -0.5", "# TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params) # self.emi_layer.update_hypers(params) # self.sn = params['sn']", "* self.x_up_2 cav_up_1[0, :] += self.x_prior_1 cav_up_2[0, :] += self.x_prior_2 return cav_up_1 /", "vcav * dmcav new_v = vcav - vcav**2 * (dmcav**2 - 2 *", "return cav_m, cav_v, cav_x1, cav_x2 def update_factor_x(self, n, alpha, grad_cav, cav_m, cav_v, decay=0.0):", "1 / (1 / var_i_new + 1 / variance_i * (1 - alpha))", "vcav (TYPE): Description n1cav (TYPE): Description n2cav (TYPE): Description idxs (TYPE): Description decay", "n (TYPE): Description alpha (TYPE): Description grad_cav (TYPE): Description extra_args (TYPE): Description decay", "self.Dout, self.M) # natural params for latent 
variables N = self.N Din =", "Attributes: Din (TYPE): Description Dout (TYPE): Description Kuu (TYPE): Description Kuuinv (TYPE): Description", "prior_mean=0, prior_var=1): \"\"\"Summary Args: y_train (TYPE): Description hidden_size (TYPE): Description no_pseudo (TYPE): Description", "= self.mu Suinv = self.Suinv p_i = self.KuuinvKuf[:, idxs].T h, beta_si, gamma_si =", "prior_mean (int, optional): Description prior_var (int, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPSSM,", "Description dv (TYPE): Description extra_args (TYPE): Description mx (TYPE): Description vx (TYPE): Description", "dmprop = m_diff / v_sum return logZ, dmprop, dvprop, dmt, dvt def update_factor_x(", "dmcav, dvcav = grad_cav['mcav'], grad_cav['vcav'] # perform Power-EP update munew = muhat +", "= - self.N * 1.0 / batch_size / alpha # scale_logZ = 0", "- self.N * 1.0 / batch_size / alpha xb = self.x_train[idxs, :] yb", "h_si, axis=2)) dlogZd_dmi = -dlogZd_dmi2 * (mean_i - np.sum(k_i[:, np.newaxis, :] * gamma,", "self.Dout, self.M) def objective_function(self, params, mb_size, alpha=1.0, prop_mode=PROP_MM): \"\"\"Summary Args: params (TYPE): Description", "(TYPE): Description decay (float, optional): Description Returns: TYPE: Description \"\"\" dmx = grad_cav['mx']", "= self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( idxs, alpha,", "(TYPE): Description extra_args (TYPE): Description x (TYPE): Description alpha (float, optional): Description Returns:", "1e-16), 1.0 / (cav_up_2 + 1e-16), cav_up_1, cav_up_2 elif mode == self.PREV: idxs", "/ post_2 mx = post_1 / post_2 return mx, vx # def predict_f(self,", "optional): Description # Returns: # TYPE: Description # \"\"\" # if not self.updated:", "init_params.update(ssm_params) # return init_params # def get_hypers(self): # \"\"\"Summary # Returns: # TYPE:", "np.exp(2 * self.sf) psi1, psi2 = compute_psi_weave( 2 * self.ls, 2 
* self.sf,", "+ temp2 + temp3 dKuu_via_logZ = dKuu_via_mi + dKuu_via_vi dKfu_via_mi = dmiKuuinvMcav dKfu_via_vi", "/ alpha - 1.0 scale_cav = - N * 1.0 / alpha scale_prior", "Dout (TYPE): Description lik_layer (TYPE): Description M (TYPE): Description N (TYPE): Description sgp_layer", "y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) def inference(self, alpha=1.0, no_epochs=10,", "= np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat) return muhat, Suhat, SuinvMuhat, Suinvhat def update_posterior(self): \"\"\"Summary Returns:", "= np.einsum('ab,db->da', Kuuinv, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) -", "* self.sn) v_sum = v_t + v_prop + sn2 / alpha m_diff =", "grad_all = {} for key in sgp_grad.keys(): grad_all[key] = sgp_grad[key] for key in", "(TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv = self.Kuuinv A = np.einsum('ab,db->da', Kuuinv,", "self.compute_phi_prior() phi_post = self.compute_phi_posterior() phi_cav = self.compute_phi_cavity() phi = scale_prior * phi_prior +", "time import pdb from scipy.cluster.vq import kmeans2 from utils import * from kernels", "vx (None, optional): Description Returns: TYPE: Description \"\"\" if vx is None: return", ":, :, :] self.mu = munew[0, :, :] self.Suinv = Suinvnew[0, :, :,", "= mean_new_parallel def backprop_grads_reg(self, idxs, m, v, dm, dm2, dv, x, alpha, prop_info):", "mcav (TYPE): Description vcav (TYPE): Description n1cav (TYPE): Description n2cav (TYPE): Description decay", "= new_p2 * new_m frac_t2 = new_p2 - 1.0 / cav_v frac_t1 =", "(TYPE): Description tx1 (TYPE): Description tx2 (TYPE): Description updated (bool): Description \"\"\" def", "f_sample def compute_kuu(self): \"\"\"Summary Returns: TYPE: Description \"\"\" # update kuu and kuuinv", "self.t02 + (1 - alpha) * self.tx2[n, :] cav_v = 1.0 / cav_x2", "= self.means[idxs, :] tn = 1.0 / variance_i gn = mean_i wnScav =", "post_v = 0.1 * np.ones_like(post_m) post_2 
= 1.0 / post_v post_1 = post_2", "alpha, grad_cav, cav_m_n, cav_v_n, decay=decay) else: # parallel update for entire dataset #", "kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu) mout = np.einsum('nm,ndm->nd',", "(1 - alpha) * self.x_next_1[idxs, :] cav_next_2 += (1 - alpha) * self.x_next_2[idxs,", "axis=0) S = 0.5 * S + dKuu_via_logZ dhyp = d_trace_MKzz_dhypers( 2*self.ls, 2*self.sf,", "params = dict() params['sf' + key_suffix] = sf params['ls' + key_suffix] = ls", "Description alpha (float, optional): Description prop_mode (TYPE, optional): Description Returns: TYPE: Description \"\"\"", "= np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_cav = {'mcav': dmcav, 'vcav': dvcav}", "sgp_layer (TYPE): Description updated (bool): Description x_train (TYPE): Description \"\"\" def __init__(self, x_train,", "variables self.gamma = np.zeros([Dout, M]) self.beta = np.zeros([Dout, M, M]) # numpy variable", "import time import pdb from scipy.cluster.vq import kmeans2 from utils import * from", "return mx, vx def get_posterior_y(self): \"\"\"Summary Returns: TYPE: Description \"\"\" mx, vx =", "alpha (TYPE): Description grad_cav (TYPE): Description extra_args (TYPE): Description decay (int, optional): Description", "lik_params = self.emi_layer.init_hypers() # ssm_params = {'sn': np.log(0.001)} # init_params = dict(sgp_params) #", "Lf = np.linalg.cholesky(vf) epsilon = np.random.randn(x.shape[0], self.Dout) f_sample = mf + np.einsum('ab,bd->ad', Lf,", "axis=2) mwn = np.sum(mu * p_i[:, np.newaxis, :], axis=2) oneminuswnSwn = 1 -", "_forward_prop_deterministic_thru_cav(self, n, x, alpha): \"\"\"Summary Args: n (TYPE): Description x (TYPE): Description alpha", "Description ls (TYPE): Description M (TYPE): Description mu (TYPE): Description N (TYPE): Description", "= post_2 - self.t02 def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0): \"\"\"Summary Args: alpha", "= -0.5 / v_sum + 0.5 * m_diff**2 / v_sum**2 dmt = m_diff", "\"\"\" (sign, 
logdet) = np.linalg.slogdet(self.Kuu) logZ_prior = self.Dout * 0.5 * logdet return", ":] + self.x_next_1[idxs, :] cav_prev_2 = self.x_up_2[idxs, :] + self.x_next_2[idxs, :] cav_prev_1 +=", "given new hypers self.compute_kuu() # compute mu and Su for each layer self.update_posterior()", "def __init__(self, x_train, y_train, no_pseudo, lik='Gaussian'): \"\"\"Summary Args: x_train (TYPE): Description y_train (TYPE):", "mn, vn, dmn, dvn, extra_res, cav_m_n, cav_v_n, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res,", "= scale_prior * phi_prior + scale_post * phi_post + scale_cav * phi_cav return", "+= np.diag(JITTER * np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def compute_cavity(self, n, alpha=1.0): \"\"\"Summary", "# self.lik_layer.update_hypers(params) # def init_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description #", "prop_info = self.sgp_layer.compute_cavity(idxs, alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb,", "= self.Kff_diag[idxs][:, np.newaxis] gamma = self.gamma beta = self.beta h_si = p_i -", "= \\ self.compute_cavity_x(self.PREV, alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x(self.NEXT, alpha) idxs", "% (k, no_sweeps) find_log_lik = compute_energy and (k == no_sweeps-1) for i in", "* self.sf) zu = self.zu Kuuinv = self.Kuuinv a = extra_args muhat, Suhat,", "# Returns: # TYPE: Description # \"\"\" # if not self.updated: # self.sgp_layer.update_posterior()", "cav_m (TYPE): Description cav_v (TYPE): Description decay (float, optional): Description Returns: TYPE: Description", "Dout, M]) self.t2 = np.zeros([N, Dout, M, M]) # TODO self.mu = np.zeros([Dout,", "vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( mn, vn, dmn, dvn, extra_res, cav_m_n,", "TYPE: Description \"\"\" try: for e in range(no_epochs): if e % 50 ==", "(TYPE): Description dmcav (TYPE): Description dvcav (TYPE): Description mcav (TYPE): Description vcav (TYPE):", 
"np.newaxis] hd2h = np.einsum('nda,ndb->ndab', h_si, h_si) * dlogZd_dmi2[:, :, np.newaxis, np.newaxis] gamma_si =", "self._forward_prop_random_thru_post_mm(mx, vx) def _forward_prop_deterministic_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description", "parallel update for entire dataset # TODO: minibatch parallel idxs = np.arange(self.N) y", "self.x_prev_1[idxs, :] = decay * cur_n1 + (1 - decay) * n1_new self.x_prev_2[idxs,", "Ahat, Bhat = \\ a[0], a[1], a[2], a[3], a[4], a[5], a[6] Kuuinv =", "/ v_sum return logZ, dmprop, dvprop, dmt, dvt def update_factor_x( self, mode, dmcav,", "dvprop = -0.5 / v_sum + 0.5 * m_diff**2 / v_sum**2 dmt =", "* h, axis=2) mean_i_new = mcav - dm / dm2 var_new_parallel = 1", "points and parameters of q(U) N = self.N M = self.M Din =", "= self.t01 + self.tx1 post_2 = self.t02 + self.tx2 vx = 1.0 /", "dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb, alpha, compute_dm2=True) # dm *= 0", "*= 0 self.sgp_layer.update_factor( idxs, m_cav, v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False)", "+ self.x_next_1[idxs, :] cav_prev_2 = self.x_up_2[idxs, :] + self.x_next_2[idxs, :] cav_prev_1 += (1", "NotImplementedError('unknown mode') def compute_cavity_x_sequential(self, mode, idxs, alpha): \"\"\"Summary Args: mode (TYPE): Description idxs", "alternatitve method for non real-valued data post_m = PCA_reduce(y_train, self.Din) post_m_mean = np.mean(post_m,", "no_ep_sweeps, alpha, parallel, compute_energy=True) return energy, grad_all def run_pep(self, train_idxs, no_sweeps, alpha, parallel,", "return cav_prev_1 / cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT:", "params, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix (str, optional): Description Returns: TYPE:", "extra_args muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat = \\ a[0], a[1], a[2],", "# my, vy = self.emi_layer.output_probabilistic(mf, vf) # 
return my, vy # def update_hypers(self,", "mean_i / variance_i # n1_damped = rho * n1_new + (1.0 - rho)", "v_cav, prop_info = self.sgp_layer.compute_cavity( [idxs[i]], alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav,", "np.pi * v_sum) alpha_term = 0.5 * (1 - alpha) * np.log(2 *", "alpha # scale_logZ = 0 xb = self.x_train[idxs, :] yb = self.y_train[idxs, :]", "the dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x_sequential(self.PREV, [n +", "\"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv = self.Kuuinv A", "extra_args, mx, vx, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v (TYPE): Description dm", "parallel=False, decay=0): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int, optional): Description parallel", "# \"\"\" # sgp_params = self.sgp_layer.init_hypers() # lik_params = self.emi_layer.init_hypers() # ssm_params =", "- decay) * n1_new self.x_next_2[idxs, :] = decay * cur_n2 + (1 -", "= up_2 # deal with the dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2", "self.M = M = no_pseudo self.N = N = no_train # factor variables", "mx, vx, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v (TYPE): Description dm (TYPE):", "(float, optional): Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\" new_m = mcav", "np.exp(self.ls) sf2 = np.exp(2 * self.sf) zu = self.zu Kuuinv = self.Kuuinv a", "# TODO: this should reuse base models! 
\"\"\" import sys import math import", "psi1 and psi2 dpsi1 = np.einsum('nd,ndm->nm', dm_all, Ahat) dpsi2 = np.einsum('nd,ndab->nab', dv, Bhat)", "= 1.0 / var_new_parallel # n2_new = mean_new_parallel / var_new_parallel # n1_ori =", "\"\"\"Summary # Args: # inputs (TYPE): Description # Returns: # TYPE: Description #", "beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2] k_i = self.Kfu[idxs, :] variance_i = self.variances[idxs,", "hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1): \"\"\"Summary Args: y_train (TYPE): Description hidden_size (TYPE): Description", "dm, dm2, dv, alpha, prop_info): h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2] k_i", "Su (TYPE): Description Suinv (TYPE): Description SuinvMu (TYPE): Description t1 (TYPE): Description t2", "else: raise NotImplementedError('unknown mode') def update_factor_x_sequential( self, mode, dmcav, dvcav, mcav, vcav, n1cav,", "dm, dv, extra_args, mx, vx, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v (TYPE):", "* t1n Suhat = np.linalg.inv(Suinvhat) muhat = np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat) return muhat, Suhat,", "dlogZd_dmi = -dlogZd_dmi2 * (mean_i - np.sum(k_i[:, np.newaxis, :] * gamma, axis=2)) hd1", "grad_cav['vx'] new_m = cav_m + cav_v * dmx new_v = cav_v - cav_v**2", "Description x_train (TYPE): Description \"\"\" def __init__(self, x_train, y_train, no_pseudo, lik='Gaussian'): \"\"\"Summary Args:", "/ post_2 return mx, vx # def predict_f(self, inputs): # \"\"\"Summary # Args:", "config import * class SGP_Layer(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description", "variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] dlogZd_dmi2 = 1.0 / (variance_i/alpha", "Returns: # TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.init_hypers() # lik_params =", "self.sgp_layer.compute_cavity(idxs, alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb, alpha, compute_dm2=True)", "dmcav, 
'vcav': dvcav} return grad_hyper, grad_cav def update_factor(self, n, alpha, grad_cav, extra_args, decay=0):", "a[4], a[5], a[6], a[7], a[8] # compute grads wrt Ahat and Bhat dm_all", "def update_factor_x_sequential( self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav, idxs, decay=0.0, alpha=1.0):", "sf, zu, zu) self.Kuu += np.diag(JITTER * np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def", "= idxs.shape[0] sf2 = np.exp(2 * self.sf) Dout = self.Dout Kuu = self.Kuu", "* dm dv_s = scale_logZ * dv dm2_s = scale_logZ * dm2 sgp_contrib,", "Returns: TYPE: Description \"\"\" # compute the leave one out moments t1n =", "grad_cav, extra_res, decay=decay) self.update_factor_x_sequential( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, [n], decay=decay,", "def compute_cavity(self, idxs, alpha): # deletion p_i = self.KuuinvKuf[:, idxs].T[:, np.newaxis, :] k_i", "= np.zeros([N, Dout]) # pep variables self.gamma = np.zeros([Dout, M]) self.beta = np.zeros([Dout,", "vout = kff + Bkfukuf extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat,", "raise NotImplementedError('unknown mode') def update_factor_x_sequential( self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav,", "vx, alpha) def _forward_prop_deterministic_thru_cav(self, n, x, alpha): \"\"\"Summary Args: n (TYPE): Description x", "self.KuuinvKuf, self.means / self.variances) Vinv = self.Kuuinv + T2u self.Suinv = Vinv self.Su", "def update_posterior(self, x_train=None, new_hypers=False): \"\"\"Summary Returns: TYPE: Description \"\"\" # compute the posterior", "update munew = muhat + np.einsum('ndab,ndb->nda', Suhat, dmcav) inner = np.einsum('nda,ndb->ndab', dmcav, dmcav)", ":] k_ii = self.Kff_diag[idxs][:, np.newaxis] gamma = self.gamma beta = self.beta h_si =", "latent variables self.tx1 = np.zeros((self.N, self.Din)) self.tx2 = np.zeros((self.N, self.Din)) self.t01 = prior_mean", "idxs, alpha, grad_cav, extra_res, decay=decay) except KeyboardInterrupt: print 'Caught 
KeyboardInterrupt ...' class SGPLVM(Base_SGPLVM):", "n2cav frac_n1 = new_n1 - n1cav if mode == self.NEXT: idxs = np.arange(0,", "1], alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x_sequential(self.NEXT, [n], alpha) (mprop, vprop,", "k_i) return m_si_i, v_si_ii, [h, beta_si, gamma_si] def update_factor(self, idxs, mcav, vcav, dm,", "Description input_size (TYPE): Description output_size (TYPE): Description no_pseudo (TYPE): Description \"\"\" self.Din =", "self._forward_prop_random_thru_cav_mm(n, mx, vx, alpha) def _forward_prop_deterministic_thru_cav(self, n, x, alpha): \"\"\"Summary Args: n (TYPE):", "= np.linalg.cholesky(vf) epsilon = np.random.randn(x.shape[0], self.Dout) f_sample = mf + np.einsum('ab,bd->ad', Lf, epsilon)", "# TODO: do damping here? self.t1[n, :, :] = t1_new self.t2[n, :, :,", "no_epochs=10, parallel=True, decay=0): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int, optional): Description", "Description decay (float, optional): Description alpha (float, optional): Description Returns: TYPE: Description Raises:", "= np.linalg.cholesky(Su) epsilon = np.random.randn(self.Dout, self.M) u_sample = mu + np.einsum('dab,db->da', Lu, epsilon)", "Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat = \\ a[0], a[1], a[2], a[3], a[4],", "= dls * np.exp(self.ls) dsf2 += np.sum(dv) dsf = 2 * sf2 *", "SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat = \\ a[0], a[1], a[2], a[3],", "factor cav_x1 = self.t01 + (1 - alpha) * self.tx1[n, :] cav_x2 =", "y = self.y_train[idxs, :] cav_m, cav_v, _, _ = self.compute_cavity_x(idxs, alpha) (m, v,", "= \\ a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8] # compute", "natural params for latent variables N = self.N Din = self.Din self.x_prev_1 =", "Args: x_train (None, optional): Description key_suffix (str, optional): Description Returns: TYPE: Description \"\"\"", "post_2 = self.t02 + self.tx2 vx = 1.0 / post_2 mx = post_1", "the gradients Vmm = Su + np.einsum('da,db->dab', 
mu, mu) S = self.Dout *", "self.inf_parallel(e, alpha, decay) else: self.inf_sequential(e, alpha, decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...'", "(float, optional): Description prop_mode (TYPE, optional): Description Returns: TYPE: Description \"\"\" N =", "dvx = compute_psi_derivatives( dpsi1, psi1, dpsi2, psi2, ls, sf2, mx, vx, zu) dvcav", "= self.x_prev_1 + self.x_next_1 + \\ (1 - alpha) * self.x_up_1 cav_up_2 =", ":, :] self.Suinv = Suinvnew[0, :, :, :] self.SuinvMu = SuinvMunew[0, :, :]", "alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) def update_hypers(self, params): \"\"\"Summary Args: params (TYPE): Description \"\"\"", "n2_new elif mode == self.PREV: cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :]", "find_log_lik: N = self.N lik_contrib = scale_logZ * np.sum(logZ) dm_s = scale_logZ *", "1.0 / batch_size / alpha xb = self.x_train[idxs, :] yb = self.y_train[idxs, :]", "mx = post_1 / post_2 return mx, vx def get_posterior_y(self): \"\"\"Summary Returns: TYPE:", "Din)) self.x_next_1 = np.zeros((N, Din)) self.x_next_2 = np.zeros((N, Din)) self.x_up_1 = np.zeros((N, Din))", "self.sgp_layer.update_posterior(None, new_hypers=False) if find_log_lik: N = self.N lik_contrib = scale_logZ * np.sum(logZ) dm_s", "yn = self.y_train[n, :].reshape([1, self.Dout]) xn = self.x_train[n, :].reshape([1, self.Din]) (mn, vn, extra_res)", "np.ones(x.shape[0])) kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu) qfu =", "= np.where(frac_t2 < 0) # frac_t2[neg_idxs] = 0 cur_t1 = self.tx1[n, :] cur_t2", "\"\"\"Summary # TODO: this should reuse base models! 
\"\"\" import sys import math", "= np.einsum('ab,ndb->nda', Kuuinv, muhat) Smm = Suhat + np.einsum('nda,ndb->ndab', muhat, muhat) Bhat =", "Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat] return mout, vout, extra_res def", "dm2 sgp_contrib, sgp_grad = self.sgp_layer.backprop_grads_reg( idxs, m_cav, v_cav, dm_s, dm2_s, dv_s, xb, alpha,", "(float, optional): Description Returns: TYPE: Description \"\"\" N = self.N scale_post = N", "else: return self._forward_prop_random_thru_post_mm(mx, vx) def _forward_prop_deterministic_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description Returns:", "{'sn': self.sn} # params = dict(sgp_params) # params.update(emi_params) # params.update(ssm_params) # return params", "prior factor cav_x1 = self.t01 + (1 - alpha) * self.tx1[n, :] cav_x2", "compute grads no_ep_sweeps = 10 # TODO: put this in config parallel =", "Kuuinv psi0 = np.exp(2.0 * self.sf) psi1, psi2 = compute_psi_weave( 2 * self.ls,", "x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv = self.Kuuinv", "vy, vyn = self.emi_layer.output_probabilistic(mx, vx) return my, vy, vyn # def predict_f(self, inputs):", "np.arange(self.N) y = self.y_train[idxs, :] x = self.x_train[idxs, :] (m, v, extra_res) =", "def compute_phi_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Su) phi_posterior =", "= self.y_train else: idxs = np.random.choice(N, mb_size, replace=False) xb = self.x_train[idxs, :] yb", "ls = self.ls sf = self.sf Dout = self.Dout M = self.M zu", "in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) xn = self.x_train[n, :].reshape([1, self.Din]) (mn,", "/ alpha # scale_logZ = 0 xb = self.x_train[idxs, :] yb = self.y_train[idxs,", "Description hidden_size (TYPE): Description no_pseudo (TYPE): Description lik (str, optional): Description prior_mean (int,", "self.M) # natural params for latent variables self.tx1 = np.zeros((self.N, self.Din)) self.tx2 =", 
"n2_damped = rho * n2_new + (1.0 - rho) * n2_ori # var_new_parallel", "= self.tx2[n, :] tx1_new = (1 - alpha) * cur_t1 + frac_t1 tx2_new", "self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2 = self.x_up_2[idxs, :] + self.x_next_2[idxs, :] cav_prev_1", "Dout = self.Dout if x_train is None: ls = np.log(np.ones((Din, )) + 0.1", "= True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # return mf, vf # def", "term2d) sgp_contrib = - term1 - term2 KuuinvMcav = np.einsum('ab,ndb->nda', Kuuinv, mcav) dmiKuuinvMcav", "in range(no_sweeps): # if k % display_steps == 0: # print 'PEP, epoch:", "grad_all[key] = lik_grad[key] energy /= N for key in grad_all.keys(): grad_all[key] /= N", "mean_i = self.means[idxs, :] var_i_new = -1.0 / dm2 - np.sum(k_i[:, np.newaxis, :]", "= self.t01 + (1 - alpha) * self.tx1[n, :] cav_x2 = self.t02 +", "cav_up_2 elif mode == self.PREV: cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2", "decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' 
class SGPLVM(Base_SGPLVM): \"\"\"Summary Attributes: Din (TYPE):", "data post_m = PCA_reduce(y_train, self.Din) post_m_mean = np.mean(post_m, axis=0) post_m_std = np.std(post_m, axis=0)", "prior_mean (int, optional): Description prior_var (int, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPLVM,", "= np.exp(2 * self.sn) v_sum = v_t + v_prop + sn2 / alpha", "Nb * np.sum(term2a + term2b + term2c + term2d) sgp_contrib = - term1", "= np.arange(self.N) y = self.y_train[idxs, :] x = self.x_train[idxs, :] (m, v, extra_res)", "vcav, dm, dm2, dv, alpha, prop_info): h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2]", "dmprop, dvprop, dmt, dvt = \\ self.compute_transition_tilted( mprop, vprop, cav_t_m, cav_t_v, alpha) grad_hyper,", "= rho * n1_new + (1.0 - rho) * n1_ori # n2_damped =", "var_new_parallel # n1_ori = 1.0 / variance_i # n2_ori = mean_i / variance_i", "+ t2_frac if t1_new.shape[0] == 1: # TODO: do damping here? self.t1[n, :,", "= -0.5 / alpha * np.log(oneminuswnSwn) term2 = N / Nb * np.sum(term2a", "alpha) * self.tx2[n, :] cav_v = 1.0 / cav_x2 cav_m = cav_v *", "/ post_v post_1 = post_2 * post_m self.tx1 = post_1 - self.t01 self.tx2", "\"\"\"Summary Args: n (TYPE): Description mx (TYPE): Description vx (TYPE): Description alpha (TYPE):", ":] cav_prev_2 += (1 - alpha) * self.x_prev_2[idxs, :] return cav_prev_1 / cav_prev_2,", "post_1 = self.t01 + self.tx1 post_2 = self.t02 + self.tx2 vx = 1.0", "no_pseudo): \"\"\"Summary Args: no_train (TYPE): Description input_size (TYPE): Description output_size (TYPE): Description no_pseudo", "= mean_div_var_i_new * var_new_parallel # if alpha == 1: # rho = 0.5", "display_steps == 0: # print 'PEP, epoch: %d / %d' % (k, no_sweeps)", "vx): \"\"\"Summary Args: mx (TYPE): Description vx (TYPE): Description Returns: TYPE: Description \"\"\"", "\"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha) Kuuinv = self.Kuuinv Ahat =", "dpsi2, psi2, ls, sf2, mx, vx, zu) dvcav = 
np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv)", "mean_div_var_i_new = (mean_i_new / var_i_new + mean_i / variance_i * (1 - alpha))", "dm_all, psi1) dBhat = np.einsum('nd,nab->ndab', dv, psi2) # compute grads wrt psi1 and", "\"\"\" try: for e in range(no_epochs): if e % 50 == 0: print", "t1_new.shape[0] == 1: # TODO: do damping here? self.t1[n, :, :] = t1_new", ":] = t2_new # TODO: update posterior self.Su = Sunew[0, :, :, :]", "B, kfu, kfu) vout = kff + Bpsi2 return mout, vout # TODO", "= -0.5 * m_diff**2 / v_sum const_term = -0.5 * np.log(2 * np.pi", "= self.Kuuinv + T2u self.Suinv = Vinv self.Su = np.linalg.inv(Vinv) self.mu = np.einsum('dab,db->da',", "= 2*dhyp[0] + dsf grads['ls'] = 2*dhyp[1] + dls grads['zu'] = dhyp[2] +", "reuse base models! \"\"\" import sys import math import numpy as np import", "- 1) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( idxs, cav_tm1_m, cav_tm1_v, alpha=alpha) logZ,", "Kuu given new hypers self.compute_kuu() # compute mu and Su for each layer", "(TYPE): Description x_up_1 (TYPE): Description x_up_2 (TYPE): Description \"\"\" def __init__(self, y_train, hidden_size,", "self.variances[idxs, :] mean_i = self.means[idxs, :] tn = 1.0 / variance_i gn =", "gamma = self.gamma beta = self.beta h_si = p_i - np.einsum('dab,nb->nda', beta, k_i)", "* cur_t1 + (1 - decay) * tx1_new tx2_new = decay * cur_t2", "+ (1 - decay) * n2_new else: raise NotImplementedError('unknown mode') def get_posterior_x(self): \"\"\"Summary", "self.sf) zu = self.zu Kuuinv = self.Kuuinv a = extra_args muhat, Suhat, SuinvMuhat,", "np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2.0 * self.sf)", "]) self.Suinv = np.zeros([Dout, M, M]) self.Splusmm = np.zeros([Dout, M, M]) # numpy", "np.sum(self.t2, axis=0) self.SuinvMu = np.sum(self.t1, axis=0) self.Su = np.linalg.inv(self.Suinv) self.mu = np.einsum('dab,db->da', self.Su,", "# factor variables self.t1 = np.zeros([N, Dout, M]) self.t2 = 
np.zeros([N, Dout, M,", "kfu) vout = kff + Bkfukuf extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, kfu,", "decay=decay) else: # parallel update for entire dataset # TODO: minibatch parallel idxs", "* wnSwn term2a = 0.5 * alpha * tn**2 * gn**2 * wnScavwn", "phi_posterior def compute_phi_cavity(self): \"\"\"Summary Returns: TYPE: Description \"\"\" logZ_posterior = 0 (sign, logdet)", "Description parallel (bool, optional): Description decay (int, optional): Description Returns: TYPE: Description \"\"\"", "grad_cav, extra_res, decay=decay) self.update_factor_x( [n], alpha, grad_cav, cav_m_n, cav_v_n, decay=decay) else: # parallel", "def _forward_prop_random_thru_post_mm(self, mx, vx): \"\"\"Summary Args: mx (TYPE): Description vx (TYPE): Description Returns:", "dv, kfu, kfu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = np.einsum('ab,ndb->nda', Kuuinv,", "Suinvm)) variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] tn = 1.0 /", "Kuuinv, dBhat, Kuuinv) dmcav = 2 * np.einsum('ndab,ndb->nda', dvcav, muhat) \\ + np.einsum('ab,ndb->nda',", "np.log(np.array([1])) zu = np.tile(np.linspace(-1, 1, M).reshape((M, 1)), (1, Din)) else: if N <", "+ frac_n1 n2_new = (1 - alpha) * cur_n2 + frac_n2 self.x_prev_1[idxs, :]", "grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, [n], decay=decay, alpha=alpha) self.update_factor_x_sequential( self.PREV, dmt, dvt,", "* np.sum(logZ) dm_s = scale_logZ * dm dv_s = scale_logZ * dv dm2_s", "= self.sgp_layer.forward_prop_thru_post(inputs) # return mf, vf # def sample_f(self, inputs, no_samples=1): # \"\"\"Summary", "in range(no_epochs): if e % 50 == 0: print 'epoch %d/%d' % (e,", "dKfu_via_logZ = np.sum(dKfu_via_mi + dKfu_via_vi, axis=1) dsf2, dls, dzu = compute_kfu_derivatives( dKfu_via_logZ, kfu,", ":] variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] # compute cavity covariance", "np.newaxis, :] k_i = self.Kfu[idxs, :] k_ii = self.Kff_diag[idxs][:, np.newaxis] gamma 
= self.gamma", "scale_logZ) energy = sgp_contrib + lik_contrib grad_all = {} for key in sgp_grad.keys():", "np.arange(0, self.N - 1) cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :] cav_next_2 =", "def update_factor_x(self, n, alpha, grad_cav, cav_m, cav_v, decay=0.0): \"\"\"Summary Args: n (TYPE): Description", "h_si, h_si) * dlogZd_dmi2[:, :, np.newaxis, np.newaxis] gamma_si = gamma + hd1 beta_si", "2 * self.sf, x, x) kff += np.diag(JITTER * np.ones(x.shape[0])) kfu = compute_kernel(2", "\"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description emi_layer (TYPE): Description lik (TYPE):", "phi_prior = self.compute_phi_prior() phi_post = self.compute_phi_posterior() phi_cav = self.compute_phi_cavity() phi = scale_prior *", "dvx = grad_cav['vx'] new_m = cav_m + cav_v * dmx new_v = cav_v", "in config parallel = True # TODO: put this in config energy, grad_all", "def run_pep_sequential(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size = idxs.shape[0] scale_logZ = -", "SuinvMuhat = self.SuinvMu - alpha * t1n Suhat = np.linalg.inv(Suinvhat) muhat = np.einsum('ndab,ndb->nda',", "= self.sf Dout = self.Dout M = self.M zu = self.zu self.Kuu =", "t1_new self.t2[n, :, :, :] = t2_new # TODO: update posterior self.Su =", "compute the leave one out moments t1n = self.t1[n, :, :] t2n =", "dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x_sequential(self.PREV, [n + 1],", "TYPE: Description \"\"\" post_1 = self.t01 + self.tx1 post_2 = self.t02 + self.tx2", "self.t2[n, :, :, :] = t2_new # TODO: update posterior self.Su = Sunew[0,", "= self.Kuuinv - np.einsum('ab,dbc->dac', self.Kuuinv, np.einsum('dab,bc->dac', self.Su, self.Kuuinv)) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary", "display_steps) def run_pep_parallel(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size = idxs.shape[0] scale_logZ =", "up_2 for n in range(0, self.N - 1): # deal with the dynamics", 
"vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay)", "this in config energy, grad_all = self.run_pep(idxs, no_ep_sweeps, alpha, parallel, compute_energy=True) return energy,", "Description prior_var (int, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPLVM, self).__init__(y_train, hidden_size, no_pseudo,", "params['zu' + key_suffix] # update Kuu given new hypers self.compute_kuu() # compute mu", "axis=2) oneminuswnSwn = 1 - alpha * tn * wnSwn term2a = 0.5", "cav_m_n, cav_v_n, alpha=alpha) logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper,", "# frac_t2[neg_idxs] = 0 cur_t1 = self.tx1[n, :] cur_t2 = self.tx2[n, :] tx1_new", "t2_frac = Suinvnew - Suinvhat t1_frac = SuinvMunew - SuinvMuhat t1_old = self.t1[n,", "self.x_up_1 post_2 = self.x_next_2 + self.x_prev_2 + self.x_up_2 post_1[0, :] += self.x_prior_1 post_2[0,", "Suhat - np.einsum( 'ndab,ndbc->ndac', Suhat, np.einsum('ndab,ndbc->ndac', inner, Suhat)) Suinvnew = np.linalg.inv(Sunew) SuinvMunew =", "idxs = np.arange(1, self.N) cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :] n1_new", "parallel idxs = np.arange(self.N) y = self.y_train[idxs, :] x = self.x_train[idxs, :] (m,", "self.Su, self.Kuuinv)) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: x_train (None, optional): Description key_suffix", "self.sgp_layer.init_hypers() # lik_params = self.lik_layer.init_hypers() # init_params = dict(sgp_params) # init_params.update(lik_params) # return", "from scipy.cluster.vq import kmeans2 from utils import * from kernels import * from", "\\ a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8] # compute grads", "= self.x_prev_2[idxs, :] n1_new = (1 - alpha) * cur_n1 + frac_n1 n2_new", "prop_info[0], prop_info[1], prop_info[2] kfu = self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i =", "* class SGP_Layer(object): 
\"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description Kuu (TYPE):", "Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv))", ":] cav_prev_2 = self.x_up_2[idxs, :] + self.x_next_2[idxs, :] cav_prev_1 += (1 - alpha)", "= np.einsum('ndab,nab->nd', Bhat, psi2) vout = psi0 + Bhatpsi2 - mout**2 extra_res =", "Description \"\"\" # prior factor cav_x1 = self.t01 + (1 - alpha) *", "cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m,", "= Sunew[0, :, :, :] self.mu = munew[0, :, :] self.Suinv = Suinvnew[0,", "* self.tx2[n, :] cav_v = 1.0 / cav_x2 cav_m = cav_v * cav_x1", "gp_emi=False, control_to_emi=True): \"\"\"Summary Args: y_train (TYPE): Description hidden_size (TYPE): Description no_pseudo (TYPE): Description", "params (TYPE): Description mb_size (TYPE): Description alpha (float, optional): Description prop_mode (TYPE, optional):", "# mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.emi_layer.output_probabilistic(mf, vf) # return", "mode') def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1 = self.x_next_1 + self.x_prev_1", "self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, [n], decay=decay, alpha=alpha) self.update_factor_x_sequential( self.PREV, dmt,", "v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( m, v, dm, dv, extra_res, cav_m,", "(TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description t01 (TYPE): Description t02 (TYPE):", "dmcav (TYPE): Description dvcav (TYPE): Description mcav (TYPE): Description vcav (TYPE): Description n1cav", "Description v (TYPE): Description dm (TYPE): Description dv (TYPE): Description extra_args (TYPE): Description", "Description dvcav (TYPE): Description mcav (TYPE): Description vcav 
(TYPE): Description n1cav (TYPE): Description", "Bhat dAhat = np.einsum('nd,nm->ndm', dm, kfu) dBhat = np.einsum('nd,na,nb->ndab', dv, kfu, kfu) dvcav", "dKuu_via_logZ = dKuu_via_mi + dKuu_via_vi dKfu_via_mi = dmiKuuinvMcav dKfu_via_vi = 2 * dv[:,", "mean_i_new = mcav - dm / dm2 var_new_parallel = 1 / (1 /", "alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 for n in range(0, self.N -", "Description prior_mean (int, optional): Description prior_var (int, optional): Description Raises: NotImplementedError: Description \"\"\"", "self.zu) mout = np.einsum('nm,dm->nd', psi1, A) Bpsi2 = np.einsum('dab,nab->nd', B, psi2) vout =", "x_train, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix (str, optional): Description Returns: TYPE:", "dv[:, :, np.newaxis] * (-p_i[:, np.newaxis, :] + KuuinvVcavKuuinvKufi) dKfu_via_logZ = np.sum(dKfu_via_mi +", "a[7], a[8] # compute grads wrt Ahat and Bhat dm_all = dm -", "logZ_prior = self.Dout * 0.5 * logdet return logZ_prior def compute_phi_posterior(self): \"\"\"Summary Returns:", "/ (cav_up_2 + 1e-16), cav_up_1, cav_up_2 elif mode == self.PREV: idxs = np.arange(1,", ":] + self.x_next_2[idxs, :] cav_prev_1 += (1 - alpha) * self.x_prev_1[idxs, :] cav_prev_2", "= self.Dout if x_train is None: ls = np.log(np.ones((Din, )) + 0.1 *", "def compute_cavity_x_sequential(self, mode, idxs, alpha): \"\"\"Summary Args: mode (TYPE): Description idxs (TYPE): Description", "= N = no_train # factor variables self.t1 = np.zeros([N, Dout, M]) self.t2", "% display_steps == 0: print 'PEP, epoch: %d / %d' % (k, no_sweeps)", "prior_var self.x_prior_2 = 1.0 / prior_var self.UP, self.PREV, self.NEXT = 'UP', 'PREV', 'NEXT'", "grad_cav = self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( idxs,", "idxs = np.arange(N) xb = self.x_train yb = self.y_train else: idxs = np.random.choice(N,", "Description n2cav (TYPE): Description idxs (TYPE): Description decay 
(float, optional): Description alpha (float,", "= self.x_up_2[idxs, :] + self.x_next_2[idxs, :] cav_prev_1 += (1 - alpha) * self.x_prev_1[idxs,", "a[8] # compute grads wrt Ahat and Bhat dm_all = dm - 2", "new_v new_n1 = new_n2 * new_m frac_n2 = new_n2 - n2cav frac_n1 =", "+ key_suffix] = self.ls params['sf' + key_suffix] = self.sf params_zu_i = self.zu params['zu'", "psi0 = np.exp(2 * self.sf) psi1, psi2 = compute_psi_weave( 2 * self.ls, 2", "Description Returns: TYPE: Description \"\"\" try: for e in range(no_epochs): if e %", "Kuu and its gradients self.zu = np.zeros([M, Din]) self.Kuu = np.zeros([M, M]) self.Kuuinv", "no_samples=1): # \"\"\"Summary # Args: # inputs (TYPE): Description # no_samples (int, optional):", "Description alpha (TYPE): Description decay (TYPE): Description Returns: TYPE: Description \"\"\" # merge", "phi_cav = self.compute_phi_cavity() phi = scale_prior * phi_prior + scale_post * phi_post +", "* 0.5 * logdet return logZ_prior def compute_phi_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\"", "# natural params for latent variables self.tx1 = np.zeros((self.N, self.Din)) self.tx2 = np.zeros((self.N,", "'vx': dvx, 'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_input def backprop_grads_reg(self, m, v,", "qfu, u_sample) vf = kff - np.dot(qfu, kfu.T) Lf = np.linalg.cholesky(vf) epsilon =", "update kuu and kuuinv ls = self.ls sf = self.sf Dout = self.Dout", "new_n2 - n2cav frac_n1 = new_n1 - n1cav if mode == self.NEXT: idxs", "2 * dv[:, :, np.newaxis] * (-p_i[:, np.newaxis, :] + KuuinvVcavKuuinvKufi) dKfu_via_logZ =", "= self.Kuuinv + np.sum(self.t2, axis=0) self.SuinvMu = np.sum(self.t1, axis=0) self.Su = np.linalg.inv(self.Suinv) self.mu", "mode == self.NEXT: cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :] n1_new =", "natural params for latent variables self.tx1 = np.zeros((self.N, self.Din)) self.tx2 = np.zeros((self.N, self.Din))", "zu = centroids if N < 10000: X1 = np.copy(x_train) else: randind =", 
"dBhat, Kuuinv) dmcav = np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_cav = {'mcav':", "\\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( mn, vn, dmn, dvn,", "logdet) = np.linalg.slogdet(self.Su) phi_posterior = 0.5 * np.sum(logdet) phi_posterior += 0.5 * \\", "0.5 * (np.sum(logdetSu) - Dout * logdetKuu + np.sum(mu * Suinvm)) variance_i =", "Ahat) Bhatpsi2 = np.einsum('ndab,nab->nd', Bhat, psi2) vout = psi0 + Bhatpsi2 - mout**2", "def update_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" # compute the posterior approximation self.Suinv", "# sgp_params = self.sgp_layer.get_hypers() # emi_params = self.emi_layer.get_hypers() # ssm_params = {'sn': self.sn}", "(int, optional): Description # Returns: # TYPE: Description # \"\"\" # if not", "(k, no_sweeps) find_log_lik = compute_energy and (k == no_sweeps-1) m_cav, v_cav, prop_info =", "x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\" Su = self.Su", "new_n1 = new_n2 * new_m frac_n2 = new_n2 - n2cav frac_n1 = new_n1", "= np.zeros([Dout, M, M]) self.Splusmm = np.zeros([Dout, M, M]) # numpy variable for", "do damping here? 
self.t1[n, :, :] = t1_new self.t2[n, :, :, :] =", "(TYPE): Description vcav (TYPE): Description n1cav (TYPE): Description n2cav (TYPE): Description decay (float,", "compute_kuu(self): \"\"\"Summary Returns: TYPE: Description \"\"\" # update kuu and kuuinv ls =", "np.linalg.inv(Vinv) self.mu = np.einsum('dab,db->da', self.Su, T1u) self.gamma = np.einsum('ab,db->da', self.Kuuinv, self.mu) self.beta =", "np.einsum('ndab,ndb->nda', Suhat, dmcav) inner = np.einsum('nda,ndb->ndab', dmcav, dmcav) - 2 * dvcav Sunew", "t2n = self.t2[n, :, :, :] Suinvhat = self.Suinv - alpha * t2n", "p_idlogZ_dvi) dKuu_via_vi = temp1 + temp2 + temp3 dKuu_via_logZ = dKuu_via_mi + dKuu_via_vi", "= 0.5 * (np.sum(logdetSu) - Dout * logdetKuu + np.sum(mu * Suinvm)) variance_i", "= 0.5 * alpha * tn**2 * gn**2 * wnScavwn term2b = -", "Din)) self.x_next_2 = np.zeros((N, Din)) self.x_up_1 = np.zeros((N, Din)) self.x_up_2 = np.zeros((N, Din))", "extra_args[1], extra_args[2], extra_args[3] dmcav, dvcav = grad_cav['mcav'], grad_cav['vcav'] # perform Power-EP update munew", "Power-EP update munew = muhat + np.einsum('ndab,ndb->nda', Suhat, dmcav) inner = np.einsum('nda,ndb->ndab', dmcav,", "vout, extra_res def forward_prop_thru_post(self, mx, vx=None): \"\"\"Summary Args: mx (TYPE): Description vx (None,", "Din (TYPE): Description Dout (TYPE): Description lik_layer (TYPE): Description M (TYPE): Description N", "+ dls grads['zu'] = dhyp[2] + dzu return sgp_contrib, grads class SGPR_rank_one(Base_SGPR): \"\"\"Summary", "# \"\"\" # sgp_params = self.sgp_layer.init_hypers() # lik_params = self.lik_layer.init_hypers() # init_params =", ":].reshape([1, self.Dout]) xn = self.x_train[n, :].reshape([1, self.Din]) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav(", "inducing points and parameters of q(U) N = self.N M = self.M Din", "KeyboardInterrupt ...' 
def compute_cavity_x(self, n, alpha): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE):", "decay=decay, alpha=alpha) def inf_sequential(self, epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE): Description alpha", "and psi2 dpsi1 = np.einsum('nd,ndm->nm', dm_all, Ahat) dpsi2 = np.einsum('nd,ndab->nab', dv, Bhat) dsf2,", "\"\"\"Summary Returns: TYPE: Description \"\"\" # update kuu and kuuinv ls = self.ls", "+ self.x_prev_1[idxs, :] cav_next_2 = self.x_up_2[idxs, :] + self.x_prev_2[idxs, :] cav_next_1 += (1", "# def get_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description # \"\"\" #", "n2_new = (1 - alpha) * cur_n2 + frac_n2 self.x_prev_1[idxs, :] = decay", "dKfu_via_mi = dmiKuuinvMcav dKfu_via_vi = 2 * dv[:, :, np.newaxis] * (-p_i[:, np.newaxis,", "- self.N * 1.0 / batch_size / alpha # scale_logZ = 0 xb", "Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha) Kuuinv = self.Kuuinv Ahat", "= self.emi_layer.output_probabilistic(mx, vx) return my, vy, vyn # def predict_f(self, inputs): # \"\"\"Summary", "extra_args (TYPE): Description decay (int, optional): Description Returns: TYPE: Description \"\"\" muhat, Suhat,", ":] Suinvhat = self.Suinv - alpha * t2n SuinvMuhat = self.SuinvMu - alpha", "n1cav (TYPE): Description n2cav (TYPE): Description idxs (TYPE): Description decay (float, optional): Description", "= mean_new_parallel / var_new_parallel # n1_ori = 1.0 / variance_i # n2_ori =", "np.linalg.solve(self.Su, self.mu)) return phi_posterior def compute_phi_cavity(self): \"\"\"Summary Returns: TYPE: Description \"\"\" logZ_posterior =", "i in range(Din): ls[i] = np.log(d2imed + 1e-16) sf = np.log(np.array([0.5])) params =", "kff - np.dot(qfu, kfu.T) Lf = np.linalg.cholesky(vf) epsilon = np.random.randn(x.shape[0], self.Dout) f_sample =", "inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int,", "TYPE: Description \"\"\" # merge info 
from output cav_up_m, cav_up_v, _, _ =", "- alpha) * t2_old + t2_frac if t1_new.shape[0] == 1: # TODO: do", "= Suinvnew[0, :, :, :] self.SuinvMu = SuinvMunew[0, :, :] else: # parallel", "= np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv kff = np.exp(2 *", "# def predict_y(self, inputs): # \"\"\"Summary # Args: # inputs (TYPE): Description #", "- Kuuinv kff = np.exp(2 * self.sf) kfu = compute_kernel(2 * self.ls, 2", "(1 - decay) * n2_new elif mode == self.PREV: cur_n1 = self.x_prev_1[idxs, :]", "m, v, dm, dv, extra_res, x, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay)", "# \"\"\"Summary # Returns: # TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.get_hypers()", "idxs, alpha): # deletion p_i = self.KuuinvKuf[:, idxs].T[:, np.newaxis, :] k_i = self.Kfu[idxs,", "vn, dmn, dvn, extra_res, xn, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res) else: #", "/ cav_next_2, 1.0 / cav_next_2, cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def compute_transition_tilted(self,", "self.x_next_2 + self.x_prev_2 + self.x_up_2 post_1[0, :] += self.x_prior_1 post_2[0, :] += self.x_prior_2", "= t1_new self.t2[n, :, :, :] = t2_new # TODO: update posterior self.Su", "= 0.5 * np.sum(logdet) phi_posterior += 0.5 * \\ np.sum(self.mu * np.linalg.solve(self.Su, self.mu))", "return params def update_hypers(self, params, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix (str,", "raise NotImplementedError('unknown mode') def compute_cavity_x_sequential(self, mode, idxs, alpha): \"\"\"Summary Args: mode (TYPE): Description", "(TYPE): Description no_pseudo (TYPE): Description \"\"\" self.Din = Din = input_size self.Dout =", ":, :] = decay * t1_old + (1 - decay) * t1_new self.t2[n,", "logZ, dmprop, dvprop, dmt, dvt = \\ self.compute_transition_tilted( mprop, vprop, cav_t_m, cav_t_v, alpha)", "cavity covariance betacavKuu = 
np.einsum('ndab,bc->ndac', beta_si, Kuu) mcav = np.einsum('ab,ndb->nda', Kuu, gamma_si) Sucav", "Description key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" # dict to hold", "= np.log(d2imed + 1e-16) sf = np.log(np.array([0.5])) params = dict() params['sf' + key_suffix]", "self.x_train[idxs, :] yb = self.y_train[idxs, :] energy = {} grad_all = {} for", "(TYPE): Description v_prop (TYPE): Description m_t (TYPE): Description v_t (TYPE): Description alpha (TYPE):", "update for entire dataset # TODO: minibatch parallel idxs = np.arange(self.N) y =", "return self._forward_prop_deterministic_thru_post(mx) else: return self._forward_prop_random_thru_post_mm(mx, vx) def _forward_prop_deterministic_thru_post(self, x): \"\"\"Summary Args: x (TYPE):", "dsf grads['ls'] = 2*dhyp[1] + dls grads['zu'] = dhyp[2] + dzu return sgp_contrib,", "= self.Kuuinv a = extra_args muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat,", "range(no_epochs): if e % 50 == 0: print 'epoch %d/%d' % (e, no_epochs)", "grad_cav, extra_res, decay=decay) self.update_factor_x( idxs, alpha, grad_cav, cav_m, cav_v, decay=decay) except KeyboardInterrupt: print", "[idxs[i]], alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb[i], alpha, compute_dm2=True)", "import math import numpy as np import scipy.linalg as npalg from scipy import", "np.einsum('ab,ndb->nda', Kuu, gamma_si) Sucav = Kuu - np.einsum('ab,ndbc->ndac', Kuu, betacavKuu) signSu, logdetSu =", "grad_cav, extra_res, decay=decay) self.update_factor_x( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, decay=decay, alpha=alpha)", ":] yb = self.y_train[idxs, :] energy = {} grad_all = {} for k", "raise NotImplementedError('unknown mode') def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1 = self.x_next_1", "(TYPE): Description # Returns: # TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params) # 
self.emi_layer.update_hypers(params)", "< 10000: X1 = np.copy(x_train) else: randind = np.random.permutation(N) X1 = X[randind[:5000], :]", "\"\"\" # sgp_params = self.sgp_layer.get_hypers() # lik_params = self.lik_layer.get_hypers() # params = dict(sgp_params)", "decay) * t1_new self.t2[n, :, :, :] = decay * t2_old + (1", "Kuuinv, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv kff", "x (TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv = self.Kuuinv A = np.einsum('ab,db->da',", "cav_v = 1.0 / cav_x2 cav_m = cav_v * cav_x1 return cav_m, cav_v,", "cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x_sequential( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m,", "np.einsum('ab,bd->ad', Lf, epsilon) return f_sample def compute_kuu(self): \"\"\"Summary Returns: TYPE: Description \"\"\" #", "(TYPE): Description Returns: TYPE: Description \"\"\" # prior factor cav_x1 = self.t01 +", "from config import * class SGP_Layer(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE):", "y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var, x_control, gp_emi, control_to_emi) self.dyn_layer = SGP_Layer( self.N", "hyperparameters self.ls = np.zeros([Din, ]) self.sf = 0 def compute_phi_prior(self): \"\"\"Summary Returns: TYPE:", "m_cav, v_cav, dm_s, dm2_s, dv_s, xb, alpha, prop_info) lik_grad = self.lik_layer.backprop_grads( m_cav, v_cav,", "munew[0, :, :] self.Suinv = Suinvnew[0, :, :, :] self.SuinvMu = SuinvMunew[0, :,", "phi_cav return phi def forward_prop_thru_cav(self, n, mx, vx=None, alpha=1.0): \"\"\"Summary Args: n (TYPE):", "self.N * 1.0 / batch_size / alpha xb = self.x_train[idxs, :] yb =", "self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) Smm = self.Su + np.einsum('da,db->dab', self.mu, self.mu)", "np.diag(JITTER * np.ones(x.shape[0])) kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)", "# emi_params = 
self.emi_layer.get_hypers() # ssm_params = {'sn': self.sn} # params = dict(sgp_params)", "Smm, psi1, psi2, Ahat, Bhat] return mout, vout, extra_res def forward_prop_thru_post(self, mx, vx=None):", "from output cav_up_m, cav_up_v, _, _ = self.compute_cavity_x(self.UP, alpha) if not self.gp_emi: #", "key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" params = {} M =", "* np.linalg.solve(self.Su, self.mu)) return phi_posterior def compute_phi_cavity(self): \"\"\"Summary Returns: TYPE: Description \"\"\" logZ_posterior", "zu (TYPE): Description \"\"\" def __init__(self, no_train, input_size, output_size, no_pseudo): \"\"\"Summary Args: no_train", "x, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt", "Kfu self.KuuinvKuf = KuuinvKuf self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf, x_train) KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf,", "m_cav, v_cav, yb, alpha, compute_dm2=True) # dm *= 0 # dm2 *= 0", "dmcav, dmcav) - 2 * dvcav Sunew = Suhat - np.einsum( 'ndab,ndbc->ndac', Suhat,", "(TYPE): Description cav_v (TYPE): Description decay (float, optional): Description Returns: TYPE: Description \"\"\"", "dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( self.NEXT,", "Description n2cav (TYPE): Description decay (float, optional): Description alpha (float, optional): Description Returns:", "def run_pep(self, train_idxs, no_sweeps, alpha, parallel, compute_energy=False, display_steps=10): if parallel: return self.run_pep_parallel( train_idxs,", "= np.einsum('nd,ndab->nab', dv, Bhat) dsf2, dls, dzu, dmx, dvx = compute_psi_derivatives( dpsi1, psi1,", "= compute_kernel(2 * ls, 2 * sf, zu, zu) self.Kuu += np.diag(JITTER *", "grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, decay=decay, alpha=alpha) 
self.update_factor_x( self.PREV, dmt, dvt, cav_t_m,", "prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) def update_hypers(self, params): \"\"\"Summary Args: params (TYPE): Description \"\"\" self.sgp_layer.update_hypers(params,", "n2_ori # var_new_parallel = 1.0 / n1_damped # mean_new_parallel = var_new_parallel * n2_damped", "+ Bkfukuf extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat] return mout,", "dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( m,", "Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description t01 (TYPE): Description", "idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( idxs, alpha, grad_cav, cav_m, cav_v, decay=decay) except", "def run_pep_parallel(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size = idxs.shape[0] scale_logZ = -", "/ batch_size / alpha # scale_logZ = 0 xb = self.x_train[idxs, :] yb", "beta - hd2h # projection h = p_i - np.einsum('ndab,nb->nda', beta_si, k_i) m_si_i", "self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) # natural params for latent variables self.tx1", "* self.ls, 2 * self.sf, x, self.zu) mout = np.einsum('nm,dm->nd', kfu, A) Bpsi2", "self.KuuinvKuf) T1u = np.einsum('bn,nd->db', self.KuuinvKuf, self.means / self.variances) Vinv = self.Kuuinv + T2u", "))) self.Kuuinv = np.linalg.inv(self.Kuu) def update_posterior(self, x_train=None, new_hypers=False): \"\"\"Summary Returns: TYPE: Description \"\"\"", ":] self.Suinv = Suinvnew[0, :, :, :] self.SuinvMu = SuinvMunew[0, :, :] else:", "no_pseudo, lik, prior_mean, prior_var, x_control, gp_emi, control_to_emi) self.dyn_layer = SGP_Layer( self.N - 1,", "zu = np.tile(np.linspace(-1, 1, M).reshape((M, 1)), (1, Din)) else: if N < 10000:", "# deletion p_i = self.KuuinvKuf[:, idxs].T[:, np.newaxis, :] k_i = self.Kfu[idxs, :] k_ii", "= scale_logZ * dv dm2_s = scale_logZ * dm2 sgp_contrib, sgp_grad = 
self.sgp_layer.backprop_grads_reg(", "init_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description # \"\"\" # sgp_params =", "self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :] n1_new = (1 - alpha) * cur_n1", "self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] # compute cavity", "self.updated = True # K = no_samples # fs = np.zeros((inputs.shape[0], self.Dout, K))", "_ = self.compute_cavity_x(idxs, alpha) (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, cav_m, cav_v,", ":] else: # parallel update self.t1[n, :, :] = decay * t1_old +", "leave one out moments t1n = self.t1[n, :, :] t2n = self.t2[n, :,", "super(SGPR_rank_one, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer_rank_one(self.N, self.Din, self.Dout, self.M) def objective_function(self,", "dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x(", "self.t2[n, :, :, :] Suinvhat = self.Suinv - alpha * t2n SuinvMuhat =", "# for k in range(K): # fs[:, :, k] = self.sgp_layer.sample(inputs) # return", "(TYPE): Description alpha (TYPE): Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\" if", "rho * n1_new + (1.0 - rho) * n1_ori # n2_damped = rho", "= grad_cav['mcav'], grad_cav['vcav'] # perform Power-EP update munew = muhat + np.einsum('ndab,ndb->nda', Suhat,", "dmcav = 2 * np.einsum('ndab,ndb->nda', dvcav, muhat) \\ + np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper", "Returns: TYPE: Description \"\"\" Kuuinv = self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) B", "psi1, A) Bpsi2 = np.einsum('dab,nab->nd', B, psi2) vout = psi0 + Bpsi2 -", "dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb, alpha, compute_dm2=True) # dm *= 0 #", "self.x_prev_2[idxs, :] = decay * cur_n2 + (1 - decay) * n2_new else:", "Returns: TYPE: Description \"\"\" sn2 = np.exp(2 * self.sn) v_sum = v_t +", "= 
np.log(np.array([1])) zu = np.tile(np.linspace(-1, 1, M).reshape((M, 1)), (1, Din)) else: if N", ":] + self.x_prev_2[idxs, :] cav_next_1 += (1 - alpha) * self.x_next_1[idxs, :] cav_next_2", "cav_next_2 else: raise NotImplementedError('unknown mode') def compute_cavity_x_sequential(self, mode, idxs, alpha): \"\"\"Summary Args: mode", ":] zu = centroids if N < 10000: X1 = np.copy(x_train) else: randind", "# TODO: remove for loop here # for k in range(K): # fs[:,", "scale_cav * phi_cav return phi def forward_prop_thru_cav(self, n, mx, vx=None, alpha=1.0): \"\"\"Summary Args:", "idxs, alpha): \"\"\"Summary Args: mode (TYPE): Description idxs (TYPE): Description alpha (TYPE): Description", "self.Kfu[idxs, :] k_ii = self.Kff_diag[idxs][:, np.newaxis] gamma = self.gamma beta = self.beta h_si", "self.x_next_1[idxs, :] cav_next_2 += (1 - alpha) * self.x_next_2[idxs, :] return cav_next_1 /", "params['ls' + key_suffix] self.sf = params['sf' + key_suffix] self.zu = params['zu' + key_suffix]", "self.mu = np.einsum('dab,db->da', self.Su, self.SuinvMu) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: x_train (None,", "\"\"\" # if not self.updated: # self.sgp_layer.update_posterior() # self.updated = True # mf,", "dlogZd_dmi2[:, :, np.newaxis, np.newaxis] gamma_si = gamma + hd1 beta_si = beta -", "TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Su) phi_posterior = 0.5 * np.sum(logdet) phi_posterior", "self.sf params_zu_i = self.zu params['zu' + key_suffix] = self.zu return params def update_hypers(self,", "TYPE: Description \"\"\" # compute the posterior approximation self.Suinv = self.Kuuinv + np.sum(self.t2,", "v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, x, alpha=alpha) logZ, dm, dv = \\", "(str, optional): Description Returns: TYPE: Description \"\"\" # dict to hold hypers, inducing", "= no_samples # fs = np.zeros((inputs.shape[0], self.Dout, K)) # # TODO: remove for", "np.ones((M, ))) self.Kuuinv = 
np.linalg.inv(self.Kuu) def update_posterior(self, x_train=None, new_hypers=False): \"\"\"Summary Returns: TYPE: Description", "alpha (TYPE): Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n,", "'PREV', 'NEXT' def inf_parallel(self, epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE): Description alpha", ":] x_dist = cdist(X1, X1, 'euclidean') triu_ind = np.triu_indices(N) ls = np.zeros((Din, ))", "0]+0.05, self.mu[0, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[0, :, :]))) # axs[1].errorbar(self.zu[:, 0]+0.05, self.mu[1, :], fmt='+r',", "self.tx1 = post_1 - self.t01 self.tx2 = post_2 - self.t02 def inference(self, alpha=1.0,", "(1 - decay) * tx2_new self.tx1[n, :] = tx1_new self.tx2[n, :] = tx2_new", "]) self.Su = np.zeros([Dout, M, M]) self.SuinvMu = np.zeros([Dout, M, ]) self.Suinv =", "/ v_sum**2 dvprop = -0.5 / v_sum + 0.5 * m_diff**2 / v_sum**2", "cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :] n1_new = (1 - alpha)", "Description no_pseudo (TYPE): Description lik (str, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR,", "self.Din + self.Dcon_emi, self.Dout, self.M) # natural params for latent variables N =", "no_sweeps) find_log_lik = compute_energy and (k == no_sweeps-1) m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity(idxs,", "# TODO: update posterior self.Su = Sunew[0, :, :, :] self.mu = munew[0,", "= self.run_pep(idxs, no_ep_sweeps, alpha, parallel, compute_energy=True) return energy, grad_all def run_pep(self, train_idxs, no_sweeps,", "logZ_prior def compute_phi_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Su) phi_posterior", "cav_m, cav_v, alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper,", "np.einsum('ndab,bc->ndac', beta_si, Kuu) mcav = np.einsum('ab,ndb->nda', Kuu, gamma_si) Sucav = Kuu - np.einsum('ab,ndbc->ndac',", "Description updated 
(bool): Description x_next_1 (TYPE): Description x_next_2 (TYPE): Description x_prev_1 (TYPE): Description", "= self.Kuu Kuuinv = self.Kuuinv Su = self.Su mu = self.mu Suinv =", "+ frac_n2 self.x_next_1[idxs, :] = decay * cur_n1 + (1 - decay) *", "= 1.0 / n1_damped # mean_new_parallel = var_new_parallel * n2_damped self.variances[idxs, :] =", "muhat) \\ + np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_input = {'mx': dmx,", "kfu) vout = kff + Bpsi2 return mout, vout # TODO def _forward_prop_random_thru_post_mm(self,", "1): # deal with the dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 =", "fmt='+r', yerr=np.sqrt(np.diag(self.Su[0, :, :]))) # axs[1].errorbar(self.zu[:, 0]+0.05, self.mu[1, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[1, :, :])))", "axis=2)) dlogZd_dmi = -dlogZd_dmi2 * (mean_i - np.sum(k_i[:, np.newaxis, :] * gamma, axis=2))", "= np.zeros([N, Dout]) self.variances.fill(1e20) self.means = np.zeros([N, Dout]) # pep variables self.gamma =", "mx (TYPE): Description vx (TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv = self.Kuuinv", "prop_info): N = self.N Nb = idxs.shape[0] sf2 = np.exp(2 * self.sf) Dout", "- term2 KuuinvMcav = np.einsum('ab,ndb->nda', Kuuinv, mcav) dmiKuuinvMcav = dm[:, :, np.newaxis] *", "if t1_new.shape[0] == 1: # TODO: do damping here? 
self.t1[n, :, :] =", "(TYPE): Description Dout (TYPE): Description lik_layer (TYPE): Description M (TYPE): Description N (TYPE):", "Description no_epochs (int, optional): Description parallel (bool, optional): Description decay (int, optional): Description", "/ alpha scale_prior = 1 phi_prior = self.compute_phi_prior() phi_post = self.compute_phi_posterior() phi_cav =", "cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 # deal with the dynamics", "# self.sgp_layer.update_posterior() # self.updated = True # K = no_samples # fs =", "e % 50 == 0: print 'epoch %d/%d' % (e, no_epochs) if not", "np.sum(term2a + term2b + term2c + term2d) sgp_contrib = - term1 - term2", "compute_energy and (k == no_sweeps-1) m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity(idxs, alpha) logZ, dm,", "import Gauss_Layer, Probit_Layer, Gauss_Emis from base_models import Base_SGPR, Base_SGPLVM, Base_SGPSSM from config import", "Suinvhat, kfu, Ahat, Bhat = \\ a[0], a[1], a[2], a[3], a[4], a[5], a[6]", "muhat + np.einsum('ndab,ndb->nda', Suhat, dmcav) inner = np.einsum('nda,ndb->ndab', dmcav, dmcav) - 2 *", "no_samples (int, optional): Description # Returns: # TYPE: Description # \"\"\" # if", "Description decay (int, optional): Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat", "phi_prior + scale_post * phi_post + scale_cav * phi_cav return phi def forward_prop_thru_cav(self,", "2 * dv * m dAhat = np.einsum('nd,nm->ndm', dm_all, psi1) dBhat = np.einsum('nd,nab->ndab',", "a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8] # compute grads wrt", "Args: n (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" # prior", "m, v, dm, dv, extra_args, mx, vx, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description", "+ self.x_prev_2 + self.x_up_2 post_1[0, :] += self.x_prior_1 post_2[0, :] += self.x_prior_2 vx", "self.x_prior_1 = prior_mean / prior_var self.x_prior_2 = 1.0 / prior_var self.UP, self.PREV, self.NEXT", "= 
np.einsum('dab,db->da', self.Su, self.SuinvMu) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: x_train (None, optional):", ":] cur_t2 = self.tx2[n, :] tx1_new = (1 - alpha) * cur_t1 +", "self.Dout, K)) # # TODO: remove for loop here # for k in", "params class SGPSSM(Base_SGPSSM): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description emi_layer (TYPE):", "cur_n2 = self.x_next_2[idxs, :] n1_new = (1 - alpha) * cur_n1 + frac_n1", "= 0 def forward_prop_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description", ":, k] = self.sgp_layer.sample(inputs) # return fs # def predict_y(self, inputs): # \"\"\"Summary", "idxs.shape[0] scale_logZ = - self.N * 1.0 / batch_size / alpha # scale_logZ", "x_next_1 (TYPE): Description x_next_2 (TYPE): Description x_prev_1 (TYPE): Description x_prev_2 (TYPE): Description x_prior_1", "Description vx (None, optional): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\"", "elif mode == self.PREV: cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :] n1_new", "# self.sn = params['sn'] # def init_hypers(self): # \"\"\"Summary # Returns: # TYPE:", "SGPR(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer (TYPE): Description M", "1.0 / prior_var # TODO: alternatitve method for non real-valued data post_m =", "muhat, Suhat, SuinvMuhat, Suinvhat = \\ extra_args[0], extra_args[1], extra_args[2], extra_args[3] dmcav, dvcav =", "\"\"\"Summary Args: n (TYPE): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\"", "post_m self.tx1 = post_1 - self.t01 self.tx2 = post_2 - self.t02 def inference(self,", "dsf2, dls, dzu, dmx, dvx = compute_psi_derivatives( dpsi1, psi1, dpsi2, psi2, ls, sf2,", "__init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1, x_control=None, gp_emi=False, control_to_emi=True): \"\"\"Summary Args: y_train", 
"self.tx1[n, :] cur_t2 = self.tx2[n, :] tx1_new = (1 - alpha) * cur_t1", "post_2 mx = post_1 / post_2 return mx, vx def get_posterior_y(self): \"\"\"Summary Returns:", "mode == self.PREV: idxs = np.arange(1, self.N) cur_n1 = self.x_prev_1[idxs, :] cur_n2 =", "new_m frac_n2 = new_n2 - n2cav frac_n1 = new_n1 - n1cav if mode", "fmt='+r', yerr=np.sqrt(np.diag(self.Su[1, :, :]))) # axs[0].set_title('n = %d' % n[0]) # plt.show() def", "= np.arange(self.N) y = self.y_train[idxs, :] cav_m, cav_v, _, _ = self.compute_cavity_x(idxs, alpha)", "np.dot(kfu, self.Kuuinv) mf = np.einsum('nm,dm->nd', qfu, u_sample) vf = kff - np.dot(qfu, kfu.T)", "cav_next_1 / cav_next_2, 1.0 / cav_next_2, cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def", "\\ + np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_input = {'mx': dmx, 'vx':", "cav_x2 def update_factor_x(self, n, alpha, grad_cav, cav_m, cav_v, decay=0.0): \"\"\"Summary Args: n (TYPE):", "self.t2[n, :, :, :] t1_new = (1.0 - alpha) * t1_old + t1_frac", "= post_1 / post_2 return mx, vx # def predict_f(self, inputs): # \"\"\"Summary", "TODO: put this in config energy, grad_all = self.run_pep(idxs, no_ep_sweeps, alpha, parallel, compute_energy=True)", "= self.y_train[idxs, :] x = self.x_train[idxs, :] (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav(", "(TYPE): Description cav_m (TYPE): Description cav_v (TYPE): Description decay (float, optional): Description Returns:", "TYPE: Description Raises: NotImplementedError: Description \"\"\" new_m = mcav + vcav * dmcav", "x_control, gp_emi, control_to_emi) self.dyn_layer = SGP_Layer( self.N - 1, self.Din + self.Dcon_dyn, self.Din,", "= mf + np.einsum('ab,bd->ad', Lf, epsilon) return f_sample def compute_kuu(self): \"\"\"Summary Returns: TYPE:", "np.newaxis, :] * h, axis=2) mean_i_new = mcav - dm / dm2 var_new_parallel", "2 * dvx) new_p2 = 1.0 / new_v new_p1 = new_p2 * new_m", "dvcav, mcav, vcav, n1cav, n2cav, decay=0.0, alpha=1.0): \"\"\"Summary 
Args: mode (TYPE): Description dmcav", "+ \\ (1 - alpha) * self.x_up_1 cav_up_2 = self.x_prev_2 + self.x_next_2 +", "decay=0.0): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description grad_cav (TYPE): Description cav_m", "dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( m, v,", "mode == self.NEXT: idxs = np.arange(0, self.N - 1) cav_next_1 = self.x_up_1[idxs, :]", "self.x_prior_2 return cav_up_1 / (cav_up_2 + 1e-16), 1.0 / (cav_up_2 + 1e-16), cav_up_1,", "params for latent variables self.tx1 = np.zeros((self.N, self.Din)) self.tx2 = np.zeros((self.N, self.Din)) self.t01", "kff + Bpsi2 return mout, vout def sample(self, x): \"\"\"Summary Args: x (TYPE):", "Description \"\"\" N = self.N scale_post = N * 1.0 / alpha -", "(TYPE, optional): Description Returns: TYPE: Description \"\"\" N = self.N if mb_size >=", "return mout, vout, extra_res def _forward_prop_random_thru_cav_mm(self, n, mx, vx, alpha): \"\"\"Summary Args: n", "range(no_sweeps): # if k % display_steps == 0: # print 'PEP, epoch: %d", "= self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 else: up_1,", "(dmcav**2 - 2 * dvcav) new_n2 = 1.0 / new_v new_n1 = new_n2", "# \"\"\" # sgp_params = self.sgp_layer.get_hypers() # lik_params = self.lik_layer.get_hypers() # params =", "= self.Su mu = self.mu Suinv = self.Suinv p_i = self.KuuinvKuf[:, idxs].T h,", "np.zeros([Din, ]) self.sf = 0 def compute_phi_prior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign,", "\"\"\" # sgp_params = self.sgp_layer.get_hypers() # emi_params = self.emi_layer.get_hypers() # ssm_params = {'sn':", "= self.t02 + self.tx2 vx = 1.0 / post_2 mx = post_1 /", "/= N return energy, grad_all def run_pep_sequential(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size", "= new_p1 - cav_m / cav_v # neg_idxs = np.where(frac_t2 < 0) #", "(mean_i - np.sum(k_i[:, np.newaxis, :] * gamma, axis=2)) hd1 = 
h_si * dlogZd_dmi[:,", "Kfu.T) self.Kfu = Kfu self.KuuinvKuf = KuuinvKuf self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf, x_train) KuuinvKuf_div_var", "x_train=None, key_suffix=''): \"\"\"Summary Args: x_train (None, optional): Description key_suffix (str, optional): Description Returns:", "Suinvhat, kfu, Ahat, Bhat] return mout, vout, extra_res def _forward_prop_random_thru_cav_mm(self, n, mx, vx,", ":] dlogZd_dmi2 = 1.0 / (variance_i/alpha - np.sum(k_i[:, np.newaxis, :] * h_si, axis=2))", "M).reshape((M, 1)), (1, Din)) else: if N < 10000: centroids, label = kmeans2(x_train,", "self.N = N = no_train # factor variables self.variances = np.zeros([N, Dout]) self.variances.fill(1e20)", "= np.einsum('dab,nab->nd', B, psi2) vout = psi0 + Bpsi2 - mout**2 return mout,", "compute mu and Su for each layer self.update_posterior() class SGPR(Base_SGPR): \"\"\"Summary Attributes: Din", "* self.sf) psi1, psi2 = compute_psi_weave( 2 * self.ls, 2 * self.sf, mx,", "* alpha * tn**2 * gn**2 * wnScavwn term2b = - gn *", "Description vx (TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv = self.Kuuinv A =", "params def get_hypers(self, key_suffix=''): \"\"\"Summary Args: key_suffix (str, optional): Description Returns: TYPE: Description", "params def update_hypers(self, params, x_train, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix (str,", "= 1.0 / new_v new_n1 = new_n2 * new_m frac_n2 = new_n2 -", ":] mean_i = self.means[idxs, :] var_i_new = -1.0 / dm2 - np.sum(k_i[:, np.newaxis,", "gradients Vmm = Su + np.einsum('da,db->dab', mu, mu) S = self.Dout * Kuuinv", "Gauss_Layer) and epoch == 0: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1", "{'mx': dmx, 'vx': dvx, 'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_input def backprop_grads_reg(self,", "(sign, logdet) = np.linalg.slogdet(self.Su) phi_posterior = 0.5 * np.sum(logdet) phi_posterior += 0.5 *", "_, _ = self.compute_cavity_x([ n], alpha) (mn, 
vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n],", "= np.zeros([Dout, M, ]) self.Su = np.zeros([Dout, M, M]) self.SuinvMu = np.zeros([Dout, M,", "== self.PREV: cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :] n1_new = (1", "Description x_up_2 (TYPE): Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1,", "cav_up_1 = self.x_prev_1 + self.x_next_1 + \\ (1 - alpha) * self.x_up_1 cav_up_2", "= m_t - m_prop exp_term = -0.5 * m_diff**2 / v_sum const_term =", "optional): Description no_epochs (int, optional): Description parallel (bool, optional): Description decay (float, optional):", "psi1, psi2, Ahat, Bhat] return mout, vout, extra_res def forward_prop_thru_post(self, mx, vx=None): \"\"\"Summary", "p_i - np.einsum('ndab,nb->nda', beta_si, k_i) m_si_i = np.einsum('na,nda->nd', k_i, gamma_si) v_si_ii = k_ii", "+= (1 - alpha) * self.x_next_1[idxs, :] cav_next_2 += (1 - alpha) *", "1.0 / (cav_up_2 + 1e-16), cav_up_1, cav_up_2 elif mode == self.PREV: idxs =", "wnSwn term2a = 0.5 * alpha * tn**2 * gn**2 * wnScavwn term2b", "[n], alpha, grad_cav, cav_m_n, cav_v_n, decay=decay) else: # parallel update for entire dataset", "np.linalg.slogdet(self.Kuu) logZ_prior = self.Dout * 0.5 * logdet return logZ_prior def compute_phi_posterior(self): \"\"\"Summary", "(TYPE): Description x_prev_2 (TYPE): Description x_prior_1 (TYPE): Description x_prior_2 (TYPE): Description x_up_1 (TYPE):", "Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2.0 * self.sf) psi1, psi2 = compute_psi_weave(", "vcav, n1cav, n2cav, idxs, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description dmcav (TYPE):", "mb_size, replace=False) xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] # update model", "Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) - Kuuinv kff = np.exp(2 * self.sf) kfu =", "epoch == 0: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1", "no_epochs=10, 
parallel=False, decay=0.5): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int, optional): Description", "params, mb_size, alpha=1.0, prop_mode=PROP_MM): \"\"\"Summary Args: params (TYPE): Description mb_size (TYPE): Description alpha", "\"\"\" logZ_posterior = 0 (sign, logdet) = np.linalg.slogdet(self.Suhat) phi_cavity = 0.5 * np.sum(logdet)", "dKuu_via_logZ dhyp = d_trace_MKzz_dhypers( 2*self.ls, 2*self.sf, self.zu, S, Kuu - np.diag(JITTER * np.ones(self.M)))", "new_n1 - n1cav if mode == self.NEXT: cur_n1 = self.x_next_1[idxs, :] cur_n2 =", "Bhatpsi2 = np.einsum('ndab,nab->nd', Bhat, psi2) vout = psi0 + Bhatpsi2 - mout**2 extra_res", "dm2 *= 0 # dm2 += 1e-16 # dv *= 0 self.sgp_layer.update_factor( idxs,", "no_pseudo (TYPE): Description \"\"\" self.Din = Din = input_size self.Dout = Dout =", "(TYPE): Description n1cav (TYPE): Description n2cav (TYPE): Description idxs (TYPE): Description decay (float,", "n1cav if mode == self.NEXT: idxs = np.arange(0, self.N - 1) cur_n1 =", ":] = tx1_new self.tx2[n, :] = tx2_new def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description", "= np.einsum('na,dab->ndb', p_i, Su) wnSwn = np.sum(wnS * p_i[:, np.newaxis, :], axis=2) mwn", "phi_post = self.compute_phi_posterior() phi_cav = self.compute_phi_cavity() phi = scale_prior * phi_prior + scale_post", "[n], cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt, dvt = \\ self.compute_transition_tilted( mprop,", "grad_cav, cav_m, cav_v, decay=0.0): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description grad_cav", "- Suinvhat t1_frac = SuinvMunew - SuinvMuhat t1_old = self.t1[n, :, :] t2_old", "np.log(oneminuswnSwn) term2 = N / Nb * np.sum(term2a + term2b + term2c +", "decay (int, optional): Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat =", "- alpha * t1n Suhat = np.linalg.inv(Suinvhat) muhat = np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat) return", "= np.linalg.slogdet(self.Su) phi_posterior = 0.5 * 
np.sum(logdet) phi_posterior += 0.5 * \\ np.sum(self.mu", "+ np.einsum('dab,db->da', Lu, epsilon) kff = compute_kernel(2 * self.ls, 2 * self.sf, x,", "once at the begining for gaussian emission lik if isinstance(self.emi_layer, Gauss_Layer) and epoch", "self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.lik_layer.output_probabilistic(mf, vf) # return my, vy # def", "Base_SGPR, Base_SGPLVM, Base_SGPSSM from config import * class SGP_Layer(object): \"\"\"Summary Attributes: Din (TYPE):", "kfu.T) Lf = np.linalg.cholesky(vf) epsilon = np.random.randn(x.shape[0], self.Dout) f_sample = mf + np.einsum('ab,bd->ad',", "KeyboardInterrupt ...' def compute_cavity_x(self, mode, alpha): \"\"\"Summary Args: mode (TYPE): Description alpha (TYPE):", "v_prop, m_t, v_t, alpha): \"\"\"Summary Args: m_prop (TYPE): Description v_prop (TYPE): Description m_t", "Description Dout (TYPE): Description lik_layer (TYPE): Description M (TYPE): Description N (TYPE): Description", "vx=None, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description mx (TYPE): Description vx (None, optional):", "np.einsum('nm,dm->nd', psi1, A) Bpsi2 = np.einsum('dab,nab->nd', B, psi2) vout = psi0 + Bpsi2", "alpha) * cur_n2 + frac_n2 self.x_prev_1[idxs, :] = decay * cur_n1 + (1", "dm (TYPE): Description dv (TYPE): Description extra_args (TYPE): Description x (TYPE): Description alpha", "TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.init_hypers() # lik_params = self.emi_layer.init_hypers() #", "n, x, alpha): \"\"\"Summary Args: n (TYPE): Description x (TYPE): Description alpha (TYPE):", "post_1 = self.x_next_1 + self.x_prev_1 + self.x_up_1 post_2 = self.x_next_2 + self.x_prev_2 +", "# compute the posterior approximation if new_hypers and x_train is not None: Kfu", "N < 10000: X1 = np.copy(x_train) else: randind = np.random.permutation(N) X1 = X[randind[:5000],", "TYPE: Description \"\"\" N = self.N if mb_size >= N: idxs = np.arange(N)", "\"\"\" if vx is None: return 
self._forward_prop_deterministic_thru_cav(n, mx, alpha) else: return self._forward_prop_random_thru_cav_mm(n, mx,", "p_i) VcavKuuinvKufi = np.einsum('ndab,nb->nda', Sucav, p_i) KuuinvVcavKuuinvKufi = np.einsum('ab,ndb->nda', Kuuinv, VcavKuuinvKufi) p_idlogZ_dvi =", "centroids = x_train[randind[0:M], :] zu = centroids if N < 10000: X1 =", "term2d = -0.5 / alpha * np.log(oneminuswnSwn) term2 = N / Nb *", "np.median(x_dist[triu_ind]) for i in range(Din): ls[i] = np.log(d2imed + 1e-16) sf = np.log(np.array([0.5]))", "= self.zu params['zu' + key_suffix] = self.zu return params def update_hypers(self, params, x_train,", "np.arange(1, self.N) cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2 = self.x_up_2[idxs, :]", "= - gn * tn * wnScavSinvm term2c = 0.5 * tn *", "real-valued data post_m = PCA_reduce(y_train, self.Din) post_m_mean = np.mean(post_m, axis=0) post_m_std = np.std(post_m,", "gradients self.zu = np.zeros([M, Din]) self.Kuu = np.zeros([M, M]) self.Kuuinv = np.zeros([M, M])", ":] * h_si, axis=2)) dlogZd_dmi = -dlogZd_dmi2 * (mean_i - np.sum(k_i[:, np.newaxis, :]", "= prop_info[0], prop_info[1], prop_info[2] k_i = self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i", "n1cav if mode == self.NEXT: cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :]", "covariance betacavKuu = np.einsum('ndab,bc->ndac', beta_si, Kuu) mcav = np.einsum('ab,ndb->nda', Kuu, gamma_si) Sucav =", "key_suffix] = sf params['ls' + key_suffix] = ls params['zu' + key_suffix] = zu", "prop_info) lik_grad = self.lik_layer.backprop_grads( m_cav, v_cav, dm, dv, alpha, scale_logZ) energy = sgp_contrib", "mode (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\"", "(TYPE): Description extra_args (TYPE): Description mx (TYPE): Description vx (TYPE): Description alpha (float,", "Description Raises: NotImplementedError: Description \"\"\" super(SGPR_rank_one, self).__init__(x_train, y_train, 
no_pseudo, lik) self.sgp_layer = SGP_Layer_rank_one(self.N,", "self.M zu = self.zu self.Kuu = compute_kernel(2 * ls, 2 * sf, zu,", "alpha scale_prior = 1 phi_prior = self.compute_phi_prior() phi_post = self.compute_phi_posterior() phi_cav = self.compute_phi_cavity()", "B, kfu, kfu) vout = kff + Bpsi2 return mout, vout def sample(self,", "variance_i gn = mean_i wnScav = np.einsum('na,ndab->ndb', p_i, Sucav) wnScavwn = np.einsum('ndb,nb->nd', wnScav,", "# def predict_f(self, inputs): # \"\"\"Summary # Args: # inputs (TYPE): Description #", "neg_idxs = np.where(frac_t2 < 0) # frac_t2[neg_idxs] = 0 cur_t1 = self.tx1[n, :]", "= np.einsum('nda,ndb->ndab', h_si, h_si) * dlogZd_dmi2[:, :, np.newaxis, np.newaxis] gamma_si = gamma +", "dmiKuuinvMcav = dm[:, :, np.newaxis] * KuuinvMcav dKuu_via_mi = -np.einsum('nda,nb->ab', dmiKuuinvMcav, p_i) VcavKuuinvKufi", "alpha, compute_energy, display_steps) def run_pep_parallel(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size = idxs.shape[0]", "+ (1 - decay) * n2_new elif mode == self.PREV: cur_n1 = self.x_prev_1[idxs,", "v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( m, v, dm, dv, extra_res, x,", "get_posterior_y(self): \"\"\"Summary Returns: TYPE: Description \"\"\" mx, vx = self.get_posterior_x() my, vy, vyn", "# no_samples (int, optional): Description # Returns: # TYPE: Description # \"\"\" #", "sample_f(self, inputs, no_samples=1): # \"\"\"Summary # Args: # inputs (TYPE): Description # no_samples", "self.x_up_2[idxs, :] + self.x_next_2[idxs, :] cav_prev_1 += (1 - alpha) * self.x_prev_1[idxs, :]", "= self.variances[idxs, :] mean_i = self.means[idxs, :] # compute cavity covariance betacavKuu =", "/ cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT: cav_next_1 = self.x_up_1[idxs, :] +", "self.Din, self.M) if gp_emi: self.emi_layer = SGP_Layer( self.N, self.Din + self.Dcon_emi, self.Dout, self.M)", "-0.5 / v_sum + 0.5 * m_diff**2 / v_sum**2 dvprop = -0.5 /", "= 1.0 / post_v 
post_1 = post_2 * post_m self.tx1 = post_1 -", "self.Din, self.Dout, self.M) def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0.5): \"\"\"Summary Args: alpha (float,", "self.compute_transition_tilted( mprop, vprop, cav_t_m, cav_t_v, alpha) grad_hyper, grad_cav = self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop,", "= np.zeros([M, M]) # variables for the hyperparameters self.ls = np.zeros([Din, ]) self.sf", "Description sn (int): Description updated (bool): Description x_next_1 (TYPE): Description x_next_2 (TYPE): Description", "as np import scipy.linalg as npalg from scipy import special from scipy.optimize import", ":, :] Suinvhat = self.Suinv - alpha * t2n SuinvMuhat = self.SuinvMu -", "S = self.Dout * Kuuinv - np.sum(np.einsum('ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Vmm, Kuuinv)), axis=0) S", "alpha) * self.tx1[n, :] cav_x2 = self.t02 + (1 - alpha) * self.tx2[n,", "return self.run_pep_parallel( train_idxs, no_sweeps, alpha, compute_energy, display_steps) else: # TODO return self.run_pep_sequential( train_idxs,", "# return params class SGPSSM(Base_SGPSSM): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description", "* Suinvm)) variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] tn = 1.0", "the hyperparameters self.ls = np.zeros([Din, ]) self.sf = 0 def compute_phi_prior(self): \"\"\"Summary Returns:", "grads['ls'] = 2*dhyp[1] + dls grads['zu'] = dhyp[2] + dzu return sgp_contrib, grads", "sgp_params = self.sgp_layer.init_hypers() # lik_params = self.lik_layer.init_hypers() # init_params = dict(sgp_params) # init_params.update(lik_params)", "no_pseudo self.N = N = no_train # factor variables self.t1 = np.zeros([N, Dout,", "self.tx2[n, :] cav_v = 1.0 / cav_x2 cav_m = cav_v * cav_x1 return", "p_idlogZ_dvi = p_i[:, np.newaxis, :] * dv[:, :, np.newaxis] temp1 = - np.einsum('nda,ndb->ab',", "Din (TYPE): Description Dout (TYPE): Description Kuu (TYPE): Description Kuuinv (TYPE): Description ls", "= 
np.einsum('ndab,ndb->nda', Suinvnew, munew) t2_frac = Suinvnew - Suinvhat t1_frac = SuinvMunew -", "np.arange(self.N) y = self.y_train[idxs, :] cav_m, cav_v, _, _ = self.compute_cavity_x(idxs, alpha) (m,", "= \\ self.compute_cavity_x_sequential(self.PREV, [n + 1], alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\", "self.sf = 0 def forward_prop_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE:", "p_i) KuuinvVcavKuuinvKufi = np.einsum('ab,ndb->nda', Kuuinv, VcavKuuinvKufi) p_idlogZ_dvi = p_i[:, np.newaxis, :] * dv[:,", "emission lik if isinstance(self.emi_layer, Gauss_Layer) and epoch == 0: up_1, up_2 = self.emi_layer.compute_factor(", "v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) if find_log_lik: N = self.N", "= self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 for n", "update_hypers(self, params, x_train, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix (str, optional): Description", "compute_psi_weave( 2 * self.ls, 2 * self.sf, mx, vx, self.zu) mout = np.einsum('nm,ndm->nd',", "if x_train is None: ls = np.log(np.ones((Din, )) + 0.1 * np.random.rand(Din, ))", "= self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( [n], alpha,", "self.y_train[n, :].reshape([1, self.Dout]) xn = self.x_train[n, :].reshape([1, self.Din]) (mn, vn, extra_res) = \\", "self.NEXT: cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :] cav_next_2 = self.x_up_2[idxs, :] +", "M]) self.SuinvMu = np.zeros([Dout, M, ]) self.Suinv = np.zeros([Dout, M, M]) self.Splusmm =", "Ahat and Bhat dAhat = np.einsum('nd,nm->ndm', dm, kfu) dBhat = np.einsum('nd,na,nb->ndab', dv, kfu,", "Description \"\"\" # compute the posterior approximation if new_hypers and x_train is not", "vx = 1.0 / post_2 mx = post_1 / post_2 return mx, vx", "= decay * cur_t2 + (1 - decay) * 
tx2_new self.tx1[n, :] =", "= SuinvMunew - SuinvMuhat t1_old = self.t1[n, :, :] t2_old = self.t2[n, :,", "self.beta = np.zeros([Dout, M, M]) # numpy variable for inducing points, Kuuinv, Kuu", "# return mf, vf # def sample_f(self, inputs, no_samples=1): # \"\"\"Summary # Args:", "# lik_params = self.lik_layer.get_hypers() # params = dict(sgp_params) # params.update(lik_params) # return params", "no_sweeps-1) m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity(idxs, alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z(", "\"\"\" # update kuu and kuuinv ls = self.ls sf = self.sf Dout", "np.einsum('ab,ndb->nda', Kuuinv, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) - Kuuinv", "Description x (TYPE): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" a", "0: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 =", "idxs.shape[0] sf2 = np.exp(2 * self.sf) Dout = self.Dout Kuu = self.Kuu Kuuinv", "dAhat) grad_hyper = {} grad_cav = {'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_cav", "B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv kff = np.exp(2", "self.mu[0, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[0, :, :]))) # axs[1].errorbar(self.zu[:, 0]+0.05, self.mu[1, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[1,", "# K = no_samples # fs = np.zeros((inputs.shape[0], self.Dout, K)) # # TODO:", "N (TYPE): Description sgp_layer (TYPE): Description t01 (TYPE): Description t02 (TYPE): Description tx1", "params = {} M = self.M Din = self.Din Dout = self.Dout params['ls'", "1, M).reshape((M, 1)), (1, Din)) else: if N < 10000: centroids, label =", "\\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( mn, vn, dmn, dvn,", "# TODO: minibatch parallel idxs = np.arange(self.N) y = self.y_train[idxs, :] x =", "phi_cavity += 0.5 * \\ np.sum(self.muhat * 
np.linalg.solve(self.Suhat, self.muhat)) return phi_cavity def compute_phi(self,", "M]) self.beta = np.zeros([Dout, M, M]) # numpy variable for inducing points, Kuuinv,", "t1_new = (1.0 - alpha) * t1_old + t1_frac t2_new = (1.0 -", "params['zu' + key_suffix] = self.zu return params def update_hypers(self, params, key_suffix=''): \"\"\"Summary Args:", "alpha)) mean_div_var_i_new = (mean_i_new / var_i_new + mean_i / variance_i * (1 -", "sf params['ls' + key_suffix] = ls params['zu' + key_suffix] = zu return params", "dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper =", "mean_i wnScav = np.einsum('na,ndab->ndb', p_i, Sucav) wnScavwn = np.einsum('ndb,nb->nd', wnScav, p_i) wnScavSinvm =", "base models! \"\"\" import sys import math import numpy as np import scipy.linalg", "alpha (TYPE): Description grad_cav (TYPE): Description cav_m (TYPE): Description cav_v (TYPE): Description decay", "scale_logZ = - self.N * 1.0 / batch_size / alpha # scale_logZ =", "is None: ls = np.log(np.ones((Din, )) + 0.1 * np.random.rand(Din, )) sf =", "mout = np.einsum('nm,ndm->nd', kfu, Ahat) Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu) vout =", "cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT: cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs,", "mout = np.einsum('nm,ndm->nd', psi1, Ahat) Bhatpsi2 = np.einsum('ndab,nab->nd', Bhat, psi2) vout = psi0", "* np.einsum('ndab,ndb->nda', dvcav, muhat) \\ + np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_input", "= self.x_up_2[idxs, :] + self.x_prev_2[idxs, :] cav_next_1 += (1 - alpha) * self.x_next_1[idxs,", "cav_t_m, cav_t_v, alpha) grad_hyper, grad_cav = self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop, dvprop, extra_res, cav_tm1_m,", "Description lik_layer (TYPE): Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description", "mu) S = self.Dout * Kuuinv - np.sum(np.einsum('ab,dbc->dac', Kuuinv, 
np.einsum('dab,bc->dac', Vmm, Kuuinv)), axis=0)", "Description \"\"\" if vx is None: return self._forward_prop_deterministic_thru_cav(n, mx, alpha) else: return self._forward_prop_random_thru_cav_mm(n,", "logdetSu = np.linalg.slogdet(Su) signKuu, logdetKuu = np.linalg.slogdet(Kuu) Suinvm = np.einsum('dab,db->da', Suinv, mu) term1", "temp1 = - np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi, p_idlogZ_dvi) temp2 = np.transpose(temp1, [0, 1]) temp3 =", "self.N) cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2 = self.x_up_2[idxs, :] +", "cav_t_1, cav_t_2 = \\ self.compute_cavity_x(self.PREV, alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x(self.NEXT,", "logdet return logZ_prior def compute_phi_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet) =", "tn = 1.0 / variance_i gn = mean_i wnScav = np.einsum('na,ndab->ndb', p_i, Sucav)", "wnScav = np.einsum('na,ndab->ndb', p_i, Sucav) wnScavwn = np.einsum('ndb,nb->nd', wnScav, p_i) wnScavSinvm = np.sum(wnScav", "Suinvhat def update_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" # compute the posterior approximation", "# if k % display_steps == 0: # print 'PEP, epoch: %d /", "run_pep_parallel(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size = idxs.shape[0] scale_logZ = - self.N", "\\ self.compute_cavity_x_sequential(self.NEXT, [n], alpha) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( [n], cav_tm1_m, cav_tm1_v,", "idxs, m, v, dm, dm2, dv, x, alpha, prop_info): N = self.N Nb", "decay): \"\"\"Summary Args: epoch (TYPE): Description alpha (TYPE): Description decay (TYPE): Description Returns:", "self.x_train[idxs, :] yb = self.y_train[idxs, :] for k in range(no_sweeps): if k %", "posterior approximation if new_hypers and x_train is not None: Kfu = compute_kernel(2*self.ls, 2*self.sf,", "self.t1[n, :, :] = decay * t1_old + (1 - decay) * t1_new", "self.tx2 = post_2 - self.t02 def inference(self, 
alpha=1.0, no_epochs=10, parallel=False, decay=0): \"\"\"Summary Args:", "Din = input_size self.Dout = Dout = output_size self.M = M = no_pseudo", "Din)) self.x_prev_2 = np.zeros((N, Din)) self.x_next_1 = np.zeros((N, Din)) self.x_next_2 = np.zeros((N, Din))", ":] # update model with new hypers self.update_hypers(params) # run power-EP and compute", "SuinvMuhat, Suinvhat, kfu, Ahat, Bhat] return mout, vout, extra_res def _forward_prop_random_thru_cav_mm(self, n, mx,", "m_prop exp_term = -0.5 * m_diff**2 / v_sum const_term = -0.5 * np.log(2", "update Kuu given new hypers self.compute_kuu() # compute mu and Su for each", "# compute grads wrt Ahat and Bhat dAhat = np.einsum('nd,nm->ndm', dm, kfu) dBhat", "2*self.sf, self.zu, S, Kuu - np.diag(JITTER * np.ones(self.M))) grads = {} grads['sf'] =", "(TYPE): Description idxs (TYPE): Description decay (float, optional): Description alpha (float, optional): Description", "0]+0.05, self.mu[1, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[1, :, :]))) # axs[0].set_title('n = %d' % n[0])", "* np.log(alpha) logZ = exp_term + const_term + alpha_term dvt = -0.5 /", "not self.updated: # self.sgp_layer.update_posterior() # self.updated = True # K = no_samples #", "axis=2)) hd1 = h_si * dlogZd_dmi[:, :, np.newaxis] hd2h = np.einsum('nda,ndb->ndab', h_si, h_si)", "(TYPE): Description v_t (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" sn2", "{} grads['sf'] = 2*dhyp[0] + dsf grads['ls'] = 2*dhyp[1] + dls grads['zu'] =", "= input_size self.Dout = Dout = output_size self.M = M = no_pseudo self.N", "self.UP: cav_up_1 = self.x_prev_1 + self.x_next_1 + \\ (1 - alpha) * self.x_up_1", "(1 - alpha) * self.x_up_1 cav_up_2 = self.x_prev_2 + self.x_next_2 + \\ (1", "\"\"\"Summary Returns: TYPE: Description \"\"\" mx, vx = self.get_posterior_x() my, vy, vyn =", "alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res) else: # parallel update for entire dataset", "= np.zeros([M, Din]) self.Kuu = 
np.zeros([M, M]) self.Kuuinv = np.zeros([M, M]) # variables", "post_m = (post_m - post_m_mean) / post_m_std post_v = 0.1 * np.ones_like(post_m) post_2", "new_hypers=False): \"\"\"Summary Returns: TYPE: Description \"\"\" # compute the posterior approximation if new_hypers", "mean_i = self.means[idxs, :] # compute cavity covariance betacavKuu = np.einsum('ndab,bc->ndac', beta_si, Kuu)", "# Returns: # TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params) # self.emi_layer.update_hypers(params) # self.sn", "Description Su (TYPE): Description Suinv (TYPE): Description SuinvMu (TYPE): Description t1 (TYPE): Description", "epsilon) return f_sample def compute_kuu(self): \"\"\"Summary Returns: TYPE: Description \"\"\" # update kuu", "if not self.updated: # self.sgp_layer.update_posterior() # self.updated = True # mf, vf =", "v_t + v_prop + sn2 / alpha m_diff = m_t - m_prop exp_term", "Description \"\"\" # update kuu and kuuinv ls = self.ls sf = self.sf", "\"\"\" N = self.N scale_post = N * 1.0 / alpha - 1.0", "np.zeros([N, Dout]) # pep variables self.gamma = np.zeros([Dout, M]) self.beta = np.zeros([Dout, M,", "cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x_sequential( self.NEXT, grad_cav['mx'], grad_cav['vx'],", "deal with the dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x(self.PREV,", "deal with the dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x_sequential(self.PREV,", "# TODO def _forward_prop_random_thru_post_mm(self, mx, vx): \"\"\"Summary Args: mx (TYPE): Description vx (TYPE):", "= post_1 / post_2 return mx, vx def get_posterior_y(self): \"\"\"Summary Returns: TYPE: Description", "S, Kuu - np.diag(JITTER * np.ones(self.M))) grads = {} grads['sf'] = 2*dhyp[0] +", "= decay * cur_n2 + (1 - decay) * n2_new else: raise NotImplementedError('unknown", "t2_old + (1 - decay) * t2_new self.update_posterior() 
# axs[0].errorbar(self.zu[:, 0]+0.05, self.mu[0, :],", "base_models import Base_SGPR, Base_SGPLVM, Base_SGPSSM from config import * class SGP_Layer(object): \"\"\"Summary Attributes:", "mode == self.NEXT: cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :] cav_next_2 = self.x_up_2[idxs,", "return my, vy # def update_hypers(self, params): # \"\"\"Summary # Args: # params", "def __init__(self, no_train, input_size, output_size, no_pseudo): \"\"\"Summary Args: no_train (TYPE): Description input_size (TYPE):", "SuinvMu (TYPE): Description t1 (TYPE): Description t2 (TYPE): Description zu (TYPE): Description \"\"\"", "self.Su, T1u) self.gamma = np.einsum('ab,db->da', self.Kuuinv, self.mu) self.beta = self.Kuuinv - np.einsum('ab,dbc->dac', self.Kuuinv,", "Args: mode (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description Raises: NotImplementedError: Description", "1.0 / cav_v frac_t1 = new_p1 - cav_m / cav_v # neg_idxs =", "q(U) N = self.N M = self.M Din = self.Din Dout = self.Dout", "self.t01 + self.tx1 post_2 = self.t02 + self.tx2 vx = 1.0 / post_2", "self.variances[idxs, :] mean_i = self.means[idxs, :] var_i_new = -1.0 / dm2 - np.sum(k_i[:,", "n2_ori = mean_i / variance_i # n1_damped = rho * n1_new + (1.0", "- np.dot(qfu, kfu.T) Lf = np.linalg.cholesky(vf) epsilon = np.random.randn(x.shape[0], self.Dout) f_sample = mf", "no_pseudo, lik='Gaussian'): \"\"\"Summary Args: x_train (TYPE): Description y_train (TYPE): Description no_pseudo (TYPE): Description", "__init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1): \"\"\"Summary Args: y_train (TYPE): Description hidden_size", "lik_grad[key] energy /= N for key in grad_all.keys(): grad_all[key] /= N return energy,", "self.compute_phi_posterior() phi_cav = self.compute_phi_cavity() phi = scale_prior * phi_prior + scale_post * phi_post", "np.mean(post_m, axis=0) post_m_std = np.std(post_m, axis=0) post_m = (post_m - post_m_mean) / post_m_std", 
"compute_phi_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Su) phi_posterior = 0.5", "(TYPE): Description alpha (float, optional): Description prop_mode (TYPE, optional): Description Returns: TYPE: Description", "get_hypers(self, key_suffix=''): \"\"\"Summary Args: key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" params", "Description t1 (TYPE): Description t2 (TYPE): Description zu (TYPE): Description \"\"\" def __init__(self,", "lik_layers import Gauss_Layer, Probit_Layer, Gauss_Emis from base_models import Base_SGPR, Base_SGPLVM, Base_SGPSSM from config", "self.dyn_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x_sequential( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1,", "self.update_factor_x( [n], alpha, grad_cav, cav_m_n, cav_v_n, decay=decay) else: # parallel update for entire", "mean_i = self.means[idxs, :] dlogZd_dmi2 = 1.0 / (variance_i/alpha - np.sum(k_i[:, np.newaxis, :]", "idxs = np.arange(self.N) y = self.y_train[idxs, :] cav_m, cav_v, _, _ = self.compute_cavity_x(idxs,", "self.lik_layer.update_hypers(params) # def init_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description # \"\"\"", "decay * cur_n1 + (1 - decay) * n1_new self.x_next_2[idxs, :] = decay", "= 0 xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] energy = {}", "new_m = cav_m + cav_v * dmx new_v = cav_v - cav_v**2 *", "ssm_params = {'sn': np.log(0.001)} # init_params = dict(sgp_params) # init_params.update(lik_params) # init_params.update(ssm_params) #", "= 1.0 / prior_var # TODO: alternatitve method for non real-valued data post_m", "y_train, no_pseudo, lik='Gaussian'): \"\"\"Summary Args: x_train (TYPE): Description y_train (TYPE): Description no_pseudo (TYPE):", "return logZ_prior def compute_phi_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Su)", "= self.beta h_si = 
p_i - np.einsum('dab,nb->nda', beta, k_i) variance_i = self.variances[idxs, :]", "= decay * cur_n1 + (1 - decay) * n1_new self.x_prev_2[idxs, :] =", "cur_t1 = self.tx1[n, :] cur_t2 = self.tx2[n, :] tx1_new = (1 - alpha)", "dict(sgp_params) # init_params.update(lik_params) # init_params.update(ssm_params) # return init_params # def get_hypers(self): # \"\"\"Summary", "v_sum = v_t + v_prop + sn2 / alpha m_diff = m_t -", "(TYPE): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" a = extra_args", "logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm(", "self.Splusmm = np.zeros([Dout, M, M]) # numpy variable for inducing points, Kuuinv, Kuu", "range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) cav_m_n, cav_v_n, _, _ = self.compute_cavity_x([ n],", "cur_n2 + (1 - decay) * n2_new elif mode == self.PREV: cur_n1 =", "def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0.5): \"\"\"Summary Args: alpha (float, optional): Description no_epochs", "np.einsum('ndab,ndb->nda', dvcav, muhat) \\ + np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_input =", "= self.t2[n, :, :, :] t1_new = (1.0 - alpha) * t1_old +", "# TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.init_hypers() # lik_params = self.lik_layer.init_hypers()", "= {'sn': np.log(0.001)} # init_params = dict(sgp_params) # init_params.update(lik_params) # init_params.update(ssm_params) # return", "alpha=alpha) self.dyn_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v,", "no_samples # fs = np.zeros((inputs.shape[0], self.Dout, K)) # # TODO: remove for loop", "var_new_parallel self.means[idxs, :] = mean_new_parallel def backprop_grads_reg(self, idxs, m, v, dm, dm2, dv,", "extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], xn, alpha=alpha) logZn, dmn, dvn = \\ 
self.lik_layer.compute_log_Z(mn,", "layer self.update_posterior() class SGPR(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer", "50 == 0: print 'epoch %d/%d' % (e, no_epochs) if not parallel: for", "self.tx1 post_2 = self.t02 + self.tx2 vx = 1.0 / post_2 mx =", "super(SGPLVM, self).__init__(y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M)", "cav_v, _, _ = self.compute_cavity_x(idxs, alpha) (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs,", "self.N * 1.0 / batch_size / alpha # scale_logZ = 0 xb =", "dvx, 'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_input def backprop_grads_reg(self, m, v, dm,", "kmeans2(x_train, M, minit='points') else: randind = np.random.permutation(N) centroids = x_train[randind[0:M], :] zu =", "alpha, compute_dm2=True) # dm *= 0 # dm2 *= 0 # dm2 +=", "= 1.0 / new_v new_p1 = new_p2 * new_m frac_t2 = new_p2 -", ":, np.newaxis] hd2h = np.einsum('nda,ndb->ndab', h_si, h_si) * dlogZd_dmi2[:, :, np.newaxis, np.newaxis] gamma_si", "self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( mn, vn, dmn, dvn, extra_res,", "self.t2[n, :, :, :] = decay * t2_old + (1 - decay) *", "update_factor_x_sequential( self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav, idxs, decay=0.0, alpha=1.0): \"\"\"Summary", "self.sf = params['sf' + key_suffix] self.zu = params['zu' + key_suffix] # update Kuu", "alpha=1.0, prop_mode=PROP_MM): \"\"\"Summary Args: params (TYPE): Description mb_size (TYPE): Description alpha (float, optional):", "params): # \"\"\"Summary # Args: # params (TYPE): Description # Returns: # TYPE:", "k_i = self.Kfu[idxs, :] k_ii = self.Kff_diag[idxs][:, np.newaxis] gamma = self.gamma beta =", "* tn**2 * gn**2 * wnScavwn term2b = - gn * tn *", "dBhat, Kuuinv) dmcav = 2 * np.einsum('ndab,ndb->nda', dvcav, muhat) \\ + np.einsum('ab,ndb->nda', Kuuinv,", "(None, 
optional): Description key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" # dict", "minimize import matplotlib.pyplot as plt import time import pdb from scipy.cluster.vq import kmeans2", "% 50 == 0: print 'epoch %d/%d' % (e, no_epochs) if parallel: self.inf_parallel(e,", "optional): Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\" new_m = mcav +", "= self.zu params['zu' + key_suffix] = self.zu return params def update_hypers(self, params, key_suffix=''):", "# sgp_params = self.sgp_layer.get_hypers() # lik_params = self.lik_layer.get_hypers() # params = dict(sgp_params) #", "\"\"\" (sign, logdet) = np.linalg.slogdet(self.Su) phi_posterior = 0.5 * np.sum(logdet) phi_posterior += 0.5", "mout, vout def backprop_grads_lvm(self, m, v, dm, dv, extra_args, mx, vx, alpha=1.0): \"\"\"Summary", "no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1): \"\"\"Summary Args: y_train (TYPE): Description hidden_size (TYPE): Description no_pseudo", "if not parallel: for n in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) xn", "self.PREV: idxs = np.arange(1, self.N) cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :]", "= self.M Din = self.Din Dout = self.Dout if x_train is None: ls", "# params.update(ssm_params) # return params class SGP_Layer_rank_one(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout", "Description vx (TYPE): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" N", "inducing points, Kuuinv, Kuu and its gradients self.zu = np.zeros([M, Din]) self.Kuu =", "wrt psi1 and psi2 dpsi1 = np.einsum('nd,ndm->nm', dm_all, Ahat) dpsi2 = np.einsum('nd,ndab->nab', dv,", "Description mx (TYPE): Description vx (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description", "decay) * n1_new self.x_prev_2[idxs, :] = decay * cur_n2 + (1 - decay)", "dvx) new_p2 = 1.0 / new_v new_p1 = new_p2 * new_m frac_t2 =", "* n2_new elif mode == self.PREV: 
cur_n1 = self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs,", "self.M) if gp_emi: self.emi_layer = SGP_Layer( self.N, self.Din + self.Dcon_emi, self.Dout, self.M) #", "k_i) m_si_i = np.einsum('na,nda->nd', k_i, gamma_si) v_si_ii = k_ii - np.einsum('na,ndab,nb->nd', k_i, beta_si,", "cav_up_2[0, :] += self.x_prior_2 return cav_up_1 / (cav_up_2 + 1e-16), 1.0 / (cav_up_2", "no_sweeps, alpha, compute_energy, display_steps): batch_size = idxs.shape[0] scale_logZ = - self.N * 1.0", "alpha=alpha) self.update_factor_x_sequential( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, [n + 1], decay=decay,", ":] = decay * t2_old + (1 - decay) * t2_new self.update_posterior() #", "tn * wnSwn term2a = 0.5 * alpha * tn**2 * gn**2 *", "alpha) Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Smm = Suhat +", "_, _ = self.compute_cavity_x(idxs, alpha) (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, cav_m,", ":] = decay * t1_old + (1 - decay) * t1_new self.t2[n, :,", "def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1 = self.x_next_1 + self.x_prev_1 +", "KuuinvKuf_div_var, self.KuuinvKuf) T1u = np.einsum('bn,nd->db', self.KuuinvKuf, self.means / self.variances) Vinv = self.Kuuinv +", "\"\"\"Summary Returns: TYPE: Description \"\"\" # compute the posterior approximation if new_hypers and", "= self.compute_phi_cavity() phi = scale_prior * phi_prior + scale_post * phi_post + scale_cav", "np.log(2 * np.pi * sn2) - 0.5 * np.log(alpha) logZ = exp_term +", "dv, extra_res, x, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) except KeyboardInterrupt: print", "np.zeros([Dout, M]) self.beta = np.zeros([Dout, M, M]) # numpy variable for inducing points,", "Description N (TYPE): Description sgp_layer (TYPE): Description updated (bool): Description x_train (TYPE): Description", "grad_cav, cav_m_n, cav_v_n, decay=decay) else: # parallel update for entire dataset # TODO:", "idxs = 
np.arange(0, self.N - 1) cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs,", "np.diag(JITTER * np.ones(self.M))) grads = {} grads['sf'] = 2*dhyp[0] + dsf grads['ls'] =", "v_si_ii = k_ii - np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i) return m_si_i, v_si_ii, [h, beta_si,", "(float, optional): Description no_epochs (int, optional): Description parallel (bool, optional): Description decay (int,", "/ oneminuswnSwn term2d = -0.5 / alpha * np.log(oneminuswnSwn) term2 = N /", "(int): Description Splusmm (TYPE): Description Su (TYPE): Description Suinv (TYPE): Description SuinvMu (TYPE):", "no_epochs=10, parallel=False, decay=0): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int, optional): Description", ":] cav_m, cav_v, _, _ = self.compute_cavity_x(idxs, alpha) (m, v, extra_res) = \\", "np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi, p_idlogZ_dvi) temp2 = np.transpose(temp1, [0, 1]) temp3 = np.einsum('na,ndb->ab', p_i, p_idlogZ_dvi)", "Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" a = extra_args muhat,", "key_suffix] # update Kuu given new hypers self.compute_kuu() # compute mu and Su", "\\ self.sgp_layer.forward_prop_thru_cav( [n], cav_m_n, cav_v_n, alpha=alpha) logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn,", "= self.means[idxs, :] # compute cavity covariance betacavKuu = np.einsum('ndab,bc->ndac', beta_si, Kuu) mcav", "params['zu' + key_suffix] = self.zu return params def update_hypers(self, params, x_train, key_suffix=''): \"\"\"Summary", "mwn**2 / oneminuswnSwn term2d = -0.5 / alpha * np.log(oneminuswnSwn) term2 = N", "alpha == 1: # rho = 0.5 # n1_new = 1.0 / var_new_parallel", "/ Nb * np.sum(term2a + term2b + term2c + term2d) sgp_contrib = -", "decay) * n1_new self.x_next_2[idxs, :] = decay * cur_n2 + (1 - decay)", "= np.einsum('nm,ndm->nd', kfu, Ahat) Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu) vout = kff", "mout, vout, extra_res def _forward_prop_random_thru_cav_mm(self, n, 
mx, vx, alpha): \"\"\"Summary Args: n (TYPE):", "cur_n1 + (1 - decay) * n1_new self.x_next_2[idxs, :] = decay * cur_n2", "self.gamma = np.einsum('ab,db->da', self.Kuuinv, self.mu) self.beta = self.Kuuinv - np.einsum('ab,dbc->dac', self.Kuuinv, np.einsum('dab,bc->dac', self.Su,", "dvcav) new_n2 = 1.0 / new_v new_n1 = new_n2 * new_m frac_n2 =", "k] = self.sgp_layer.sample(inputs) # return fs # def predict_y(self, inputs): # \"\"\"Summary #", "class SGP_Layer(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description Kuu (TYPE): Description", "1e-16), cav_up_1, cav_up_2 elif mode == self.PREV: cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs,", "parallel=False, decay=0.5): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int, optional): Description parallel", "Description vcav (TYPE): Description n1cav (TYPE): Description n2cav (TYPE): Description idxs (TYPE): Description", "dm_s, dm2_s, dv_s, xb, alpha, prop_info) lik_grad = self.lik_layer.backprop_grads( m_cav, v_cav, dm, dv,", "v, dm, dm2, dv, x, alpha, prop_info): N = self.N Nb = idxs.shape[0]", "= self.Suinv p_i = self.KuuinvKuf[:, idxs].T h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2]", "= np.einsum('nd,na,nb->ndab', dv, kfu, kfu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav =", "alpha) * self.x_up_1 cav_up_2 = self.x_prev_2 + self.x_next_2 + \\ (1 - alpha)", "SGP_Layer_rank_one(self.N, self.Din, self.Dout, self.M) def objective_function(self, params, mb_size, alpha=1.0, prop_mode=PROP_MM): \"\"\"Summary Args: params", "(1 - decay) * t1_new self.t2[n, :, :, :] = decay * t2_old", "wnScavwn = np.einsum('ndb,nb->nd', wnScav, p_i) wnScavSinvm = np.sum(wnScav * Suinvm, axis=2) wnS =", "+ self.tx1 post_2 = self.t02 + self.tx2 vx = 1.0 / post_2 mx", "prop_info): h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2] k_i = self.Kfu[idxs, :] variance_i", "alpha) def _forward_prop_deterministic_thru_cav(self, n, x, alpha): 
\"\"\"Summary Args: n (TYPE): Description x (TYPE):", "zu return params def get_hypers(self, key_suffix=''): \"\"\"Summary Args: key_suffix (str, optional): Description Returns:", "cav_up_m, cav_up_v, _, _ = self.compute_cavity_x(self.UP, alpha) if not self.gp_emi: # only do", "= np.exp(2 * self.sf) psi1, psi2 = compute_psi_weave( 2 * self.ls, 2 *", "self.zu = params['zu' + key_suffix] # update Kuu given new hypers self.compute_kuu() #", "self.SuinvMu - alpha * t1n Suhat = np.linalg.inv(Suinvhat) muhat = np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat)", "updated (bool): Description x_train (TYPE): Description \"\"\" def __init__(self, x_train, y_train, no_pseudo, lik='Gaussian'):", "to hold hypers, inducing points and parameters of q(U) N = self.N M", "= np.linalg.slogdet(Su) signKuu, logdetKuu = np.linalg.slogdet(Kuu) Suinvm = np.einsum('dab,db->da', Suinv, mu) term1 =", "Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description updated (bool): Description", "a[2], a[3], a[4], a[5], a[6], a[7], a[8] # compute grads wrt Ahat and", "tx1_new = (1 - alpha) * cur_t1 + frac_t1 tx2_new = (1 -", "dhyp[2] + dzu return sgp_contrib, grads class SGPR_rank_one(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description", "(post_m - post_m_mean) / post_m_std post_v = 0.1 * np.ones_like(post_m) post_2 = 1.0", "self.ls, 2 * self.sf, x, self.zu) qfu = np.dot(kfu, self.Kuuinv) mf = np.einsum('nm,dm->nd',", "(1 - decay) * n1_new self.x_next_2[idxs, :] = decay * cur_n2 + (1", "\"\"\" sn2 = np.exp(2 * self.sn) v_sum = v_t + v_prop + sn2", "n], alpha) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], cav_m_n, cav_v_n, alpha=alpha) logZn,", "here # for k in range(K): # fs[:, :, k] = self.sgp_layer.sample(inputs) #", "self.M) u_sample = mu + np.einsum('dab,db->da', Lu, epsilon) kff = compute_kernel(2 * self.ls,", "== 0: print 'PEP, epoch: %d / %d' % (k, no_sweeps) find_log_lik =", "== self.PREV: idxs = np.arange(1, self.N) cav_prev_1 = 
self.x_up_1[idxs, :] + self.x_next_1[idxs, :]", "ls, sf2, mx, vx, zu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav =", "Raises: NotImplementedError: Description \"\"\" new_m = mcav + vcav * dmcav new_v =", "wnScavwn term2b = - gn * tn * wnScavSinvm term2c = 0.5 *", "dv (TYPE): Description extra_args (TYPE): Description mx (TYPE): Description vx (TYPE): Description alpha", "dvn, extra_res, xn, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res) else: # parallel update", "extra_res, decay=decay) self.update_factor_x( idxs, alpha, grad_cav, cav_m, cav_v, decay=decay) except KeyboardInterrupt: print 'Caught", "\"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer (TYPE): Description M (TYPE):", "get_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description # \"\"\" # sgp_params =", "self.variances[idxs, :] = var_new_parallel self.means[idxs, :] = mean_new_parallel def backprop_grads_reg(self, idxs, m, v,", "= self.Dout * Kuuinv - np.sum(np.einsum('ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Vmm, Kuuinv)), axis=0) S =", "# Returns: # TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.init_hypers() # lik_params", "cav_tm1_1, cav_tm1_2, decay=decay, alpha=alpha) self.update_factor_x( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, decay=decay,", "m, v, dm, dm2, dv, x, alpha, prop_info): N = self.N Nb =", "self.SuinvMu = np.sum(self.t1, axis=0) self.Su = np.linalg.inv(self.Suinv) self.mu = np.einsum('dab,db->da', self.Su, self.SuinvMu) def", "lik (str, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR_rank_one, self).__init__(x_train, y_train, no_pseudo, lik)", "= 0.5 # n1_new = 1.0 / var_new_parallel # n2_new = mean_new_parallel /", "term2a = 0.5 * alpha * tn**2 * gn**2 * wnScavwn term2b =", "h_si) * dlogZd_dmi2[:, :, np.newaxis, np.newaxis] gamma_si = gamma + hd1 beta_si =", "self.mu[1, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[1, :, :]))) # 
axs[0].set_title('n = %d' % n[0]) #", "* dsf2 # compute the gradients Vmm = Su + np.einsum('da,db->dab', mu, mu)", "ls = np.zeros((Din, )) d2imed = np.median(x_dist[triu_ind]) for i in range(Din): ls[i] =", "* dv * m dAhat = np.einsum('nd,nm->ndm', dm_all, psi1) dBhat = np.einsum('nd,nab->ndab', dv,", "np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat) return muhat, Suhat, SuinvMuhat, Suinvhat def update_posterior(self): \"\"\"Summary Returns: TYPE:", "v_cav, yb[i], alpha, compute_dm2=True) self.sgp_layer.update_factor( [idxs[i]], m_cav, v_cav, dm, dm2, dv, alpha, prop_info)", "post_m_mean = np.mean(post_m, axis=0) post_m_std = np.std(post_m, axis=0) post_m = (post_m - post_m_mean)", "class SGPR_rank_one(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer (TYPE): Description", "self.x_train[n, :].reshape([1, self.Din]) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], xn, alpha=alpha) logZn,", ":] mean_i = self.means[idxs, :] dlogZd_dmi2 = 1.0 / (variance_i/alpha - np.sum(k_i[:, np.newaxis,", "yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( mn, vn, dmn, dvn, extra_res, cav_m_n, cav_v_n,", "update_factor(self, idxs, mcav, vcav, dm, dm2, dv, alpha, prop_info): h, beta_si, gamma_si =", "= 10 # TODO: put this in config parallel = True # TODO:", "= self.ls params['sf' + key_suffix] = self.sf params_zu_i = self.zu params['zu' + key_suffix]", "print 'epoch %d/%d' % (e, no_epochs) if parallel: self.inf_parallel(e, alpha, decay) else: self.inf_sequential(e,", "idxs, alpha, grad_cav, cav_m, cav_v, decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' 
def", "= post_1 - self.t01 self.tx2 = post_2 - self.t02 def inference(self, alpha=1.0, no_epochs=10,", "np.zeros([Dout, M, M]) self.SuinvMu = np.zeros([Dout, M, ]) self.Suinv = np.zeros([Dout, M, M])", "gamma, axis=2)) hd1 = h_si * dlogZd_dmi[:, :, np.newaxis] hd2h = np.einsum('nda,ndb->ndab', h_si,", "\"\"\"Summary Args: params (TYPE): Description mb_size (TYPE): Description alpha (float, optional): Description prop_mode", "lik (TYPE): Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description sn", "decay * cur_t2 + (1 - decay) * tx2_new self.tx1[n, :] = tx1_new", "Suinvnew = np.linalg.inv(Sunew) SuinvMunew = np.einsum('ndab,ndb->nda', Suinvnew, munew) t2_frac = Suinvnew - Suinvhat", "Suinv, mu) term1 = 0.5 * (np.sum(logdetSu) - Dout * logdetKuu + np.sum(mu", "\\ self.dyn_layer.forward_prop_thru_cav( idxs, cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt, dvt = \\", "no_epochs (int, optional): Description parallel (bool, optional): Description decay (int, optional): Description Returns:", "parallel: return self.run_pep_parallel( train_idxs, no_sweeps, alpha, compute_energy, display_steps) else: # TODO return self.run_pep_sequential(", "(TYPE): Description dm (TYPE): Description dv (TYPE): Description extra_args (TYPE): Description mx (TYPE):", "Returns: TYPE: Description \"\"\" if vx is None: return self._forward_prop_deterministic_thru_post(mx) else: return self._forward_prop_random_thru_post_mm(mx,", "+ key_suffix] # update Kuu given new hypers self.compute_kuu() # compute mu and", "mu and Su for each layer self.update_posterior() class SGPR(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE):", "return init_params # def get_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description #", "mcav + vcav * dmcav new_v = vcav - vcav**2 * (dmcav**2 -", "= prior_mean / prior_var self.t02 = 1.0 / prior_var # TODO: alternatitve method", "= 0.5 * tn * mwn**2 / oneminuswnSwn term2d = -0.5 / alpha", "= self.compute_cavity_x(self.UP, 
alpha) if not self.gp_emi: # only do this once at the", "dvcav, mcav, vcav, n1cav, n2cav, idxs, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description", "(1 - alpha) * cur_n2 + frac_n2 self.x_next_1[idxs, :] = decay * cur_n1", ":, :, :] self.SuinvMu = SuinvMunew[0, :, :] else: # parallel update self.t1[n,", "mode == self.PREV: cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2 = self.x_up_2[idxs,", "+ scale_cav * phi_cav return phi def forward_prop_thru_cav(self, n, mx, vx=None, alpha=1.0): \"\"\"Summary", "= self.x_prev_2 + self.x_next_2 + \\ (1 - alpha) * self.x_up_2 cav_up_1[0, :]", "self.Su + np.einsum('da,db->dab', self.mu, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv))", "= idxs.shape[0] scale_logZ = - self.N * 1.0 / batch_size / alpha #", "self.means[idxs, :] tn = 1.0 / variance_i gn = mean_i wnScav = np.einsum('na,ndab->ndb',", "\\ self.compute_cavity_x(self.NEXT, alpha) idxs = np.arange(self.N - 1) (mprop, vprop, extra_res) = \\", "* self.sf, x, self.zu) qfu = np.dot(kfu, self.Kuuinv) mf = np.einsum('nm,dm->nd', qfu, u_sample)", "X1 = np.copy(x_train) else: randind = np.random.permutation(N) X1 = X[randind[:5000], :] x_dist =", "is None: return self._forward_prop_deterministic_thru_cav(n, mx, alpha) else: return self._forward_prop_random_thru_cav_mm(n, mx, vx, alpha) def", "extra_args (TYPE): Description x (TYPE): Description alpha (float, optional): Description Returns: TYPE: Description", "Lf, epsilon) return f_sample def compute_kuu(self): \"\"\"Summary Returns: TYPE: Description \"\"\" # update", "np.sum(dKfu_via_mi + dKfu_via_vi, axis=1) dsf2, dls, dzu = compute_kfu_derivatives( dKfu_via_logZ, kfu, np.exp(self.ls), sf2,", "a[6] Kuuinv = self.Kuuinv # compute grads wrt Ahat and Bhat dAhat =", "p_i, p_idlogZ_dvi) dKuu_via_vi = temp1 + temp2 + temp3 dKuu_via_logZ = dKuu_via_mi +", "np.exp(self.ls), sf2, x, self.zu) dls = dls * np.exp(self.ls) dsf2 += np.sum(dv) dsf", "= 
self.lik_layer.compute_log_Z( m_cav, v_cav, yb[i], alpha, compute_dm2=True) self.sgp_layer.update_factor( [idxs[i]], m_cav, v_cav, dm, dm2,", "TYPE: Description \"\"\" # compute the posterior approximation if new_hypers and x_train is", "= np.einsum('ndab,bc->ndac', beta_si, Kuu) mcav = np.einsum('ab,ndb->nda', Kuu, gamma_si) Sucav = Kuu -", "np.sum(logdet) phi_cavity += 0.5 * \\ np.sum(self.muhat * np.linalg.solve(self.Suhat, self.muhat)) return phi_cavity def", "Ahat and Bhat dm_all = dm - 2 * dv * m dAhat", "mx (TYPE): Description vx (None, optional): Description Returns: TYPE: Description \"\"\" if vx", "np.sum(logZ) dm_s = scale_logZ * dm dv_s = scale_logZ * dv dm2_s =", "== self.NEXT: idxs = np.arange(0, self.N - 1) cav_next_1 = self.x_up_1[idxs, :] +", "= tx2_new def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1 = self.t01 +", "np.zeros((N, Din)) self.x_prior_1 = prior_mean / prior_var self.x_prior_2 = 1.0 / prior_var self.UP,", "x_prior_1 (TYPE): Description x_prior_2 (TYPE): Description x_up_1 (TYPE): Description x_up_2 (TYPE): Description \"\"\"", "# projection h = p_i - np.einsum('ndab,nb->nda', beta_si, k_i) m_si_i = np.einsum('na,nda->nd', k_i,", "= (1 - alpha) * cur_n1 + frac_n1 n2_new = (1 - alpha)", "variance_i * (1 - alpha)) mean_new_parallel = mean_div_var_i_new * var_new_parallel # if alpha", "mout**2 extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat] return", "= np.einsum('dab,na,nb->nd', B, kfu, kfu) vout = kff + Bpsi2 return mout, vout", "frac_n2 = new_n2 - n2cav frac_n1 = new_n1 - n1cav if mode ==", "K)) # # TODO: remove for loop here # for k in range(K):", "= np.einsum('dab,db->da', Suinv, mu) term1 = 0.5 * (np.sum(logdetSu) - Dout * logdetKuu", "np.linalg.cholesky(vf) epsilon = np.random.randn(x.shape[0], self.Dout) f_sample = mf + np.einsum('ab,bd->ad', Lf, epsilon) return", "1.0 / new_v new_n1 = new_n2 * new_m frac_n2 = new_n2 - n2cav", "dmcav) - 2 * dvcav Sunew = Suhat - 
np.einsum( 'ndab,ndbc->ndac', Suhat, np.einsum('ndab,ndbc->ndac',", "= compute_energy and (k == no_sweeps-1) for i in range(batch_size): m_cav, v_cav, prop_info", "2 * dvcav Sunew = Suhat - np.einsum( 'ndab,ndbc->ndac', Suhat, np.einsum('ndab,ndbc->ndac', inner, Suhat))", "+ (1 - decay) * n2_new else: raise NotImplementedError('unknown mode') def update_factor_x_sequential( self,", "if not self.updated: # self.sgp_layer.update_posterior() # self.updated = True # K = no_samples", "dKuu_via_vi = temp1 + temp2 + temp3 dKuu_via_logZ = dKuu_via_mi + dKuu_via_vi dKfu_via_mi", "\"\"\"Summary Args: mx (TYPE): Description vx (TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv", "dlogZd_dmi[:, :, np.newaxis] hd2h = np.einsum('nda,ndb->ndab', h_si, h_si) * dlogZd_dmi2[:, :, np.newaxis, np.newaxis]", "Description Returns: TYPE: Description \"\"\" N = self.N M = self.M ls =", "sgp_contrib = - term1 - term2 KuuinvMcav = np.einsum('ab,ndb->nda', Kuuinv, mcav) dmiKuuinvMcav =", "/ alpha * np.log(oneminuswnSwn) term2 = N / Nb * np.sum(term2a + term2b", "Dout = output_size self.M = M = no_pseudo self.N = N = no_train", "/ cav_x2 cav_m = cav_v * cav_x1 return cav_m, cav_v, cav_x1, cav_x2 def", "mx, vx, alpha): \"\"\"Summary Args: n (TYPE): Description mx (TYPE): Description vx (TYPE):", "t1n Suhat = np.linalg.inv(Suinvhat) muhat = np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat) return muhat, Suhat, SuinvMuhat,", "- cav_v**2 * (dmx**2 - 2 * dvx) new_p2 = 1.0 / new_v", "compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu) mout = np.einsum('nm,dm->nd', kfu, A)", "/ cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT: idxs = np.arange(0, self.N -", "== self.UP: cav_up_1 = self.x_prev_1 + self.x_next_1 + \\ (1 - alpha) *", "self.sf, x, self.zu) mout = np.einsum('nm,dm->nd', kfu, A) Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu,", "mx (TYPE): Description vx (None, optional): Description alpha (float, optional): Description Returns: TYPE:", "- Dout * logdetKuu + np.sum(mu * 
Suinvm)) variance_i = self.variances[idxs, :] mean_i", "params['ls' + key_suffix] = self.ls params['sf' + key_suffix] = self.sf params_zu_i = self.zu", ")) d2imed = np.median(x_dist[triu_ind]) for i in range(Din): ls[i] = np.log(d2imed + 1e-16)", "dm2 += 1e-16 # dv *= 0 self.sgp_layer.update_factor( idxs, m_cav, v_cav, dm, dm2,", "M, M]) self.SuinvMu = np.zeros([Dout, M, ]) self.Suinv = np.zeros([Dout, M, M]) self.Splusmm", "update_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" # compute the posterior approximation self.Suinv =", "p_i[:, np.newaxis, :], axis=2) oneminuswnSwn = 1 - alpha * tn * wnSwn", "= tx1_new self.tx2[n, :] = tx2_new def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\"", "- alpha) * cur_n1 + frac_n1 n2_new = (1 - alpha) * cur_n2", "vout def sample(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\"", "= np.einsum('ab,db->da', self.Kuuinv, self.mu) self.beta = self.Kuuinv - np.einsum('ab,dbc->dac', self.Kuuinv, np.einsum('dab,bc->dac', self.Su, self.Kuuinv))", "decay=decay, alpha=alpha) self.update_factor_x( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, decay=decay, alpha=alpha) def", "compute_energy, display_steps) else: # TODO return self.run_pep_sequential( train_idxs, no_sweeps, alpha, compute_energy, display_steps) def", "0.5 * np.sum(logdet) phi_cavity += 0.5 * \\ np.sum(self.muhat * np.linalg.solve(self.Suhat, self.muhat)) return", "key_suffix=''): \"\"\"Summary Args: x_train (None, optional): Description key_suffix (str, optional): Description Returns: TYPE:", "yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( mn, vn, dmn, dvn, extra_res, xn, alpha=alpha)", "(float, optional): Description Returns: TYPE: Description \"\"\" try: for e in range(no_epochs): if", "self.update_factor_x_sequential( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, [n], decay=decay, alpha=alpha) 
self.update_factor_x_sequential( self.PREV,", "cav_tm1_v, cav_tm1_1, cav_tm1_2, decay=decay, alpha=alpha) self.update_factor_x( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2,", "self.Din Dout = self.Dout if x_train is None: ls = np.log(np.ones((Din, )) +", "cav_v_n, decay=decay) else: # parallel update for entire dataset # TODO: minibatch parallel", "(TYPE): Description lik (TYPE): Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE):", "/ alpha m_diff = m_t - m_prop exp_term = -0.5 * m_diff**2 /", "epsilon = np.random.randn(self.Dout, self.M) u_sample = mu + np.einsum('dab,db->da', Lu, epsilon) kff =", "(TYPE): Description sgp_layer (TYPE): Description updated (bool): Description x_train (TYPE): Description \"\"\" def", "Description # \"\"\" # sgp_params = self.sgp_layer.init_hypers() # lik_params = self.lik_layer.init_hypers() # init_params", "\\ (1 - alpha) * self.x_up_2 cav_up_1[0, :] += self.x_prior_1 cav_up_2[0, :] +=", "in config energy, grad_all = self.run_pep(idxs, no_ep_sweeps, alpha, parallel, compute_energy=True) return energy, grad_all", "\\ self.sgp_layer.forward_prop_thru_cav( idxs, cav_m, cav_v, alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v,", "for inducing points, Kuuinv, Kuu and its gradients self.zu = np.zeros([M, Din]) self.Kuu", "self.zu params['zu' + key_suffix] = self.zu return params def update_hypers(self, params, x_train, key_suffix=''):", "for i in range(Din): ls[i] = np.log(d2imed + 1e-16) sf = np.log(np.array([0.5])) params", "else: randind = np.random.permutation(N) centroids = x_train[randind[0:M], :] zu = centroids if N", "k_i, gamma_si) v_si_ii = k_ii - np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i) return m_si_i, v_si_ii,", "= self.sgp_layer.sample(inputs) # return fs # def predict_y(self, inputs): # \"\"\"Summary # Args:", "def get_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description # \"\"\" # sgp_params", "v_sum const_term = -0.5 * np.log(2 * np.pi * v_sum) alpha_term = 
0.5", "parallel=True, decay=0): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int, optional): Description parallel", "perform Power-EP update munew = muhat + np.einsum('ndab,ndb->nda', Suhat, dmcav) inner = np.einsum('nda,ndb->ndab',", "(e, no_epochs) if parallel: self.inf_parallel(e, alpha, decay) else: self.inf_sequential(e, alpha, decay) except KeyboardInterrupt:", "% 50 == 0: print 'epoch %d/%d' % (e, no_epochs) if not parallel:", ":] self.mu = munew[0, :, :] self.Suinv = Suinvnew[0, :, :, :] self.SuinvMu", "%d' % (k, no_sweeps) find_log_lik = compute_energy and (k == no_sweeps-1) for i", "/ v_sum const_term = -0.5 * np.log(2 * np.pi * v_sum) alpha_term =", "+= np.sum(dv) dsf = 2 * sf2 * dsf2 # compute the gradients", "= 1.0 / variance_i gn = mean_i wnScav = np.einsum('na,ndab->ndb', p_i, Sucav) wnScavwn", "betacavKuu) signSu, logdetSu = np.linalg.slogdet(Su) signKuu, logdetKuu = np.linalg.slogdet(Kuu) Suinvm = np.einsum('dab,db->da', Suinv,", "= new_n1 - n1cav if mode == self.NEXT: cur_n1 = self.x_next_1[idxs, :] cur_n2", "self.Suinv = self.Kuuinv + np.sum(self.t2, axis=0) self.SuinvMu = np.sum(self.t1, axis=0) self.Su = np.linalg.inv(self.Suinv)", "Args: n (TYPE): Description alpha (TYPE): Description grad_cav (TYPE): Description cav_m (TYPE): Description", "d_trace_MKzz_dhypers( 2*self.ls, 2*self.sf, self.zu, S, Kuu - np.diag(JITTER * np.ones(self.M))) grads = {}", "* cur_n1 + (1 - decay) * n1_new self.x_next_2[idxs, :] = decay *", "import Base_SGPR, Base_SGPLVM, Base_SGPSSM from config import * class SGP_Layer(object): \"\"\"Summary Attributes: Din", "(TYPE): Description vx (None, optional): Description alpha (float, optional): Description Returns: TYPE: Description", "= self.sgp_layer.backprop_grads_lvm( mn, vn, dmn, dvn, extra_res, cav_m_n, cav_v_n, alpha=alpha) self.sgp_layer.update_factor( [n], alpha,", "self.x_up_1 = np.zeros((N, Din)) self.x_up_2 = np.zeros((N, Din)) self.x_prior_1 = prior_mean / prior_var", "self.t01 = 
prior_mean / prior_var self.t02 = 1.0 / prior_var # TODO: alternatitve", "= Din = input_size self.Dout = Dout = output_size self.M = M =", "(int, optional): Description parallel (bool, optional): Description decay (int, optional): Description Returns: TYPE:", "(1 - decay) * n2_new else: raise NotImplementedError('unknown mode') def get_posterior_x(self): \"\"\"Summary Returns:", "= np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {}", "self.updated = True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.emi_layer.output_probabilistic(mf,", "# n1_new = 1.0 / var_new_parallel # n2_new = mean_new_parallel / var_new_parallel #", "y_train (TYPE): Description hidden_size (TYPE): Description no_pseudo (TYPE): Description lik (str, optional): Description", "np.newaxis, :] + KuuinvVcavKuuinvKufi) dKfu_via_logZ = np.sum(dKfu_via_mi + dKfu_via_vi, axis=1) dsf2, dls, dzu", "self.N lik_contrib = scale_logZ * np.sum(logZ) dm_s = scale_logZ * dm dv_s =", "\"\"\" # compute the posterior approximation if new_hypers and x_train is not None:", "self.x_up_2 = up_2 # deal with the dynamics factors here cav_t_m, cav_t_v, cav_t_1,", "np.einsum('da,db->dab', mu, mu) S = self.Dout * Kuuinv - np.sum(np.einsum('ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Vmm,", "self.Kuu += np.diag(JITTER * np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def update_posterior(self, x_train=None, new_hypers=False):", "frac_n1 = new_n1 - n1cav if mode == self.NEXT: idxs = np.arange(0, self.N", "if e % 50 == 0: print 'epoch %d/%d' % (e, no_epochs) if", "cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x(self.NEXT, alpha) idxs = np.arange(self.N - 1) (mprop,", "self.Suinv = np.zeros([Dout, M, M]) self.Splusmm = np.zeros([Dout, M, M]) # numpy variable", "phi def forward_prop_thru_cav(self, n, mx, vx=None, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description mx", "vx (TYPE): Description Returns: TYPE: 
Description \"\"\" Kuuinv = self.Kuuinv A = np.einsum('ab,db->da',", "self.Su = np.linalg.inv(self.Suinv) self.mu = np.einsum('dab,db->da', self.Su, self.SuinvMu) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary", "1, self.Din + self.Dcon_dyn, self.Din, self.M) if gp_emi: self.emi_layer = SGP_Layer( self.N, self.Din", "in range(0, self.N - 1): # deal with the dynamics factors here cav_t_m,", "grad_cav = self.sgp_layer.backprop_grads_reg( m, v, dm, dv, extra_res, x, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha,", "Dout, M, M]) # TODO self.mu = np.zeros([Dout, M, ]) self.Su = np.zeros([Dout,", "Kuuinv)) - Kuuinv psi0 = np.exp(2.0 * self.sf) psi1, psi2 = compute_psi_weave( 2", "mx, vx = self.get_posterior_x() my, vy, vyn = self.emi_layer.output_probabilistic(mx, vx) return my, vy,", "Dout (TYPE): Description Kuu (TYPE): Description Kuuinv (TYPE): Description ls (TYPE): Description M", "self.sn} # params = dict(sgp_params) # params.update(emi_params) # params.update(ssm_params) # return params class", "vy = self.emi_layer.output_probabilistic(mf, vf) # return my, vy # def update_hypers(self, params): #", "muhat) Smm = Suhat + np.einsum('nda,ndb->ndab', muhat, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv,", "my, vy = self.emi_layer.output_probabilistic(mf, vf) # return my, vy # def update_hypers(self, params):", "# TODO self.mu = np.zeros([Dout, M, ]) self.Su = np.zeros([Dout, M, M]) self.SuinvMu", "t2_new # TODO: update posterior self.Su = Sunew[0, :, :, :] self.mu =", "+ (1 - decay) * n1_new self.x_prev_2[idxs, :] = decay * cur_n2 +", "up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 for", "\"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" #", "= kmeans2(x_train, M, minit='points') else: randind = np.random.permutation(N) centroids = x_train[randind[0:M], :] zu", "Dout (TYPE): Description emi_layer (TYPE): Description lik (TYPE): 
Description M (TYPE): Description N", "self.gp_emi: # only do this once at the begining for gaussian emission lik", "# compute the leave one out moments t1n = self.t1[n, :, :] t2n", "cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x_sequential(self.NEXT, [n], alpha) (mprop, vprop, extra_res) = \\", "np.exp(self.ls) dsf2 += np.sum(dv) dsf = 2 * sf2 * dsf2 # compute", "/ variance_i gn = mean_i wnScav = np.einsum('na,ndab->ndb', p_i, Sucav) wnScavwn = np.einsum('ndb,nb->nd',", "extra_args, x, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v (TYPE): Description dm (TYPE):", "TODO: do damping here? self.t1[n, :, :] = t1_new self.t2[n, :, :, :]", "dvcav} return grad_hyper, grad_input def backprop_grads_reg(self, m, v, dm, dv, extra_args, x, alpha=1.0):", "alpha) * t1_old + t1_frac t2_new = (1.0 - alpha) * t2_old +", "each layer self.update_posterior() class SGPR(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description", ":, :, :] t1_new = (1.0 - alpha) * t1_old + t1_frac t2_new", "optional): Description parallel (bool, optional): Description decay (int, optional): Description Returns: TYPE: Description", "np.sum(wnS * p_i[:, np.newaxis, :], axis=2) mwn = np.sum(mu * p_i[:, np.newaxis, :],", "n (TYPE): Description alpha (TYPE): Description grad_cav (TYPE): Description cav_m (TYPE): Description cav_v", "self.y_train else: idxs = np.random.choice(N, mb_size, replace=False) xb = self.x_train[idxs, :] yb =", "mcav - dm / dm2 var_new_parallel = 1 / (1 / var_i_new +", "kff += np.diag(JITTER * np.ones(x.shape[0])) kfu = compute_kernel(2 * self.ls, 2 * self.sf,", "v, dm, dv, extra_args, x, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v (TYPE):", "cav_t_1, cav_t_2 = \\ self.compute_cavity_x_sequential(self.PREV, [n + 1], alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2", "const_term = -0.5 * np.log(2 * np.pi * v_sum) alpha_term = 0.5 *", "self.ls, 2 * self.sf, mx, vx, self.zu) mout = np.einsum('nm,ndm->nd', psi1, Ahat) 
Bhatpsi2", "/ %d' % (k, no_sweeps) find_log_lik = compute_energy and (k == no_sweeps-1) m_cav,", ":], fmt='+r', yerr=np.sqrt(np.diag(self.Su[1, :, :]))) # axs[0].set_title('n = %d' % n[0]) # plt.show()", "n2_new = (1 - alpha) * cur_n2 + frac_n2 self.x_next_1[idxs, :] = decay", "m_diff / v_sum return logZ, dmprop, dvprop, dmt, dvt def update_factor_x( self, mode,", "vout def backprop_grads_lvm(self, m, v, dm, dv, extra_args, mx, vx, alpha=1.0): \"\"\"Summary Args:", "h_si = p_i - np.einsum('dab,nb->nda', beta, k_i) variance_i = self.variances[idxs, :] mean_i =", "__init__(self, x_train, y_train, no_pseudo, lik='Gaussian'): \"\"\"Summary Args: x_train (TYPE): Description y_train (TYPE): Description", "post_1[0, :] += self.x_prior_1 post_2[0, :] += self.x_prior_2 vx = 1.0 / post_2", "def forward_prop_thru_cav(self, n, mx, vx=None, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description mx (TYPE):", "(mean_i_new / var_i_new + mean_i / variance_i * (1 - alpha)) mean_new_parallel =", "= decay * cur_t1 + (1 - decay) * tx1_new tx2_new = decay", "dm2, dv, alpha, prop_info): h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2] k_i =", "h, axis=2) mean_i_new = mcav - dm / dm2 var_new_parallel = 1 /", "params.update(lik_params) # return params class SGPSSM(Base_SGPSSM): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE):", "= Vinv self.Su = np.linalg.inv(Vinv) self.mu = np.einsum('dab,db->da', self.Su, T1u) self.gamma = np.einsum('ab,db->da',", "return phi_posterior def compute_phi_cavity(self): \"\"\"Summary Returns: TYPE: Description \"\"\" logZ_posterior = 0 (sign,", "here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x(self.PREV, alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2", "self.x_next_1 = np.zeros((N, Din)) self.x_next_2 = np.zeros((N, Din)) self.x_up_1 = np.zeros((N, Din)) self.x_up_2", "k_ii = self.Kff_diag[idxs][:, np.newaxis] gamma = self.gamma beta = self.beta h_si = p_i", "= self.t1[n, :, :] t2_old = self.t2[n, :, :, :] 
t1_new = (1.0", "Description emi_layer (TYPE): Description lik (TYPE): Description M (TYPE): Description N (TYPE): Description", "2 * self.sf, mx, vx, self.zu) mout = np.einsum('nm,dm->nd', psi1, A) Bpsi2 =", "self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode", "TYPE: Description \"\"\" Su = self.Su mu = self.mu Lu = np.linalg.cholesky(Su) epsilon", "self.N, self.Din + self.Dcon_emi, self.Dout, self.M) # natural params for latent variables N", "vn, dmn, dvn, extra_res, cav_m_n, cav_v_n, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay)", "* self.x_prev_2[idxs, :] return cav_prev_1 / cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2 elif", "\"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\" Su = self.Su mu", "cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def compute_cavity_x_sequential(self, mode, idxs, alpha): \"\"\"Summary Args:", "munew) t2_frac = Suinvnew - Suinvhat t1_frac = SuinvMunew - SuinvMuhat t1_old =", "# # TODO: remove for loop here # for k in range(K): #", "* mwn**2 / oneminuswnSwn term2d = -0.5 / alpha * np.log(oneminuswnSwn) term2 =", "= dhyp[2] + dzu return sgp_contrib, grads class SGPR_rank_one(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE):", "t2_new = (1.0 - alpha) * t2_old + t2_frac if t1_new.shape[0] == 1:", "alpha, prop_info): N = self.N Nb = idxs.shape[0] sf2 = np.exp(2 * self.sf)", "wrt Ahat and Bhat dAhat = np.einsum('nd,nm->ndm', dm, kfu) dBhat = np.einsum('nd,na,nb->ndab', dv,", "self.M) def objective_function(self, params, mb_size, alpha=1.0, prop_mode=PROP_MM): \"\"\"Summary Args: params (TYPE): Description mb_size", "* (-p_i[:, np.newaxis, :] + KuuinvVcavKuuinvKufi) dKfu_via_logZ = np.sum(dKfu_via_mi + dKfu_via_vi, axis=1) dsf2,", "Description y_train (TYPE): Description no_pseudo (TYPE): Description lik (str, optional): Description Raises: NotImplementedError:", "alpha, prop_info): h, beta_si, 
gamma_si = prop_info[0], prop_info[1], prop_info[2] k_i = self.Kfu[idxs, :]", "wnScav, p_i) wnScavSinvm = np.sum(wnScav * Suinvm, axis=2) wnS = np.einsum('na,dab->ndb', p_i, Su)", ":] t1_new = (1.0 - alpha) * t1_old + t1_frac t2_new = (1.0", "(TYPE): Description y_train (TYPE): Description no_pseudo (TYPE): Description lik (str, optional): Description Raises:", "def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1): \"\"\"Summary Args: y_train (TYPE): Description", "\"\"\"Summary Args: epoch (TYPE): Description alpha (TYPE): Description decay (TYPE): Description Returns: TYPE:", "vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( idxs, cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt,", "frac_t1 tx2_new = (1 - alpha) * cur_t2 + frac_t2 tx1_new = decay", "np.zeros((Din, )) d2imed = np.median(x_dist[triu_ind]) for i in range(Din): ls[i] = np.log(d2imed +", "np.sum(mu * Suinvm)) variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] tn =", "cav_next_2, 1.0 / cav_next_2, cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def compute_transition_tilted(self, m_prop,", "key in lik_grad.keys(): grad_all[key] = lik_grad[key] energy /= N for key in grad_all.keys():", "the begining for gaussian emission lik if isinstance(self.emi_layer, Gauss_Layer) and epoch == 0:", "self.means[idxs, :] dlogZd_dmi2 = 1.0 / (variance_i/alpha - np.sum(k_i[:, np.newaxis, :] * h_si,", "mwn = np.sum(mu * p_i[:, np.newaxis, :], axis=2) oneminuswnSwn = 1 - alpha", "key in sgp_grad.keys(): grad_all[key] = sgp_grad[key] for key in lik_grad.keys(): grad_all[key] = lik_grad[key]", "self.zu, S, Kuu - np.diag(JITTER * np.ones(self.M))) grads = {} grads['sf'] = 2*dhyp[0]", "k in range(K): # fs[:, :, k] = self.sgp_layer.sample(inputs) # return fs #", "mode, alpha): \"\"\"Summary Args: mode (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description", "= self.Kuuinv Su = self.Su mu = self.mu Suinv = 
self.Suinv p_i =", "with new hypers self.update_hypers(params) # run power-EP and compute grads no_ep_sweeps = 10", "dm[:, :, np.newaxis] * KuuinvMcav dKuu_via_mi = -np.einsum('nda,nb->ab', dmiKuuinvMcav, p_i) VcavKuuinvKufi = np.einsum('ndab,nb->nda',", "alpha): \"\"\"Summary Args: mode (TYPE): Description idxs (TYPE): Description alpha (TYPE): Description Returns:", "KuuinvVcavKuuinvKufi, p_idlogZ_dvi) temp2 = np.transpose(temp1, [0, 1]) temp3 = np.einsum('na,ndb->ab', p_i, p_idlogZ_dvi) dKuu_via_vi", "inference(self, alpha=1.0, no_epochs=10, parallel=True, decay=0): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int,", "= np.random.permutation(N) X1 = X[randind[:5000], :] x_dist = cdist(X1, X1, 'euclidean') triu_ind =", "decay (float, optional): Description Returns: TYPE: Description \"\"\" dmx = grad_cav['mx'] dvx =", "= self.t02 + (1 - alpha) * self.tx2[n, :] cav_v = 1.0 /", "= np.einsum('nd,ndm->nm', dm_all, Ahat) dpsi2 = np.einsum('nd,ndab->nab', dv, Bhat) dsf2, dls, dzu, dmx,", "np.newaxis, :] * gamma, axis=2)) hd1 = h_si * dlogZd_dmi[:, :, np.newaxis] hd2h", "(TYPE): Description vx (None, optional): Description Returns: TYPE: Description \"\"\" if vx is", "key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" self.ls = params['ls' + key_suffix]", "* dvcav Sunew = Suhat - np.einsum( 'ndab,ndbc->ndac', Suhat, np.einsum('ndab,ndbc->ndac', inner, Suhat)) Suinvnew", "self.x_up_1 = up_1 self.x_up_2 = up_2 else: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v,", "self.compute_cavity(n, alpha) Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Smm = Suhat", "batch_size / alpha # scale_logZ = 0 xb = self.x_train[idxs, :] yb =", "self.x_prev_1 + self.x_up_1 post_2 = self.x_next_2 + self.x_prev_2 + self.x_up_2 post_1[0, :] +=", "m_si_i, v_si_ii, [h, beta_si, gamma_si] def update_factor(self, idxs, mcav, vcav, dm, dm2, dv,", "cur_n2 + frac_n2 self.x_next_1[idxs, :] = decay * cur_n1 + (1 - 
decay)", "= params['zu' + key_suffix] # update Kuu given new hypers self.compute_kuu() # compute", "hypers self.compute_kuu() # compute mu and Su for each layer self.update_posterior() class SGPR(Base_SGPR):", "grad_cav def update_factor(self, n, alpha, grad_cav, extra_args, decay=0): \"\"\"Summary Args: n (TYPE): Description", "Kuu - np.diag(JITTER * np.ones(self.M))) grads = {} grads['sf'] = 2*dhyp[0] + dsf", "Returns: TYPE: Description \"\"\" logZ_posterior = 0 (sign, logdet) = np.linalg.slogdet(self.Suhat) phi_cavity =", "= self.ls sf = self.sf Dout = self.Dout M = self.M zu =", "= self.Su + np.einsum('da,db->dab', self.mu, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Smm,", "# n1_damped = rho * n1_new + (1.0 - rho) * n1_ori #", "np.log(np.ones((Din, )) + 0.1 * np.random.rand(Din, )) sf = np.log(np.array([1])) zu = np.tile(np.linspace(-1,", "(TYPE): Description lik (str, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR_rank_one, self).__init__(x_train, y_train,", "self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Smm = Suhat + np.einsum('nda,ndb->ndab', muhat, muhat)", "factor variables self.variances = np.zeros([N, Dout]) self.variances.fill(1e20) self.means = np.zeros([N, Dout]) # pep", "Kuuinv)) - Kuuinv kff = np.exp(2 * self.sf) kfu = compute_kernel(2 * self.ls,", "Description alpha (TYPE): Description grad_cav (TYPE): Description extra_args (TYPE): Description decay (int, optional):", "for latent variables self.tx1 = np.zeros((self.N, self.Din)) self.tx2 = np.zeros((self.N, self.Din)) self.t01 =", "= np.zeros((N, Din)) self.x_next_2 = np.zeros((N, Din)) self.x_up_1 = np.zeros((N, Din)) self.x_up_2 =", "cav_t_v, cav_t_1, cav_t_2, decay=decay, alpha=alpha) def inf_sequential(self, epoch, alpha, decay): \"\"\"Summary Args: epoch", "self.SuinvMu = np.zeros([Dout, M, ]) self.Suinv = np.zeros([Dout, M, M]) self.Splusmm = np.zeros([Dout,", "self.zu) dls = dls * np.exp(self.ls) dsf2 += np.sum(dv) 
dsf = 2 *", "in sgp_grad.keys(): grad_all[key] = sgp_grad[key] for key in lik_grad.keys(): grad_all[key] = lik_grad[key] energy", "cav_v # neg_idxs = np.where(frac_t2 < 0) # frac_t2[neg_idxs] = 0 cur_t1 =", "optional): Description Returns: TYPE: Description \"\"\" params = {} M = self.M Din", "params['zu' + key_suffix] = zu return params def get_hypers(self, key_suffix=''): \"\"\"Summary Args: key_suffix", "self.zu) KuuinvKuf = np.dot(self.Kuuinv, Kfu.T) self.Kfu = Kfu self.KuuinvKuf = KuuinvKuf self.Kff_diag =", "a[3], a[4], a[5], a[6], a[7], a[8] # compute grads wrt Ahat and Bhat", "term2b = - gn * tn * wnScavSinvm term2c = 0.5 * tn", "(TYPE): Description no_pseudo (TYPE): Description lik (str, optional): Description prior_mean (int, optional): Description", "cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT: cav_next_1 = self.x_up_1[idxs,", "new_m frac_t2 = new_p2 - 1.0 / cav_v frac_t1 = new_p1 - cav_m", "Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv kff = np.exp(2 * self.sf) kfu =", "+ dKuu_via_vi dKfu_via_mi = dmiKuuinvMcav dKfu_via_vi = 2 * dv[:, :, np.newaxis] *", "0.5 * alpha * tn**2 * gn**2 * wnScavwn term2b = - gn", "self.variances.fill(1e20) self.means = np.zeros([N, Dout]) # pep variables self.gamma = np.zeros([Dout, M]) self.beta", "np.newaxis] gamma = self.gamma beta = self.beta h_si = p_i - np.einsum('dab,nb->nda', beta,", "alpha (TYPE): Description decay (TYPE): Description Returns: TYPE: Description \"\"\" # merge info", "= Dout = output_size self.M = M = no_pseudo self.N = N =", "Description \"\"\" mx, vx = self.get_posterior_x() my, vy, vyn = self.emi_layer.output_probabilistic(mx, vx) return", "e % 50 == 0: print 'epoch %d/%d' % (e, no_epochs) if parallel:", "x_train, self.zu) KuuinvKuf = np.dot(self.Kuuinv, Kfu.T) self.Kfu = Kfu self.KuuinvKuf = KuuinvKuf self.Kff_diag", "alpha, compute_energy, display_steps) else: # TODO return self.run_pep_sequential( train_idxs, no_sweeps, alpha, compute_energy, 
display_steps)", "t2_new self.update_posterior() # axs[0].errorbar(self.zu[:, 0]+0.05, self.mu[0, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[0, :, :]))) # axs[1].errorbar(self.zu[:,", "x, alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav", "compute_psi_derivatives( dpsi1, psi1, dpsi2, psi2, ls, sf2, mx, vx, zu) dvcav = np.einsum('ab,ndbc,ce->ndae',", "ls params['zu' + key_suffix] = zu return params def get_hypers(self, key_suffix=''): \"\"\"Summary Args:", "* m_diff**2 / v_sum const_term = -0.5 * np.log(2 * np.pi * v_sum)", "a[2], a[3], a[4], a[5], a[6] Kuuinv = self.Kuuinv # compute grads wrt Ahat", "m_cav, v_cav, yb[i], alpha, compute_dm2=True) self.sgp_layer.update_factor( [idxs[i]], m_cav, v_cav, dm, dm2, dv, alpha,", "\"\"\"Summary Args: params (TYPE): Description key_suffix (str, optional): Description Returns: TYPE: Description \"\"\"", "Suinv (TYPE): Description SuinvMu (TYPE): Description t1 (TYPE): Description t2 (TYPE): Description zu", "\"\"\"Summary # Args: # params (TYPE): Description # Returns: # TYPE: Description #", "no_ep_sweeps = 10 # TODO: put this in config parallel = True #", "x, x) kff += np.diag(JITTER * np.ones(x.shape[0])) kfu = compute_kernel(2 * self.ls, 2", "n1_new = 1.0 / var_new_parallel # n2_new = mean_new_parallel / var_new_parallel # n1_ori", "N = self.N scale_post = N * 1.0 / alpha - 1.0 scale_cav", "+ 1e-16), 1.0 / (cav_up_2 + 1e-16), cav_up_1, cav_up_2 elif mode == self.PREV:", "= self.x_train[idxs, :] (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, x, alpha=alpha) logZ,", "+= self.x_prior_1 cav_up_2[0, :] += self.x_prior_2 return cav_up_1 / (cav_up_2 + 1e-16), 1.0", "mcav = np.einsum('ab,ndb->nda', Kuu, gamma_si) Sucav = Kuu - np.einsum('ab,ndbc->ndac', Kuu, betacavKuu) signSu,", "compute_phi(self, alpha=1.0): \"\"\"Summary Args: alpha (float, optional): Description Returns: TYPE: Description \"\"\" N", "= SGP_Layer_rank_one(self.N, self.Din, self.Dout, self.M) 
def objective_function(self, params, mb_size, alpha=1.0, prop_mode=PROP_MM): \"\"\"Summary Args:", "(1, Din)) else: if N < 10000: centroids, label = kmeans2(x_train, M, minit='points')", "TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params) # self.lik_layer.update_hypers(params) # def init_hypers(self): # \"\"\"Summary", "one out moments t1n = self.t1[n, :, :] t2n = self.t2[n, :, :,", "np.linalg.slogdet(self.Su) phi_posterior = 0.5 * np.sum(logdet) phi_posterior += 0.5 * \\ np.sum(self.mu *", "Lu, epsilon) kff = compute_kernel(2 * self.ls, 2 * self.sf, x, x) kff", "compute_energy=True) return energy, grad_all def run_pep(self, train_idxs, no_sweeps, alpha, parallel, compute_energy=False, display_steps=10): if", "cav_next_2 += (1 - alpha) * self.x_next_2[idxs, :] return cav_next_1 / cav_next_2, 1.0", "here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x_sequential(self.PREV, [n + 1], alpha) cav_tm1_m,", "np.exp(2 * self.sf) Dout = self.Dout Kuu = self.Kuu Kuuinv = self.Kuuinv Su", "Description x_prev_2 (TYPE): Description x_prior_1 (TYPE): Description x_prior_2 (TYPE): Description x_up_1 (TYPE): Description", "# if not self.updated: # self.sgp_layer.update_posterior() # self.updated = True # mf, vf", "decay=decay, alpha=alpha) self.update_factor_x_sequential( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, [n + 1],", "+ np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_input = {'mx': dmx, 'vx': dvx,", "zu = self.zu Kuuinv = self.Kuuinv a = extra_args muhat, Suhat, SuinvMuhat, Suinvhat,", "self.tx1[n, :] = tx1_new self.tx2[n, :] = tx2_new def get_posterior_x(self): \"\"\"Summary Returns: TYPE:", "vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( mn, vn, dmn, dvn, extra_res, xn,", "cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt, dvt = \\ self.compute_transition_tilted( mprop, vprop,", "# perform Power-EP update munew = muhat + np.einsum('ndab,ndb->nda', Suhat, dmcav) inner =", 
"np.random.randn(x.shape[0], self.Dout) f_sample = mf + np.einsum('ab,bd->ad', Lf, epsilon) return f_sample def compute_kuu(self):", "cav_prev_2 = self.x_up_2[idxs, :] + self.x_next_2[idxs, :] cav_prev_1 += (1 - alpha) *", "vx is None: return self._forward_prop_deterministic_thru_post(mx) else: return self._forward_prop_random_thru_post_mm(mx, vx) def _forward_prop_deterministic_thru_post(self, x): \"\"\"Summary", "cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x(self.PREV, alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 =", "self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 # deal with", "(TYPE): Description M (TYPE): Description mu (TYPE): Description N (TYPE): Description sf (int):", "mean_new_parallel def backprop_grads_reg(self, idxs, m, v, dm, dm2, dv, x, alpha, prop_info): N", "= self.y_train[n, :].reshape([1, self.Dout]) cav_m_n, cav_v_n, _, _ = self.compute_cavity_x([ n], alpha) (mn,", "self.x_up_2 = up_2 for n in range(0, self.N - 1): # deal with", "* cur_n2 + (1 - decay) * n2_new else: raise NotImplementedError('unknown mode') def", "np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2 * self.sf)", "= munew[0, :, :] self.Suinv = Suinvnew[0, :, :, :] self.SuinvMu = SuinvMunew[0,", "alpha (float, optional): Description Returns: TYPE: Description \"\"\" if vx is None: return", "{'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_cav def update_factor(self, n, alpha, grad_cav, extra_args,", "Description extra_args (TYPE): Description mx (TYPE): Description vx (TYPE): Description alpha (float, optional):", "= - N * 1.0 / alpha scale_prior = 1 phi_prior = self.compute_phi_prior()", "= %d' % n[0]) # plt.show() def sample(self, x): \"\"\"Summary Args: x (TYPE):", "alpha): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\"", "wrt Ahat and Bhat dm_all = dm - 2 * dv * m", "(cav_up_2 + 1e-16), cav_up_1, 
cav_up_2 elif mode == self.PREV: idxs = np.arange(1, self.N)", "= np.arange(0, self.N - 1) cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :]", "cur_t1 + (1 - decay) * tx1_new tx2_new = decay * cur_t2 +", "Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2", "for k in range(K): # fs[:, :, k] = self.sgp_layer.sample(inputs) # return fs", "= {} for k in range(no_sweeps): # if k % display_steps == 0:", "NotImplementedError: Description \"\"\" super(SGPR_rank_one, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer_rank_one(self.N, self.Din, self.Dout,", "backprop_grads_lvm(self, m, v, dm, dv, extra_args, mx, vx, alpha=1.0): \"\"\"Summary Args: m (TYPE):", "= self.compute_cavity_x(idxs, alpha) (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, cav_m, cav_v, alpha=alpha)", "np.sum(wnScav * Suinvm, axis=2) wnS = np.einsum('na,dab->ndb', p_i, Su) wnSwn = np.sum(wnS *", "+ (1 - decay) * t2_new self.update_posterior() # axs[0].errorbar(self.zu[:, 0]+0.05, self.mu[0, :], fmt='+r',", "decay (int, optional): Description Returns: TYPE: Description \"\"\" try: for e in range(no_epochs):", "and x_train is not None: Kfu = compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu) KuuinvKuf =", "ls[i] = np.log(d2imed + 1e-16) sf = np.log(np.array([0.5])) params = dict() params['sf' +", "alpha * np.log(oneminuswnSwn) term2 = N / Nb * np.sum(term2a + term2b +", "= True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.lik_layer.output_probabilistic(mf, vf)", "cav_next_2 else: raise NotImplementedError('unknown mode') def compute_transition_tilted(self, m_prop, v_prop, m_t, v_t, alpha): \"\"\"Summary", "in range(batch_size): m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity( [idxs[i]], alpha) logZ, dm, dv, dm2", "self.Dout M = self.M zu = self.zu self.Kuu = compute_kernel(2 * ls, 2", "decay=decay, alpha=alpha) def inference(self, alpha=1.0, 
no_epochs=10, parallel=True, decay=0): \"\"\"Summary Args: alpha (float, optional):", "self.sgp_layer.forward_prop_thru_cav( idxs, x, alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha)", "params.update(emi_params) # params.update(ssm_params) # return params class SGP_Layer_rank_one(object): \"\"\"Summary Attributes: Din (TYPE): Description", "- np.sum(np.einsum('ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Vmm, Kuuinv)), axis=0) S = 0.5 * S +", "# my, vy = self.lik_layer.output_probabilistic(mf, vf) # return my, vy # def update_hypers(self,", ":] self.SuinvMu = SuinvMunew[0, :, :] else: # parallel update self.t1[n, :, :]", "= np.exp(self.ls) sf2 = np.exp(2 * self.sf) zu = self.zu Kuuinv = self.Kuuinv", "no_sweeps) find_log_lik = compute_energy and (k == no_sweeps-1) for i in range(batch_size): m_cav,", "axis=0) self.SuinvMu = np.sum(self.t1, axis=0) self.Su = np.linalg.inv(self.Suinv) self.mu = np.einsum('dab,db->da', self.Su, self.SuinvMu)", "= self.variances[idxs, :] mean_i = self.means[idxs, :] var_i_new = -1.0 / dm2 -", "Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat = \\ a[0], a[1], a[2],", "self.Kuuinv = np.linalg.inv(self.Kuu) def update_posterior(self, x_train=None, new_hypers=False): \"\"\"Summary Returns: TYPE: Description \"\"\" #", "k_i) variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] dlogZd_dmi2 = 1.0 /", "= v_t + v_prop + sn2 / alpha m_diff = m_t - m_prop", "except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' def compute_cavity_x(self, mode, alpha): \"\"\"Summary Args: mode", "cav_m, cav_v, decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' 
def compute_cavity_x(self, n, alpha):", "Returns: TYPE: Description \"\"\" post_1 = self.x_next_1 + self.x_prev_1 + self.x_up_1 post_2 =", "/ prior_var self.x_prior_2 = 1.0 / prior_var self.UP, self.PREV, self.NEXT = 'UP', 'PREV',", "new_p2 - 1.0 / cav_v frac_t1 = new_p1 - cav_m / cav_v #", "print 'PEP, epoch: %d / %d' % (k, no_sweeps) find_log_lik = compute_energy and", "extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, x, alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m,", "= self.zu return params def update_hypers(self, params, x_train, key_suffix=''): \"\"\"Summary Args: params (TYPE):", "SGPLVM(Base_SGPLVM): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer (TYPE): Description M", "== self.PREV: cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2 = self.x_up_2[idxs, :]", "dataset # TODO: minibatch parallel idxs = np.arange(self.N) y = self.y_train[idxs, :] x", "(1 - alpha) * self.tx1[n, :] cav_x2 = self.t02 + (1 - alpha)", "np.triu_indices(N) ls = np.zeros((Din, )) d2imed = np.median(x_dist[triu_ind]) for i in range(Din): ls[i]", "2*dhyp[0] + dsf grads['ls'] = 2*dhyp[1] + dls grads['zu'] = dhyp[2] + dzu", "(TYPE): Description output_size (TYPE): Description no_pseudo (TYPE): Description \"\"\" self.Din = Din =", "yb = self.y_train[idxs, :] # update model with new hypers self.update_hypers(params) # run", "Description extra_args (TYPE): Description decay (int, optional): Description Returns: TYPE: Description \"\"\" muhat,", "Description parallel (bool, optional): Description decay (float, optional): Description Returns: TYPE: Description \"\"\"", "dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( mn, vn,", "dmcav, dvcav, mcav, vcav, n1cav, n2cav, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description", "= mean_i wnScav = np.einsum('na,ndab->ndb', p_i, Sucav) wnScavwn = np.einsum('ndb,nb->nd', wnScav, p_i) 
wnScavSinvm", "= \\ self.sgp_layer.forward_prop_thru_cav( [n], cav_m_n, cav_v_n, alpha=alpha) logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn,", "np.diag(JITTER * np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def update_posterior(self, x_train=None, new_hypers=False): \"\"\"Summary Returns:", "= np.sum(dKfu_via_mi + dKfu_via_vi, axis=1) dsf2, dls, dzu = compute_kfu_derivatives( dKfu_via_logZ, kfu, np.exp(self.ls),", "grad_all = {} for k in range(no_sweeps): # if k % display_steps ==", "(TYPE): Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha)", "grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( m, v, dm, dv, extra_res, cav_m, cav_v, alpha=alpha) self.sgp_layer.update_factor(", "self.gamma beta = self.beta h_si = p_i - np.einsum('dab,nb->nda', beta, k_i) variance_i =", "Description n1cav (TYPE): Description n2cav (TYPE): Description idxs (TYPE): Description decay (float, optional):", "* cav_x1 return cav_m, cav_v, cav_x1, cav_x2 def update_factor_x(self, n, alpha, grad_cav, cav_m,", "self.sn) v_sum = v_t + v_prop + sn2 / alpha m_diff = m_t", "update self.t1[n, :, :] = decay * t1_old + (1 - decay) *", "self.sgp_layer.update_hypers(params) # self.emi_layer.update_hypers(params) # self.sn = params['sn'] # def init_hypers(self): # \"\"\"Summary #", "+ 1], decay=decay, alpha=alpha) def inference(self, alpha=1.0, no_epochs=10, parallel=True, decay=0): \"\"\"Summary Args: alpha", "= \\ self.compute_transition_tilted( mprop, vprop, cav_t_m, cav_t_v, alpha) grad_hyper, grad_cav = self.dyn_layer.backprop_grads_lvm( mprop,", "= np.transpose(temp1, [0, 1]) temp3 = np.einsum('na,ndb->ab', p_i, p_idlogZ_dvi) dKuu_via_vi = temp1 +", "KuuinvVcavKuuinvKufi = np.einsum('ab,ndb->nda', Kuuinv, VcavKuuinvKufi) p_idlogZ_dvi = p_i[:, np.newaxis, :] * dv[:, :,", "def inference(self, alpha=1.0, no_epochs=10, parallel=True, decay=0): \"\"\"Summary Args: alpha (float, optional): Description no_epochs", "...' 
def compute_cavity_x(self, mode, alpha): \"\"\"Summary Args: mode (TYPE): Description alpha (TYPE): Description", "grad_input def backprop_grads_reg(self, m, v, dm, dv, extra_args, x, alpha=1.0): \"\"\"Summary Args: m", "- alpha) * self.x_next_1[idxs, :] cav_next_2 += (1 - alpha) * self.x_next_2[idxs, :]", "tx2_new = decay * cur_t2 + (1 - decay) * tx2_new self.tx1[n, :]", "(TYPE): Description decay (TYPE): Description Returns: TYPE: Description \"\"\" # merge info from", "alpha_term dvt = -0.5 / v_sum + 0.5 * m_diff**2 / v_sum**2 dvprop", "# self.sgp_layer.update_hypers(params) # self.emi_layer.update_hypers(params) # self.sn = params['sn'] # def init_hypers(self): # \"\"\"Summary", "cur_n1 + (1 - decay) * n1_new self.x_prev_2[idxs, :] = decay * cur_n2", "+ term2d) sgp_contrib = - term1 - term2 KuuinvMcav = np.einsum('ab,ndb->nda', Kuuinv, mcav)", "cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 else: up_1, up_2 = self.emi_layer.compute_factor(", "_forward_prop_random_thru_post_mm(self, mx, vx): \"\"\"Summary Args: mx (TYPE): Description vx (TYPE): Description Returns: TYPE:", "run_pep(self, train_idxs, no_sweeps, alpha, parallel, compute_energy=False, display_steps=10): if parallel: return self.run_pep_parallel( train_idxs, no_sweeps,", "rho * n2_new + (1.0 - rho) * n2_ori # var_new_parallel = 1.0", "muhat = np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat) return muhat, Suhat, SuinvMuhat, Suinvhat def update_posterior(self): \"\"\"Summary", "{} grad_input = {'mx': dmx, 'vx': dvx, 'mcav': dmcav, 'vcav': dvcav} return grad_hyper,", "and epoch == 0: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 =", "Kuuinv, muhat) Smm = Suhat + np.einsum('nda,ndb->ndab', muhat, muhat) Bhat = np.einsum( 'ab,ndbc->ndac',", "= ls params['zu' + key_suffix] = zu return params def get_hypers(self, key_suffix=''): \"\"\"Summary", "frac_n2 self.x_prev_1[idxs, :] = decay * cur_n1 + (1 - decay) * n1_new", "np.sum(k_i[:, np.newaxis, :] * h, axis=2) 
mean_i_new = mcav - dm / dm2", ":, :, :] Suinvhat = self.Suinv - alpha * t2n SuinvMuhat = self.SuinvMu", "\\ self.compute_cavity_x(self.PREV, alpha) cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x(self.NEXT, alpha) idxs =", "%d/%d' % (e, no_epochs) if not parallel: for n in range(self.N): yn =", "mb_size (TYPE): Description alpha (float, optional): Description prop_mode (TYPE, optional): Description Returns: TYPE:", "def forward_prop_thru_post(self, mx, vx=None): \"\"\"Summary Args: mx (TYPE): Description vx (None, optional): Description", "= np.zeros((self.N, self.Din)) self.t01 = prior_mean / prior_var self.t02 = 1.0 / prior_var", "dm2_s, dv_s, xb, alpha, prop_info) lik_grad = self.lik_layer.backprop_grads( m_cav, v_cav, dm, dv, alpha,", "self.compute_cavity_x(self.NEXT, alpha) idxs = np.arange(self.N - 1) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav(", "yb = self.y_train[idxs, :] energy = {} grad_all = {} for k in", "post_2 = 1.0 / post_v post_1 = post_2 * post_m self.tx1 = post_1", "Dout]) self.variances.fill(1e20) self.means = np.zeros([N, Dout]) # pep variables self.gamma = np.zeros([Dout, M])", "self.lik_layer.init_hypers() # init_params = dict(sgp_params) # init_params.update(lik_params) # return init_params # def get_hypers(self):", "x (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat,", ">= N: idxs = np.arange(N) xb = self.x_train yb = self.y_train else: idxs", "for key in sgp_grad.keys(): grad_all[key] = sgp_grad[key] for key in lik_grad.keys(): grad_all[key] =", "+ (1 - decay) * tx2_new self.tx1[n, :] = tx1_new self.tx2[n, :] =", "alpha (float, optional): Description Returns: TYPE: Description \"\"\" # compute the leave one", "N (TYPE): Description sgp_layer (TYPE): Description updated (bool): Description x_train (TYPE): Description \"\"\"", "output_size self.M = M = no_pseudo self.N = N = no_train # factor", "= np.zeros([Din, ]) self.sf = 0 def 
compute_phi_prior(self): \"\"\"Summary Returns: TYPE: Description \"\"\"", "X[randind[:5000], :] x_dist = cdist(X1, X1, 'euclidean') triu_ind = np.triu_indices(N) ls = np.zeros((Din,", "= 0.5 * np.sum(logdet) phi_cavity += 0.5 * \\ np.sum(self.muhat * np.linalg.solve(self.Suhat, self.muhat))", "= 1.0 / variance_i # n2_ori = mean_i / variance_i # n1_damped =", "0.5 * logdet return logZ_prior def compute_phi_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign,", "alpha=1.0, no_epochs=10, parallel=True, decay=0): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int, optional):", "= 0.1 * np.ones_like(post_m) post_2 = 1.0 / post_v post_1 = post_2 *", "+ 1e-16), cav_up_1, cav_up_2 elif mode == self.PREV: idxs = np.arange(1, self.N) cav_prev_1", "- decay) * n2_new else: raise NotImplementedError('unknown mode') def get_posterior_x(self): \"\"\"Summary Returns: TYPE:", "(cav_up_2 + 1e-16), cav_up_1, cav_up_2 elif mode == self.PREV: cav_prev_1 = self.x_up_1[idxs, :]", "psi2, Ahat, Bhat] return mout, vout, extra_res def forward_prop_thru_post(self, mx, vx=None): \"\"\"Summary Args:", "decay=0): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int, optional): Description parallel (bool,", "tn * mwn**2 / oneminuswnSwn term2d = -0.5 / alpha * np.log(oneminuswnSwn) term2", "dv, psi2) # compute grads wrt psi1 and psi2 dpsi1 = np.einsum('nd,ndm->nm', dm_all,", "yn = self.y_train[n, :].reshape([1, self.Dout]) cav_m_n, cav_v_n, _, _ = self.compute_cavity_x([ n], alpha)", "decay) * n2_new else: raise NotImplementedError('unknown mode') def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description", "hd2h = np.einsum('nda,ndb->ndab', h_si, h_si) * dlogZd_dmi2[:, :, np.newaxis, np.newaxis] gamma_si = gamma", "Description \"\"\" # compute the posterior approximation self.Suinv = self.Kuuinv + np.sum(self.t2, axis=0)", "extra_res, decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' 
class SGPLVM(Base_SGPLVM): \"\"\"Summary Attributes: Din", "2 * sf, zu, zu) self.Kuu += np.diag(JITTER * np.ones((M, ))) self.Kuuinv =", "dvn, extra_res, cav_m_n, cav_v_n, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( [n],", "1.0 / cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT: cav_next_1 = self.x_up_1[idxs, :]", "+ mean_i / variance_i * (1 - alpha)) mean_new_parallel = mean_div_var_i_new * var_new_parallel", "control_to_emi=True): \"\"\"Summary Args: y_train (TYPE): Description hidden_size (TYPE): Description no_pseudo (TYPE): Description lik", "exp_term + const_term + alpha_term dvt = -0.5 / v_sum + 0.5 *", "M, minit='points') else: randind = np.random.permutation(N) centroids = x_train[randind[0:M], :] zu = centroids", ":] for k in range(no_sweeps): if k % display_steps == 0: print 'PEP,", "+ self.x_next_1 + \\ (1 - alpha) * self.x_up_1 cav_up_2 = self.x_prev_2 +", "np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i) return m_si_i, v_si_ii, [h, beta_si, gamma_si] def update_factor(self, idxs,", "update_factor_x(self, n, alpha, grad_cav, cav_m, cav_v, decay=0.0): \"\"\"Summary Args: n (TYPE): Description alpha", "Description Returns: TYPE: Description \"\"\" # compute the leave one out moments t1n", "* self.sf, mx, vx, self.zu) mout = np.einsum('nm,ndm->nd', psi1, Ahat) Bhatpsi2 = np.einsum('ndab,nab->nd',", "y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1, x_control=None, gp_emi=False, control_to_emi=True): \"\"\"Summary Args: y_train (TYPE):", "-np.einsum('nda,nb->ab', dmiKuuinvMcav, p_i) VcavKuuinvKufi = np.einsum('ndab,nb->nda', Sucav, p_i) KuuinvVcavKuuinvKufi = np.einsum('ab,ndb->nda', Kuuinv, VcavKuuinvKufi)", "# lik_params = self.lik_layer.init_hypers() # init_params = dict(sgp_params) # init_params.update(lik_params) # return init_params", "new_p2 * new_m frac_t2 = new_p2 - 1.0 / cav_v frac_t1 = new_p1", "(1 - alpha) * self.x_up_2 cav_up_1[0, :] += 
self.x_prior_1 cav_up_2[0, :] += self.x_prior_2", "1) cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :] n1_new = (1 -", "new_m = mcav + vcav * dmcav new_v = vcav - vcav**2 *", "a[6], a[7], a[8] # compute grads wrt Ahat and Bhat dm_all = dm", "Returns: # TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params) # self.lik_layer.update_hypers(params) # def init_hypers(self):", "% display_steps == 0: # print 'PEP, epoch: %d / %d' % (k,", "alpha) else: return self._forward_prop_random_thru_cav_mm(n, mx, vx, alpha) def _forward_prop_deterministic_thru_cav(self, n, x, alpha): \"\"\"Summary", "munew = muhat + np.einsum('ndab,ndb->nda', Suhat, dmcav) inner = np.einsum('nda,ndb->ndab', dmcav, dmcav) -", "sgp_contrib, sgp_grad = self.sgp_layer.backprop_grads_reg( idxs, m_cav, v_cav, dm_s, dm2_s, dv_s, xb, alpha, prop_info)", "- np.diag(JITTER * np.ones(self.M))) grads = {} grads['sf'] = 2*dhyp[0] + dsf grads['ls']", "m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity( [idxs[i]], alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z(", "# \"\"\"Summary # Args: # inputs (TYPE): Description # no_samples (int, optional): Description", "N < 10000: centroids, label = kmeans2(x_train, M, minit='points') else: randind = np.random.permutation(N)", "TYPE: Description Raises: NotImplementedError: Description \"\"\" if mode == self.UP: cav_up_1 = self.x_prev_1", "+ 1 / variance_i * (1 - alpha)) mean_div_var_i_new = (mean_i_new / var_i_new", "train_idxs, no_sweeps, alpha, parallel, compute_energy=False, display_steps=10): if parallel: return self.run_pep_parallel( train_idxs, no_sweeps, alpha,", "Kuuinv = self.Kuuinv Su = self.Su mu = self.mu Suinv = self.Suinv p_i", "'PEP, epoch: %d / %d' % (k, no_sweeps) find_log_lik = compute_energy and (k", ":] n1_new = (1 - alpha) * cur_n1 + frac_n1 n2_new = (1", "= extra_args muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat = \\ a[0], a[1],", "(1 - decay) * n1_new self.x_prev_2[idxs, :] = decay * cur_n2 + (1", "do 
this once at the begining for gaussian emission lik if isinstance(self.emi_layer, Gauss_Layer)", "Kuuinv, VcavKuuinvKufi) p_idlogZ_dvi = p_i[:, np.newaxis, :] * dv[:, :, np.newaxis] temp1 =", "TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params) # self.emi_layer.update_hypers(params) # self.sn = params['sn'] #", "m_cav, v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) if find_log_lik: N =", "Returns: TYPE: Description \"\"\" params = {} M = self.M Din = self.Din", "+ (1 - decay) * n2_new elif mode == self.PREV: idxs = np.arange(1,", "(str, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR_rank_one, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer", "= -dlogZd_dmi2 * (mean_i - np.sum(k_i[:, np.newaxis, :] * gamma, axis=2)) hd1 =", "= X[randind[:5000], :] x_dist = cdist(X1, X1, 'euclidean') triu_ind = np.triu_indices(N) ls =", "alpha) * self.x_next_2[idxs, :] return cav_next_1 / cav_next_2, 1.0 / cav_next_2, cav_next_1, cav_next_2", "self.zu self.Kuu = compute_kernel(2 * ls, 2 * sf, zu, zu) self.Kuu +=", "else: randind = np.random.permutation(N) X1 = X[randind[:5000], :] x_dist = cdist(X1, X1, 'euclidean')", "Kuuinv) dmcav = 2 * np.einsum('ndab,ndb->nda', dvcav, muhat) \\ + np.einsum('ab,ndb->nda', Kuuinv, dAhat)", "< 0) # frac_t2[neg_idxs] = 0 cur_t1 = self.tx1[n, :] cur_t2 = self.tx2[n,", "- alpha) * cur_t2 + frac_t2 tx1_new = decay * cur_t1 + (1", "new_p1 - cav_m / cav_v # neg_idxs = np.where(frac_t2 < 0) # frac_t2[neg_idxs]", "vout = psi0 + Bhatpsi2 - mout**2 extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat,", "find_log_lik = compute_energy and (k == no_sweeps-1) for i in range(batch_size): m_cav, v_cav,", "Suhat, SuinvMuhat, Suinvhat = \\ extra_args[0], extra_args[1], extra_args[2], extra_args[3] dmcav, dvcav = grad_cav['mcav'],", "self.zu) qfu = np.dot(kfu, self.Kuuinv) mf = np.einsum('nm,dm->nd', qfu, u_sample) vf = kff", "# TODO: put this in config energy, 
grad_all = self.run_pep(idxs, no_ep_sweeps, alpha, parallel,", "np.sum(k_i[:, np.newaxis, :] * gamma, axis=2)) hd1 = h_si * dlogZd_dmi[:, :, np.newaxis]", "1e-16) sf = np.log(np.array([0.5])) params = dict() params['sf' + key_suffix] = sf params['ls'", "# init_params = dict(sgp_params) # init_params.update(lik_params) # init_params.update(ssm_params) # return init_params # def", "loop here # for k in range(K): # fs[:, :, k] = self.sgp_layer.sample(inputs)", "(TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat", "lik (str, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR, self).__init__(x_train, y_train, no_pseudo, lik)", "n, alpha, grad_cav, cav_m, cav_v, decay=0.0): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE):", "dm2 var_new_parallel = 1 / (1 / var_i_new + 1 / variance_i *", "- 1) cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :] n1_new = (1", "Description \"\"\" if mode == self.UP: cav_up_1 = self.x_prev_1 + self.x_next_1 + \\", "grad_cav, cav_m, cav_v, decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' 
def compute_cavity_x(self, n,", "scale_post = N * 1.0 / alpha - 1.0 scale_cav = - N", "(TYPE): Description m_t (TYPE): Description v_t (TYPE): Description alpha (TYPE): Description Returns: TYPE:", "dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb[i], alpha, compute_dm2=True) self.sgp_layer.update_factor( [idxs[i]], m_cav, v_cav, dm,", "= zu return params def get_hypers(self, key_suffix=''): \"\"\"Summary Args: key_suffix (str, optional): Description", "Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" # prior factor cav_x1 =", "Bkfukuf extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat] return mout, vout,", "alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( idxs, alpha, grad_cav, cav_m, cav_v, decay=decay) except KeyboardInterrupt:", "+ (1 - alpha) * self.tx1[n, :] cav_x2 = self.t02 + (1 -", "(TYPE): Description updated (bool): Description x_train (TYPE): Description \"\"\" def __init__(self, x_train, y_train,", "mx = post_1 / post_2 return mx, vx # def predict_f(self, inputs): #", "grads wrt psi1 and psi2 dpsi1 = np.einsum('nd,ndm->nm', dm_all, Ahat) dpsi2 = np.einsum('nd,ndab->nab',", "lik='Gaussian', prior_mean=0, prior_var=1, x_control=None, gp_emi=False, control_to_emi=True): \"\"\"Summary Args: y_train (TYPE): Description hidden_size (TYPE):", "cav_m + cav_v * dmx new_v = cav_v - cav_v**2 * (dmx**2 -", "cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2 = \\ self.compute_cavity_x(self.NEXT, alpha) idxs = np.arange(self.N - 1)", "Returns: TYPE: Description \"\"\" # compute the posterior approximation if new_hypers and x_train", "scale_post * phi_post + scale_cav * phi_cav return phi def forward_prop_thru_cav(self, n, mx,", "Description # no_samples (int, optional): Description # Returns: # TYPE: Description # \"\"\"", "if N < 10000: centroids, label = kmeans2(x_train, M, minit='points') else: randind =", "np.zeros([M, M]) # variables for the hyperparameters self.ls = np.zeros([Din, ]) self.sf =", "def 
get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1 = self.t01 + self.tx1 post_2", "self.tx2[n, :] = tx2_new def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1 =", "energy, grad_all = self.run_pep(idxs, no_ep_sweeps, alpha, parallel, compute_energy=True) return energy, grad_all def run_pep(self,", "decay * cur_n1 + (1 - decay) * n1_new self.x_prev_2[idxs, :] = decay", "np.einsum('bn,nd->db', self.KuuinvKuf, self.means / self.variances) Vinv = self.Kuuinv + T2u self.Suinv = Vinv", "Description updated (bool): Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1):", "1 - alpha * tn * wnSwn term2a = 0.5 * alpha *", "\\ extra_args[0], extra_args[1], extra_args[2], extra_args[3] dmcav, dvcav = grad_cav['mcav'], grad_cav['vcav'] # perform Power-EP", "Description \"\"\" try: for e in range(no_epochs): if e % 50 == 0:", "(TYPE): Description vx (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" muhat,", "* h_si, axis=2)) dlogZd_dmi = -dlogZd_dmi2 * (mean_i - np.sum(k_i[:, np.newaxis, :] *", "\"\"\" import sys import math import numpy as np import scipy.linalg as npalg", "1e-16), 1.0 / (cav_up_2 + 1e-16), cav_up_1, cav_up_2 elif mode == self.PREV: cav_prev_1", "- alpha) * self.x_prev_1[idxs, :] cav_prev_2 += (1 - alpha) * self.x_prev_2[idxs, :]", "prop_info[1], prop_info[2] kfu = self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i = self.means[idxs,", "(TYPE): Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description sn (int):", "t2_old = self.t2[n, :, :, :] t1_new = (1.0 - alpha) * t1_old", "idxs, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description dmcav (TYPE): Description dvcav (TYPE):", "extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( [n], cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt, dvt", "prior_var=1, x_control=None, gp_emi=False, 
control_to_emi=True): \"\"\"Summary Args: y_train (TYPE): Description hidden_size (TYPE): Description no_pseudo", "0.1 * np.random.rand(Din, )) sf = np.log(np.array([1])) zu = np.tile(np.linspace(-1, 1, M).reshape((M, 1)),", "(float, optional): Description alpha (float, optional): Description Returns: TYPE: Description Raises: NotImplementedError: Description", "Base_SGPSSM from config import * class SGP_Layer(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout", "given new hypers self.compute_kuu() # compute mu and Su for each layer self.update_posterior(x_train,", "+ Bpsi2 return mout, vout def sample(self, x): \"\"\"Summary Args: x (TYPE): Description", "extra_res, cav_m, cav_v, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( idxs, alpha,", "dsf = 2 * sf2 * dsf2 # compute the gradients Vmm =", "Suhat, SuinvMuhat, Suinvhat def update_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" # compute the", "Description \"\"\" self.ls = params['ls' + key_suffix] self.sf = params['sf' + key_suffix] self.zu", "self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( [n], alpha, grad_cav,", "'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2.0 * self.sf) psi1,", "* logdetKuu + np.sum(mu * Suinvm)) variance_i = self.variances[idxs, :] mean_i = self.means[idxs,", "mu) term1 = 0.5 * (np.sum(logdetSu) - Dout * logdetKuu + np.sum(mu *", "= {} grads['sf'] = 2*dhyp[0] + dsf grads['ls'] = 2*dhyp[1] + dls grads['zu']", "zu = self.zu self.Kuu = compute_kernel(2 * ls, 2 * sf, zu, zu)", "optional): Description Returns: TYPE: Description \"\"\" N = self.N if mb_size >= N:", "dls = dls * np.exp(self.ls) dsf2 += np.sum(dv) dsf = 2 * sf2", "0.5 * \\ np.sum(self.mu * np.linalg.solve(self.Su, self.mu)) return phi_posterior def compute_phi_cavity(self): \"\"\"Summary Returns:", 
"self.NEXT: idxs = np.arange(0, self.N - 1) cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs,", "# TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.get_hypers() # emi_params = self.emi_layer.get_hypers()", "gamma_si) v_si_ii = k_ii - np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i) return m_si_i, v_si_ii, [h,", "class SGP_Layer_rank_one(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description Kuu (TYPE): Description", "Args: mode (TYPE): Description idxs (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description", "TYPE: Description \"\"\" dmx = grad_cav['mx'] dvx = grad_cav['vx'] new_m = cav_m +", "N = self.N M = self.M Din = self.Din Dout = self.Dout if", "updated (bool): Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1): \"\"\"Summary", "= no_train # factor variables self.t1 = np.zeros([N, Dout, M]) self.t2 = np.zeros([N,", "def compute_cavity(self, n, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description alpha (float, optional): Description", "params class SGP_Layer_rank_one(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description Kuu (TYPE):", "Description sgp_layer (TYPE): Description updated (bool): Description x_train (TYPE): Description \"\"\" def __init__(self,", "Description # \"\"\" # sgp_params = self.sgp_layer.get_hypers() # lik_params = self.lik_layer.get_hypers() # params", "scale_prior * phi_prior + scale_post * phi_post + scale_cav * phi_cav return phi", "+ np.einsum('ndab,ndb->nda', Suhat, dmcav) inner = np.einsum('nda,ndb->ndab', dmcav, dmcav) - 2 * dvcav", "with the dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x_sequential(self.PREV, [n", "SGP_Layer(self.N, self.Din, self.Dout, self.M) def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0.5): \"\"\"Summary Args: alpha", "variables N = self.N Din = self.Din self.x_prev_1 = np.zeros((N, Din)) 
self.x_prev_2 =", "/ variance_i # n2_ori = mean_i / variance_i # n1_damped = rho *", "* self.sf, x, self.zu) mout = np.einsum('nm,ndm->nd', kfu, Ahat) Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat,", "alpha (TYPE): Description Returns: TYPE: Description \"\"\" sn2 = np.exp(2 * self.sn) v_sum", "my, vy # def update_hypers(self, params): # \"\"\"Summary # Args: # params (TYPE):", "dvcav (TYPE): Description mcav (TYPE): Description vcav (TYPE): Description n1cav (TYPE): Description n2cav", "dm2 - np.sum(k_i[:, np.newaxis, :] * h, axis=2) mean_i_new = mcav - dm", "def compute_cavity_x(self, mode, alpha): \"\"\"Summary Args: mode (TYPE): Description alpha (TYPE): Description Returns:", "for loop here # for k in range(K): # fs[:, :, k] =", "import * from kernels import * from lik_layers import Gauss_Layer, Probit_Layer, Gauss_Emis from", "== 1: # rho = 0.5 # n1_new = 1.0 / var_new_parallel #", "Bhat, kfu, kfu) vout = kff + Bkfukuf extra_res = [muhat, Suhat, SuinvMuhat,", "if isinstance(self.emi_layer, Gauss_Layer) and epoch == 0: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v,", "vx, self.zu) mout = np.einsum('nm,ndm->nd', psi1, Ahat) Bhatpsi2 = np.einsum('ndab,nab->nd', Bhat, psi2) vout", "gp_emi, control_to_emi) self.dyn_layer = SGP_Layer( self.N - 1, self.Din + self.Dcon_dyn, self.Din, self.M)", "Bhat, psi2) vout = psi0 + Bhatpsi2 - mout**2 extra_res = [muhat, Suhat,", "mean_new_parallel = mean_div_var_i_new * var_new_parallel # if alpha == 1: # rho =", "return energy, grad_all def run_pep_sequential(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size = idxs.shape[0]", "display_steps): batch_size = idxs.shape[0] scale_logZ = - self.N * 1.0 / batch_size /", "= Suinvnew - Suinvhat t1_frac = SuinvMunew - SuinvMuhat t1_old = self.t1[n, :,", "# init_params.update(lik_params) # init_params.update(ssm_params) # return init_params # def get_hypers(self): # \"\"\"Summary #", "grad_hyper, grad_cav def update_factor(self, n, alpha, grad_cav, 
extra_args, decay=0): \"\"\"Summary Args: n (TYPE):", "self.run_pep(idxs, no_ep_sweeps, alpha, parallel, compute_energy=True) return energy, grad_all def run_pep(self, train_idxs, no_sweeps, alpha,", ":] * gamma, axis=2)) hd1 = h_si * dlogZd_dmi[:, :, np.newaxis] hd2h =", "* 1.0 / alpha scale_prior = 1 phi_prior = self.compute_phi_prior() phi_post = self.compute_phi_posterior()", "np.einsum('ab,db->da', Kuuinv, self.mu) Smm = self.Su + np.einsum('da,db->dab', self.mu, self.mu) B = np.einsum(", "dmx new_v = cav_v - cav_v**2 * (dmx**2 - 2 * dvx) new_p2", "self.update_hypers(params) # run power-EP and compute grads no_ep_sweeps = 10 # TODO: put", "self.get_posterior_x() my, vy, vyn = self.emi_layer.output_probabilistic(mx, vx) return my, vy, vyn # def", "np.linalg.inv(Suinvhat) muhat = np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat) return muhat, Suhat, SuinvMuhat, Suinvhat def update_posterior(self):", "sgp_layer (TYPE): Description t01 (TYPE): Description t02 (TYPE): Description tx1 (TYPE): Description tx2", "Description \"\"\" post_1 = self.t01 + self.tx1 post_2 = self.t02 + self.tx2 vx", "== 0: # print 'PEP, epoch: %d / %d' % (k, no_sweeps) find_log_lik", "* wnScavwn term2b = - gn * tn * wnScavSinvm term2c = 0.5", "1]) temp3 = np.einsum('na,ndb->ab', p_i, p_idlogZ_dvi) dKuu_via_vi = temp1 + temp2 + temp3", "-1.0 / dm2 - np.sum(k_i[:, np.newaxis, :] * h, axis=2) mean_i_new = mcav", "optional): Description Returns: TYPE: Description \"\"\" # compute the leave one out moments", "Description \"\"\" new_m = mcav + vcav * dmcav new_v = vcav -", "hypers self.compute_kuu() # compute mu and Su for each layer self.update_posterior(x_train, new_hypers=True) def", "# axs[0].set_title('n = %d' % n[0]) # plt.show() def sample(self, x): \"\"\"Summary Args:", "super(SGPR, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) def inference(self,", "(TYPE): Description Su (TYPE): Description Suinv (TYPE): Description SuinvMu 
(TYPE): Description t1 (TYPE):", "cav_x2 = self.t02 + (1 - alpha) * self.tx2[n, :] cav_v = 1.0", "model with new hypers self.update_hypers(params) # run power-EP and compute grads no_ep_sweeps =", "/ variance_i # n1_damped = rho * n1_new + (1.0 - rho) *", "sgp_params = self.sgp_layer.get_hypers() # lik_params = self.lik_layer.get_hypers() # params = dict(sgp_params) # params.update(lik_params)", "(bool): Description x_next_1 (TYPE): Description x_next_2 (TYPE): Description x_prev_1 (TYPE): Description x_prev_2 (TYPE):", "cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 for n in range(0,", "p_i - np.einsum('dab,nb->nda', beta, k_i) variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :]", "(m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, cav_m, cav_v, alpha=alpha) logZ, dm, dv", "= np.zeros([Din, ]) self.sf = 0 def forward_prop_thru_post(self, x): \"\"\"Summary Args: x (TYPE):", "# init_params = dict(sgp_params) # init_params.update(lik_params) # return init_params # def get_hypers(self): #", "/ alpha xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] for k in", "alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, decay=decay,", "each layer self.update_posterior(x_train, new_hypers=True) def compute_cavity(self, idxs, alpha): # deletion p_i = self.KuuinvKuf[:,", "np.einsum('ab,ndb->nda', Kuuinv, VcavKuuinvKufi) p_idlogZ_dvi = p_i[:, np.newaxis, :] * dv[:, :, np.newaxis] temp1", "== no_sweeps-1) m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity(idxs, alpha) logZ, dm, dv, dm2 =", "this in config parallel = True # TODO: put this in config energy,", "ls, 2 * sf, zu, zu) self.Kuu += np.diag(JITTER * np.ones((M, ))) self.Kuuinv", "Bhat = \\ a[0], a[1], a[2], a[3], a[4], a[5], a[6] Kuuinv = self.Kuuinv", "Description no_pseudo (TYPE): Description lik (str, optional): Description prior_mean (int, optional): Description 
prior_var", "vx # def predict_f(self, inputs): # \"\"\"Summary # Args: # inputs (TYPE): Description", "= self.N scale_post = N * 1.0 / alpha - 1.0 scale_cav =", "# update kuu and kuuinv ls = self.ls sf = self.sf Dout =", "mu = self.mu Suinv = self.Suinv p_i = self.KuuinvKuf[:, idxs].T h, beta_si, gamma_si", "*= 0 # dm2 *= 0 # dm2 += 1e-16 # dv *=", "# print 'PEP, epoch: %d / %d' % (k, no_sweeps) find_log_lik = compute_energy", "and Su for each layer self.update_posterior(x_train, new_hypers=True) def compute_cavity(self, idxs, alpha): # deletion", "Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" # compute the leave", "- self.t02 def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0): \"\"\"Summary Args: alpha (float, optional):", "np.zeros([N, Dout, M]) self.t2 = np.zeros([N, Dout, M, M]) # TODO self.mu =", "x = self.x_train[idxs, :] (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, x, alpha=alpha)", "self.Din, self.Dout, self.M) # natural params for latent variables self.tx1 = np.zeros((self.N, self.Din))", "self.variances) T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf) T1u = np.einsum('bn,nd->db', self.KuuinvKuf, self.means / self.variances)", "kff + Bpsi2 return mout, vout # TODO def _forward_prop_random_thru_post_mm(self, mx, vx): \"\"\"Summary", "= mcav + vcav * dmcav new_v = vcav - vcav**2 * (dmcav**2", "class SGPSSM(Base_SGPSSM): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description emi_layer (TYPE): Description", "= self.Din Dout = self.Dout if x_train is None: ls = np.log(np.ones((Din, ))", "remove for loop here # for k in range(K): # fs[:, :, k]", "Description lik (str, optional): Description prior_mean (int, optional): Description prior_var (int, optional): Description", "Vinv self.Su = np.linalg.inv(Vinv) self.mu = np.einsum('dab,db->da', self.Su, T1u) self.gamma = np.einsum('ab,db->da', self.Kuuinv,", "grad_cav['mx'] dvx = grad_cav['vx'] new_m = 
cav_m + cav_v * dmx new_v =", "np.zeros([Dout, M, M]) # numpy variable for inducing points, Kuuinv, Kuu and its", "self.UP, self.PREV, self.NEXT = 'UP', 'PREV', 'NEXT' def inf_parallel(self, epoch, alpha, decay): \"\"\"Summary", "Description zu (TYPE): Description \"\"\" def __init__(self, no_train, input_size, output_size, no_pseudo): \"\"\"Summary Args:", "self.x_next_1 + self.x_prev_1 + self.x_up_1 post_2 = self.x_next_2 + self.x_prev_2 + self.x_up_2 post_1[0,", "= np.linalg.inv(Sunew) SuinvMunew = np.einsum('ndab,ndb->nda', Suinvnew, munew) t2_frac = Suinvnew - Suinvhat t1_frac", "not parallel: for n in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) xn =", "self.inf_sequential(e, alpha, decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' def compute_cavity_x(self, mode, alpha):", "mx, vx def get_posterior_y(self): \"\"\"Summary Returns: TYPE: Description \"\"\" mx, vx = self.get_posterior_x()", "= params['sf' + key_suffix] self.zu = params['zu' + key_suffix] # update Kuu given", "dvt def update_factor_x( self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav, decay=0.0, alpha=1.0):", "= mu + np.einsum('dab,db->da', Lu, epsilon) kff = compute_kernel(2 * self.ls, 2 *", "compute_dm2=True) # dm *= 0 # dm2 *= 0 # dm2 += 1e-16", "\"\"\" Kuuinv = self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) B = np.einsum( 'ab,dbc->dac',", "%d / %d' % (k, no_sweeps) find_log_lik = compute_energy and (k == no_sweeps-1)", "{} M = self.M Din = self.Din Dout = self.Dout params['ls' + key_suffix]", "parallel update self.t1[n, :, :] = decay * t1_old + (1 - decay)", "hd2h # projection h = p_i - np.einsum('ndab,nb->nda', beta_si, k_i) m_si_i = np.einsum('na,nda->nd',", "self.zu = np.zeros([M, Din]) self.Kuu = np.zeros([M, M]) self.Kuuinv = np.zeros([M, M]) #", "cav_t_2, decay=decay, alpha=alpha) def inf_sequential(self, epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE): Description", "== self.NEXT: idxs = np.arange(0, self.N - 1) cur_n1 = 
self.x_next_1[idxs, :] cur_n2", "Dout * logdetKuu + np.sum(mu * Suinvm)) variance_i = self.variances[idxs, :] mean_i =", "_forward_prop_random_thru_cav_mm(self, n, mx, vx, alpha): \"\"\"Summary Args: n (TYPE): Description mx (TYPE): Description", "/ dm2 - np.sum(k_i[:, np.newaxis, :] * h, axis=2) mean_i_new = mcav -", "cav_m, cav_v, cav_x1, cav_x2 def update_factor_x(self, n, alpha, grad_cav, cav_m, cav_v, decay=0.0): \"\"\"Summary", "self.x_up_1 = up_1 self.x_up_2 = up_2 # deal with the dynamics factors here", "B, psi2) vout = psi0 + Bpsi2 - mout**2 return mout, vout def", "dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( m, v,", "Kuuinv) dmcav = np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_cav = {'mcav': dmcav,", "Args: # params (TYPE): Description # Returns: # TYPE: Description # \"\"\" #", "np.copy(x_train) else: randind = np.random.permutation(N) X1 = X[randind[:5000], :] x_dist = cdist(X1, X1,", "= np.einsum('ab,ndb->nda', Kuuinv, mcav) dmiKuuinvMcav = dm[:, :, np.newaxis] * KuuinvMcav dKuu_via_mi =", "= psi0 + Bhatpsi2 - mout**2 extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, Smm,", "my, vy = self.lik_layer.output_probabilistic(mf, vf) # return my, vy # def update_hypers(self, params):", "(TYPE): Description Suinv (TYPE): Description SuinvMu (TYPE): Description t1 (TYPE): Description t2 (TYPE):", "TYPE: Description \"\"\" if vx is None: return self._forward_prop_deterministic_thru_cav(n, mx, alpha) else: return", "= (1.0 - alpha) * t1_old + t1_frac t2_new = (1.0 - alpha)", "0 (sign, logdet) = np.linalg.slogdet(self.Suhat) phi_cavity = 0.5 * np.sum(logdet) phi_cavity += 0.5", "Description dv (TYPE): Description extra_args (TYPE): Description x (TYPE): Description alpha (float, optional):", "cur_t1 + frac_t1 tx2_new = (1 - alpha) * cur_t2 + frac_t2 tx1_new", "Bhat] return mout, vout, extra_res def forward_prop_thru_post(self, mx, vx=None): \"\"\"Summary Args: mx (TYPE):", "TYPE: 
Description \"\"\" sn2 = np.exp(2 * self.sn) v_sum = v_t + v_prop", "N = no_train # factor variables self.t1 = np.zeros([N, Dout, M]) self.t2 =", "zu, zu) self.Kuu += np.diag(JITTER * np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def compute_cavity(self,", "= post_2 * post_m self.tx1 = post_1 - self.t01 self.tx2 = post_2 -", "Description Splusmm (TYPE): Description Su (TYPE): Description Suinv (TYPE): Description SuinvMu (TYPE): Description", "matplotlib.pyplot as plt import time import pdb from scipy.cluster.vq import kmeans2 from utils", "phi_cavity def compute_phi(self, alpha=1.0): \"\"\"Summary Args: alpha (float, optional): Description Returns: TYPE: Description", "* v_sum) alpha_term = 0.5 * (1 - alpha) * np.log(2 * np.pi", "\"\"\"Summary # Returns: # TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.get_hypers() #", "= np.zeros([M, M]) self.Kuuinv = np.zeros([M, M]) # variables for the hyperparameters self.ls", "* dm2 sgp_contrib, sgp_grad = self.sgp_layer.backprop_grads_reg( idxs, m_cav, v_cav, dm_s, dm2_s, dv_s, xb,", "vf = self.sgp_layer.forward_prop_thru_post(inputs) # return mf, vf # def sample_f(self, inputs, no_samples=1): #", "variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] tn = 1.0 / variance_i", "= np.einsum('ndb,nb->nd', wnScav, p_i) wnScavSinvm = np.sum(wnScav * Suinvm, axis=2) wnS = np.einsum('na,dab->ndb',", "= np.sum(mu * p_i[:, np.newaxis, :], axis=2) oneminuswnSwn = 1 - alpha *", "# inputs (TYPE): Description # no_samples (int, optional): Description # Returns: # TYPE:", "self.N M = self.M Din = self.Din Dout = self.Dout if x_train is", "= np.zeros([Dout, M]) self.beta = np.zeros([Dout, M, M]) # numpy variable for inducing", "dv, alpha, scale_logZ) energy = sgp_contrib + lik_contrib grad_all = {} for key", "p_i[:, np.newaxis, :] * dv[:, :, np.newaxis] temp1 = - np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi, p_idlogZ_dvi)", "yb = self.y_train else: idxs = np.random.choice(N, mb_size, replace=False) xb = 
self.x_train[idxs, :]", "prior_var # TODO: alternatitve method for non real-valued data post_m = PCA_reduce(y_train, self.Din)", "+ (1.0 - rho) * n1_ori # n2_damped = rho * n2_new +", "dv, Bhat) dsf2, dls, dzu, dmx, dvx = compute_psi_derivatives( dpsi1, psi1, dpsi2, psi2,", "Kuu (TYPE): Description Kuuinv (TYPE): Description ls (TYPE): Description M (TYPE): Description mu", "n2_new else: raise NotImplementedError('unknown mode') def update_factor_x_sequential( self, mode, dmcav, dvcav, mcav, vcav,", "\"\"\" if vx is None: return self._forward_prop_deterministic_thru_post(mx) else: return self._forward_prop_random_thru_post_mm(mx, vx) def _forward_prop_deterministic_thru_post(self,", "mout = np.einsum('nm,dm->nd', kfu, A) Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu) vout =", "np.exp(2.0 * self.sf) psi1, psi2 = compute_psi_weave( 2 * self.ls, 2 * self.sf,", "(int): Description updated (bool): Description x_next_1 (TYPE): Description x_next_2 (TYPE): Description x_prev_1 (TYPE):", "\"\"\"Summary Args: mode (TYPE): Description dmcav (TYPE): Description dvcav (TYPE): Description mcav (TYPE):", "# var_new_parallel = 1.0 / n1_damped # mean_new_parallel = var_new_parallel * n2_damped self.variances[idxs,", "logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm(", "* self.x_prev_1[idxs, :] cav_prev_2 += (1 - alpha) * self.x_prev_2[idxs, :] return cav_prev_1", "dm_all, Ahat) dpsi2 = np.einsum('nd,ndab->nab', dv, Bhat) dsf2, dls, dzu, dmx, dvx =", "(1 - alpha) * self.x_next_2[idxs, :] return cav_next_1 / cav_next_2, 1.0 / cav_next_2,", "post_2 = self.x_next_2 + self.x_prev_2 + self.x_up_2 post_1[0, :] += self.x_prior_1 post_2[0, :]", "= p_i[:, np.newaxis, :] * dv[:, :, np.newaxis] temp1 = - np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi,", "= np.zeros((inputs.shape[0], self.Dout, K)) # # TODO: remove for loop here # for", "alpha=alpha) logZ, dmprop, dvprop, dmt, dvt = \\ self.compute_transition_tilted( 
mprop, vprop, cav_t_m, cav_t_v,", "Suhat, SuinvMuhat) return muhat, Suhat, SuinvMuhat, Suinvhat def update_posterior(self): \"\"\"Summary Returns: TYPE: Description", "np.zeros((N, Din)) self.x_next_1 = np.zeros((N, Din)) self.x_next_2 = np.zeros((N, Din)) self.x_up_1 = np.zeros((N,", "Description \"\"\" # compute the leave one out moments t1n = self.t1[n, :,", "post_2[0, :] += self.x_prior_2 vx = 1.0 / post_2 mx = post_1 /", "# natural params for latent variables N = self.N Din = self.Din self.x_prev_1", "n1_ori = 1.0 / variance_i # n2_ori = mean_i / variance_i # n1_damped", "muhat, Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha) Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda',", "prop_info = self.sgp_layer.compute_cavity( [idxs[i]], alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav,", "extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], cav_m_n, cav_v_n, alpha=alpha) logZn, dmn, dvn = \\", "for non real-valued data post_m = PCA_reduce(y_train, self.Din) post_m_mean = np.mean(post_m, axis=0) post_m_std", "compute_phi_prior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Kuu) logZ_prior = self.Dout", "alpha, grad_cav, extra_res, decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' 
class SGPLVM(Base_SGPLVM): \"\"\"Summary", "xb = self.x_train yb = self.y_train else: idxs = np.random.choice(N, mb_size, replace=False) xb", "= 'UP', 'PREV', 'NEXT' def inf_parallel(self, epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE):", "Kuuinv)) - Kuuinv psi0 = np.exp(2 * self.sf) psi1, psi2 = compute_psi_weave( 2", "phi_post + scale_cav * phi_cav return phi def forward_prop_thru_cav(self, n, mx, vx=None, alpha=1.0):", "self.sgp_layer.update_posterior() # self.updated = True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # return mf,", "grads['zu'] = dhyp[2] + dzu return sgp_contrib, grads class SGPR_rank_one(Base_SGPR): \"\"\"Summary Attributes: Din", "(TYPE): Description mx (TYPE): Description vx (TYPE): Description alpha (TYPE): Description Returns: TYPE:", "* \\ np.sum(self.muhat * np.linalg.solve(self.Suhat, self.muhat)) return phi_cavity def compute_phi(self, alpha=1.0): \"\"\"Summary Args:", "self.Dout * 0.5 * logdet return logZ_prior def compute_phi_posterior(self): \"\"\"Summary Returns: TYPE: Description", "= SGP_Layer( self.N, self.Din + self.Dcon_emi, self.Dout, self.M) # natural params for latent", "0 def compute_phi_prior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Kuu) logZ_prior", "range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) xn = self.x_train[n, :].reshape([1, self.Din]) (mn, vn,", "prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) if find_log_lik: N = self.N lik_contrib = scale_logZ * np.sum(logZ)", "self.t1[n, :, :] = t1_new self.t2[n, :, :, :] = t2_new # TODO:", "= self.t2[n, :, :, :] Suinvhat = self.Suinv - alpha * t2n SuinvMuhat", "lik_contrib = scale_logZ * np.sum(logZ) dm_s = scale_logZ * dm dv_s = scale_logZ", "np.arange(0, self.N - 1) cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :] n1_new", "+ np.einsum('ab,bd->ad', Lf, epsilon) return f_sample def compute_kuu(self): \"\"\"Summary Returns: TYPE: Description \"\"\"", 
"train_idxs, no_sweeps, alpha, compute_energy, display_steps) else: # TODO return self.run_pep_sequential( train_idxs, no_sweeps, alpha,", "# axs[1].errorbar(self.zu[:, 0]+0.05, self.mu[1, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[1, :, :]))) # axs[0].set_title('n = %d'", "np.einsum('ndb,nb->nd', wnScav, p_i) wnScavSinvm = np.sum(wnScav * Suinvm, axis=2) wnS = np.einsum('na,dab->ndb', p_i,", "(TYPE): Description \"\"\" self.Din = Din = input_size self.Dout = Dout = output_size", "cav_up_1, cav_up_2 elif mode == self.PREV: cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :]", "Returns: TYPE: Description \"\"\" # dict to hold hypers, inducing points and parameters", "= self.t1[n, :, :] t2n = self.t2[n, :, :, :] Suinvhat = self.Suinv", "# Returns: # TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.get_hypers() # emi_params", "= d_trace_MKzz_dhypers( 2*self.ls, 2*self.sf, self.zu, S, Kuu - np.diag(JITTER * np.ones(self.M))) grads =", "self.KuuinvKuf, 1.0 / self.variances) T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf) T1u = np.einsum('bn,nd->db', self.KuuinvKuf,", "energy, grad_all def run_pep_sequential(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size = idxs.shape[0] scale_logZ", "init_params.update(lik_params) # init_params.update(ssm_params) # return init_params # def get_hypers(self): # \"\"\"Summary # Returns:", "\"\"\"Summary Args: alpha (float, optional): Description Returns: TYPE: Description \"\"\" N = self.N", "= np.zeros((N, Din)) self.x_next_1 = np.zeros((N, Din)) self.x_next_2 = np.zeros((N, Din)) self.x_up_1 =", "N = self.N Nb = idxs.shape[0] sf2 = np.exp(2 * self.sf) Dout =", "n1_new + (1.0 - rho) * n1_ori # n2_damped = rho * n2_new", "= np.log(np.array([0.5])) params = dict() params['sf' + key_suffix] = sf params['ls' + key_suffix]", "'Caught KeyboardInterrupt ...' 
def compute_cavity_x(self, n, alpha): \"\"\"Summary Args: n (TYPE): Description alpha", "update_factor_x( self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav, decay=0.0, alpha=1.0): \"\"\"Summary Args:", ":] cav_next_1 += (1 - alpha) * self.x_next_1[idxs, :] cav_next_2 += (1 -", "M, M]) # TODO self.mu = np.zeros([Dout, M, ]) self.Su = np.zeros([Dout, M,", "extra_res) else: # parallel update for entire dataset # TODO: minibatch parallel idxs", "scipy.linalg as npalg from scipy import special from scipy.optimize import minimize import matplotlib.pyplot", "TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.init_hypers() # lik_params = self.lik_layer.init_hypers() #", "term1 = 0.5 * (np.sum(logdetSu) - Dout * logdetKuu + np.sum(mu * Suinvm))", "frac_t1 = new_p1 - cav_m / cav_v # neg_idxs = np.where(frac_t2 < 0)", "(TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description updated (bool): Description x_train (TYPE):", "= self.x_next_1 + self.x_prev_1 + self.x_up_1 post_2 = self.x_next_2 + self.x_prev_2 + self.x_up_2", "m dAhat = np.einsum('nd,nm->ndm', dm_all, psi1) dBhat = np.einsum('nd,nab->ndab', dv, psi2) # compute", "(str, optional): Description Returns: TYPE: Description \"\"\" params = {} M = self.M", "'ndab,ndbc->ndac', Suhat, np.einsum('ndab,ndbc->ndac', inner, Suhat)) Suinvnew = np.linalg.inv(Sunew) SuinvMunew = np.einsum('ndab,ndb->nda', Suinvnew, munew)", "/ (cav_up_2 + 1e-16), 1.0 / (cav_up_2 + 1e-16), cav_up_1, cav_up_2 elif mode", ":, :] self.mu = munew[0, :, :] self.Suinv = Suinvnew[0, :, :, :]", "merge info from output cav_up_m, cav_up_v, _, _ = self.compute_cavity_x(self.UP, alpha) if not", "N return energy, grad_all def run_pep_sequential(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size =", "self.Kuuinv, np.einsum('dab,bc->dac', self.Su, self.Kuuinv)) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: x_train (None, optional):", "return self._forward_prop_random_thru_cav_mm(n, mx, vx, 
alpha) def _forward_prop_deterministic_thru_cav(self, n, x, alpha): \"\"\"Summary Args: n", "dmcav = np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_cav = {'mcav': dmcav, 'vcav':", "not self.gp_emi: # only do this once at the begining for gaussian emission", "(float, optional): Description Returns: TYPE: Description \"\"\" if vx is None: return self._forward_prop_deterministic_thru_cav(n,", "self.N - 1) cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :] cav_next_2 = self.x_up_2[idxs,", "for n in range(0, self.N - 1): # deal with the dynamics factors", "alpha, grad_cav, cav_m, cav_v, decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' def compute_cavity_x(self,", "alpha, parallel, compute_energy=False, display_steps=10): if parallel: return self.run_pep_parallel( train_idxs, no_sweeps, alpha, compute_energy, display_steps)", "v, dm, dv, extra_res, cav_m, cav_v, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay)", "Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat = \\ extra_args[0], extra_args[1], extra_args[2], extra_args[3] dmcav,", "deletion p_i = self.KuuinvKuf[:, idxs].T[:, np.newaxis, :] k_i = self.Kfu[idxs, :] k_ii =", "t2 (TYPE): Description zu (TYPE): Description \"\"\" def __init__(self, no_train, input_size, output_size, no_pseudo):", "+= (1 - alpha) * self.x_next_2[idxs, :] return cav_next_1 / cav_next_2, 1.0 /", "self.x_prev_2 = np.zeros((N, Din)) self.x_next_1 = np.zeros((N, Din)) self.x_next_2 = np.zeros((N, Din)) self.x_up_1", "= compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu) qfu = np.dot(kfu, self.Kuuinv)", "= self.y_train[idxs, :] for k in range(no_sweeps): if k % display_steps == 0:", "m, v, dm, dv, extra_args, x, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v", "dsf2, dls, dzu = compute_kfu_derivatives( dKfu_via_logZ, kfu, np.exp(self.ls), sf2, x, self.zu) dls =", "(1 - alpha) * cur_t1 + frac_t1 tx2_new = (1 - alpha) *", "Description vx (None, 
optional): Description Returns: TYPE: Description \"\"\" if vx is None:", "Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" N = self.N M", "Lu = np.linalg.cholesky(Su) epsilon = np.random.randn(self.Dout, self.M) u_sample = mu + np.einsum('dab,db->da', Lu,", "\"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Kuu) logZ_prior = self.Dout *", "vyn # def predict_f(self, inputs): # \"\"\"Summary # Args: # inputs (TYPE): Description", "new_n2 = 1.0 / new_v new_n1 = new_n2 * new_m frac_n2 = new_n2", "dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb[i], alpha, compute_dm2=True) self.sgp_layer.update_factor( [idxs[i]], m_cav, v_cav,", "= cav_v * cav_x1 return cav_m, cav_v, cav_x1, cav_x2 def update_factor_x(self, n, alpha,", "f_sample = mf + np.einsum('ab,bd->ad', Lf, epsilon) return f_sample def compute_kuu(self): \"\"\"Summary Returns:", "* 1.0 / batch_size / alpha xb = self.x_train[idxs, :] yb = self.y_train[idxs,", "Description N (TYPE): Description sf (int): Description Splusmm (TYPE): Description Su (TYPE): Description", "T2u self.Suinv = Vinv self.Su = np.linalg.inv(Vinv) self.mu = np.einsum('dab,db->da', self.Su, T1u) self.gamma", "TYPE: Description \"\"\" N = self.N M = self.M ls = np.exp(self.ls) sf2", "= SGP_Layer(self.N, self.Din, self.Dout, self.M) def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0.5): \"\"\"Summary Args:", "dv * m dAhat = np.einsum('nd,nm->ndm', dm_all, psi1) dBhat = np.einsum('nd,nab->ndab', dv, psi2)", "extra_args (TYPE): Description mx (TYPE): Description vx (TYPE): Description alpha (float, optional): Description", "self.x_up_1 cav_up_2 = self.x_prev_2 + self.x_next_2 + \\ (1 - alpha) * self.x_up_2", "mu, mu) S = self.Dout * Kuuinv - np.sum(np.einsum('ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Vmm, Kuuinv)),", "# numpy variable for inducing points, Kuuinv, Kuu and its gradients self.zu =", "% n[0]) # plt.show() def sample(self, x): 
\"\"\"Summary Args: x (TYPE): Description Returns:", "= no_train # factor variables self.variances = np.zeros([N, Dout]) self.variances.fill(1e20) self.means = np.zeros([N,", "kfu, kfu) vout = kff + Bpsi2 return mout, vout # TODO def", "* m_diff**2 / v_sum**2 dmt = m_diff / v_sum dmprop = m_diff /", "Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Kuu) logZ_prior = self.Dout * 0.5 * logdet", "no_train (TYPE): Description input_size (TYPE): Description output_size (TYPE): Description no_pseudo (TYPE): Description \"\"\"", "class SGPLVM(Base_SGPLVM): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer (TYPE): Description", "self.t1[n, :, :] t2_old = self.t2[n, :, :, :] t1_new = (1.0 -", "sf2, x, self.zu) dls = dls * np.exp(self.ls) dsf2 += np.sum(dv) dsf =", "dm, dm2, dv, x, alpha, prop_info): N = self.N Nb = idxs.shape[0] sf2", "prior_var, x_control, gp_emi, control_to_emi) self.dyn_layer = SGP_Layer( self.N - 1, self.Din + self.Dcon_dyn,", "alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v (TYPE): Description dm (TYPE): Description dv", "N * 1.0 / alpha - 1.0 scale_cav = - N * 1.0", "...' 
def compute_cavity_x(self, n, alpha): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description", "TYPE: Description \"\"\" a = extra_args muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat", "(TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" # prior factor cav_x1", "extra_res, decay=decay) self.update_factor_x( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2, decay=decay, alpha=alpha) self.update_factor_x(", "Description alpha (TYPE): Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\" if mode", "dmn, dvn, extra_res, xn, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res) else: # parallel", "updated (bool): Description x_next_1 (TYPE): Description x_next_2 (TYPE): Description x_prev_1 (TYPE): Description x_prev_2", "np.einsum('nm,dm->nd', kfu, A) Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu) vout = kff +", "0: # print 'PEP, epoch: %d / %d' % (k, no_sweeps) find_log_lik =", "if new_hypers and x_train is not None: Kfu = compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu)", "its gradients self.zu = np.zeros([M, Din]) self.Kuu = np.zeros([M, M]) self.Kuuinv = np.zeros([M,", "yb[i], alpha, compute_dm2=True) self.sgp_layer.update_factor( [idxs[i]], m_cav, v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None,", "= \\ self.sgp_layer.forward_prop_thru_cav( idxs, x, alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v,", "\"\"\" params = {} M = self.M Din = self.Din Dout = self.Dout", "TYPE: Description \"\"\" if vx is None: return self._forward_prop_deterministic_thru_post(mx) else: return self._forward_prop_random_thru_post_mm(mx, vx)", "def get_posterior_y(self): \"\"\"Summary Returns: TYPE: Description \"\"\" mx, vx = self.get_posterior_x() my, vy,", "(TYPE): Description # Returns: # TYPE: Description # \"\"\" # if not self.updated:", "(TYPE): Description vx (TYPE): Description Returns: TYPE: 
Description \"\"\" Kuuinv = self.Kuuinv A", "Suhat = np.linalg.inv(Suinvhat) muhat = np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat) return muhat, Suhat, SuinvMuhat, Suinvhat", "KuuinvKuf = np.dot(self.Kuuinv, Kfu.T) self.Kfu = Kfu self.KuuinvKuf = KuuinvKuf self.Kff_diag = compute_kernel_diag(2*self.ls,", "params_zu_i = self.zu params['zu' + key_suffix] = self.zu return params def update_hypers(self, params,", "for the hyperparameters self.ls = np.zeros([Din, ]) self.sf = 0 def compute_phi_prior(self): \"\"\"Summary", "alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) if find_log_lik: N = self.N lik_contrib = scale_logZ *", "1.0 / self.variances) T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf) T1u = np.einsum('bn,nd->db', self.KuuinvKuf, self.means", "cur_t2 = self.tx2[n, :] tx1_new = (1 - alpha) * cur_t1 + frac_t1", "n1_damped = rho * n1_new + (1.0 - rho) * n1_ori # n2_damped", "Su) wnSwn = np.sum(wnS * p_i[:, np.newaxis, :], axis=2) mwn = np.sum(mu *", "- alpha * t2n SuinvMuhat = self.SuinvMu - alpha * t1n Suhat =", "if mb_size >= N: idxs = np.arange(N) xb = self.x_train yb = self.y_train", "= sgp_grad[key] for key in lik_grad.keys(): grad_all[key] = lik_grad[key] energy /= N for", "dvcav, muhat) \\ + np.einsum('ab,ndb->nda', Kuuinv, dAhat) grad_hyper = {} grad_input = {'mx':", "n in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) cav_m_n, cav_v_n, _, _ =", "Description \"\"\" dmx = grad_cav['mx'] dvx = grad_cav['vx'] new_m = cav_m + cav_v", "mx, vx=None): \"\"\"Summary Args: mx (TYPE): Description vx (None, optional): Description Returns: TYPE:", "muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2, Ahat, Bhat = \\ a[0], a[1],", "self.emi_layer.output_probabilistic(mx, vx) return my, vy, vyn # def predict_f(self, inputs): # \"\"\"Summary #", "(1.0 - rho) * n2_ori # var_new_parallel = 1.0 / n1_damped # mean_new_parallel", "vout = kff + Bpsi2 return mout, vout # TODO def 
_forward_prop_random_thru_post_mm(self, mx,", "return energy, grad_all def run_pep(self, train_idxs, no_sweeps, alpha, parallel, compute_energy=False, display_steps=10): if parallel:", "return self._forward_prop_deterministic_thru_cav(n, mx, alpha) else: return self._forward_prop_random_thru_cav_mm(n, mx, vx, alpha) def _forward_prop_deterministic_thru_cav(self, n,", "Dout = self.Dout params['ls' + key_suffix] = self.ls params['sf' + key_suffix] = self.sf", "alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( mn, vn, dmn, dvn, extra_res, xn, alpha=alpha) self.sgp_layer.update_factor(", "decay=decay) self.update_factor_x( idxs, alpha, grad_cav, cav_m, cav_v, decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt", "self.update_factor_x( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, decay=decay, alpha=alpha) def inf_sequential(self, epoch,", "prior_mean / prior_var self.t02 = 1.0 / prior_var # TODO: alternatitve method for", "M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description t01 (TYPE): Description t02", "decay=0): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description grad_cav (TYPE): Description extra_args", "x, alpha, prop_info): N = self.N Nb = idxs.shape[0] sf2 = np.exp(2 *", "* dv[:, :, np.newaxis] * (-p_i[:, np.newaxis, :] + KuuinvVcavKuuinvKufi) dKfu_via_logZ = np.sum(dKfu_via_mi", "np.linalg.inv(self.Kuu) def update_posterior(self, x_train=None, new_hypers=False): \"\"\"Summary Returns: TYPE: Description \"\"\" # compute the", "for the hyperparameters self.ls = np.zeros([Din, ]) self.sf = 0 def forward_prop_thru_post(self, x):", "* np.linalg.solve(self.Suhat, self.muhat)) return phi_cavity def compute_phi(self, alpha=1.0): \"\"\"Summary Args: alpha (float, optional):", "cur_n2 + (1 - decay) * n2_new elif mode == self.PREV: idxs =", "\"\"\" mx, vx = self.get_posterior_x() my, vy, vyn = self.emi_layer.output_probabilistic(mx, vx) return my,", "cav_v, decay=decay) except 
KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' def compute_cavity_x(self, n, alpha): \"\"\"Summary", "= kff + Bpsi2 return mout, vout def sample(self, x): \"\"\"Summary Args: x", "np.einsum('nda,ndb->ndab', dmcav, dmcav) - 2 * dvcav Sunew = Suhat - np.einsum( 'ndab,ndbc->ndac',", "True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.emi_layer.output_probabilistic(mf, vf) #", "no_pseudo, lik) self.sgp_layer = SGP_Layer_rank_one(self.N, self.Din, self.Dout, self.M) def objective_function(self, params, mb_size, alpha=1.0,", "variable for inducing points, Kuuinv, Kuu and its gradients self.zu = np.zeros([M, Din])", "- cav_m / cav_v # neg_idxs = np.where(frac_t2 < 0) # frac_t2[neg_idxs] =", ":] energy = {} grad_all = {} for k in range(no_sweeps): # if", "t1_new self.t2[n, :, :, :] = decay * t2_old + (1 - decay)", "\"\"\" # sgp_params = self.sgp_layer.init_hypers() # lik_params = self.emi_layer.init_hypers() # ssm_params = {'sn':", "Ahat) Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu) vout = kff + Bkfukuf extra_res", "return m_si_i, v_si_ii, [h, beta_si, gamma_si] def update_factor(self, idxs, mcav, vcav, dm, dm2,", "np.einsum('dab,bc->dac', Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2.0 * self.sf) psi1, psi2 =", "extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( self.NEXT, grad_cav['mx'],", "v_sum + 0.5 * m_diff**2 / v_sum**2 dmt = m_diff / v_sum dmprop", "= np.linalg.slogdet(self.Suhat) phi_cavity = 0.5 * np.sum(logdet) phi_cavity += 0.5 * \\ np.sum(self.muhat", "self.compute_cavity_x([ n], alpha) (mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], cav_m_n, cav_v_n, alpha=alpha)", "else: if N < 10000: centroids, label = kmeans2(x_train, M, minit='points') else: randind", "= np.einsum('nda,ndb->ndab', dmcav, dmcav) - 2 * dvcav Sunew = Suhat - np.einsum(", "True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy 
= self.lik_layer.output_probabilistic(mf, vf) #", "SGP_Layer( self.N, self.Din + self.Dcon_emi, self.Dout, self.M) # natural params for latent variables", "-0.5 / alpha * np.log(oneminuswnSwn) term2 = N / Nb * np.sum(term2a +", "compute_energy, display_steps) def run_pep_parallel(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size = idxs.shape[0] scale_logZ", "0.5 * \\ np.sum(self.muhat * np.linalg.solve(self.Suhat, self.muhat)) return phi_cavity def compute_phi(self, alpha=1.0): \"\"\"Summary", "(TYPE): Description sgp_layer (TYPE): Description t01 (TYPE): Description t02 (TYPE): Description tx1 (TYPE):", "= cav_v - cav_v**2 * (dmx**2 - 2 * dvx) new_p2 = 1.0", "\"\"\"Summary Returns: TYPE: Description \"\"\" # compute the posterior approximation self.Suinv = self.Kuuinv", "- np.einsum('ab,ndbc->ndac', Kuu, betacavKuu) signSu, logdetSu = np.linalg.slogdet(Su) signKuu, logdetKuu = np.linalg.slogdet(Kuu) Suinvm", "= Kfu self.KuuinvKuf = KuuinvKuf self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf, x_train) KuuinvKuf_div_var = np.einsum('an,nd->dan',", "np.arange(self.N - 1) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( idxs, cav_tm1_m, cav_tm1_v, alpha=alpha)", "variables self.t1 = np.zeros([N, Dout, M]) self.t2 = np.zeros([N, Dout, M, M]) #", "np.zeros([M, M]) self.Kuuinv = np.zeros([M, M]) # variables for the hyperparameters self.ls =", "p_i) wnScavSinvm = np.sum(wnScav * Suinvm, axis=2) wnS = np.einsum('na,dab->ndb', p_i, Su) wnSwn", "extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat] return mout, vout, extra_res", "NotImplementedError('unknown mode') def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1 = self.x_next_1 +", "0: print 'epoch %d/%d' % (e, no_epochs) if parallel: self.inf_parallel(e, alpha, decay) else:", "np.einsum('na,nda->nd', k_i, gamma_si) v_si_ii = k_ii - np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i) return m_si_i,", "# factor variables self.variances 
= np.zeros([N, Dout]) self.variances.fill(1e20) self.means = np.zeros([N, Dout]) #", "\"\"\"Summary Args: x_train (TYPE): Description y_train (TYPE): Description no_pseudo (TYPE): Description lik (str,", "= self.compute_phi_posterior() phi_cav = self.compute_phi_cavity() phi = scale_prior * phi_prior + scale_post *", "+ self.x_next_2[idxs, :] cav_prev_1 += (1 - alpha) * self.x_prev_1[idxs, :] cav_prev_2 +=", "# TODO: put this in config parallel = True # TODO: put this", "self.Din)) self.t01 = prior_mean / prior_var self.t02 = 1.0 / prior_var # TODO:", "(TYPE): Description extra_args (TYPE): Description decay (int, optional): Description Returns: TYPE: Description \"\"\"", "self.sgp_layer.forward_prop_thru_cav( [n], cav_m_n, cav_v_n, alpha=alpha) logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn,", "def predict_y(self, inputs): # \"\"\"Summary # Args: # inputs (TYPE): Description # Returns:", "y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1): \"\"\"Summary Args: y_train (TYPE): Description hidden_size (TYPE):", "= up_2 else: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1", "\"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description Kuu (TYPE): Description Kuuinv (TYPE):", "# dm2 += 1e-16 # dv *= 0 self.sgp_layer.update_factor( idxs, m_cav, v_cav, dm,", "\"\"\" # dict to hold hypers, inducing points and parameters of q(U) N", "dm - 2 * dv * m dAhat = np.einsum('nd,nm->ndm', dm_all, psi1) dBhat", "logdetKuu + np.sum(mu * Suinvm)) variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :]", "(TYPE): Description Returns: TYPE: Description \"\"\" sn2 = np.exp(2 * self.sn) v_sum =", "exp_term = -0.5 * m_diff**2 / v_sum const_term = -0.5 * np.log(2 *", "np.ones(self.M))) grads = {} grads['sf'] = 2*dhyp[0] + dsf grads['ls'] = 2*dhyp[1] +", "(TYPE): Description lik_layer (TYPE): Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE):", "+ t1_frac t2_new = 
(1.0 - alpha) * t2_old + t2_frac if t1_new.shape[0]", "grad_input = {'mx': dmx, 'vx': dvx, 'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_input", "+ key_suffix] = self.zu return params def update_hypers(self, params, key_suffix=''): \"\"\"Summary Args: params", "TYPE: Description \"\"\" # prior factor cav_x1 = self.t01 + (1 - alpha)", "lik) self.sgp_layer = SGP_Layer_rank_one(self.N, self.Din, self.Dout, self.M) def objective_function(self, params, mb_size, alpha=1.0, prop_mode=PROP_MM):", "== self.NEXT: cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :] cav_next_2 = self.x_up_2[idxs, :]", "np.zeros((N, Din)) self.x_next_2 = np.zeros((N, Din)) self.x_up_1 = np.zeros((N, Din)) self.x_up_2 = np.zeros((N,", "1.0 / post_2 mx = post_1 / post_2 return mx, vx def get_posterior_y(self):", "(TYPE): Description t2 (TYPE): Description zu (TYPE): Description \"\"\" def __init__(self, no_train, input_size,", "Suinvhat, Smm, psi1, psi2, Ahat, Bhat = \\ a[0], a[1], a[2], a[3], a[4],", "Description \"\"\" # merge info from output cav_up_m, cav_up_v, _, _ = self.compute_cavity_x(self.UP,", "Description Raises: NotImplementedError: Description \"\"\" super(SGPR, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer(self.N,", "= np.linalg.slogdet(Kuu) Suinvm = np.einsum('dab,db->da', Suinv, mu) term1 = 0.5 * (np.sum(logdetSu) -", "* np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def update_posterior(self, x_train=None, new_hypers=False): \"\"\"Summary Returns: TYPE:", "= self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] var_i_new =", "# init_params.update(lik_params) # return init_params # def get_hypers(self): # \"\"\"Summary # Returns: #", "- decay) * n2_new else: raise NotImplementedError('unknown mode') def update_factor_x_sequential( self, mode, dmcav,", "mb_size >= N: idxs = np.arange(N) xb = self.x_train yb = self.y_train else:", "dv *= 0 self.sgp_layer.update_factor( idxs, m_cav, v_cav, dm, dm2, dv, 
alpha, prop_info) self.sgp_layer.update_posterior(None,", "* dmx new_v = cav_v - cav_v**2 * (dmx**2 - 2 * dvx)", "* t2_old + t2_frac if t1_new.shape[0] == 1: # TODO: do damping here?", "Returns: TYPE: Description \"\"\" # prior factor cav_x1 = self.t01 + (1 -", "np.newaxis, :] * dv[:, :, np.newaxis] temp1 = - np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi, p_idlogZ_dvi) temp2", "+ self.x_up_2 post_1[0, :] += self.x_prior_1 post_2[0, :] += self.x_prior_2 vx = 1.0", "yb, alpha, compute_dm2=True) # dm *= 0 # dm2 *= 0 # dm2", "return cav_next_1 / cav_next_2, 1.0 / cav_next_2, cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode')", "dmiKuuinvMcav, p_i) VcavKuuinvKufi = np.einsum('ndab,nb->nda', Sucav, p_i) KuuinvVcavKuuinvKufi = np.einsum('ab,ndb->nda', Kuuinv, VcavKuuinvKufi) p_idlogZ_dvi", "'Caught KeyboardInterrupt ...' def compute_cavity_x(self, mode, alpha): \"\"\"Summary Args: mode (TYPE): Description alpha", "= SuinvMunew[0, :, :] else: # parallel update self.t1[n, :, :] = decay", "self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv)) - Kuuinv psi0 =", "* self.sf, x, self.zu) mout = np.einsum('nm,dm->nd', kfu, A) Bpsi2 = np.einsum('dab,na,nb->nd', B,", "kfu, A) Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu) vout = kff + Bpsi2", "- alpha)) mean_new_parallel = mean_div_var_i_new * var_new_parallel # if alpha == 1: #", ":] + KuuinvVcavKuuinvKufi) dKfu_via_logZ = np.sum(dKfu_via_mi + dKfu_via_vi, axis=1) dsf2, dls, dzu =", "Description v_prop (TYPE): Description m_t (TYPE): Description v_t (TYPE): Description alpha (TYPE): Description", "Din (TYPE): Description Dout (TYPE): Description emi_layer (TYPE): Description lik (TYPE): Description M", "(TYPE): Description lik (str, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR, self).__init__(x_train, y_train,", "cav_v, cav_x1, cav_x2 def update_factor_x(self, n, alpha, grad_cav, cav_m, cav_v, decay=0.0): \"\"\"Summary Args:", "Ahat, Bhat] 
return mout, vout, extra_res def _forward_prop_random_thru_cav_mm(self, n, mx, vx, alpha): \"\"\"Summary", "mode') def compute_cavity_x_sequential(self, mode, idxs, alpha): \"\"\"Summary Args: mode (TYPE): Description idxs (TYPE):", "= np.random.randn(x.shape[0], self.Dout) f_sample = mf + np.einsum('ab,bd->ad', Lf, epsilon) return f_sample def", "scale_logZ = - self.N * 1.0 / batch_size / alpha xb = self.x_train[idxs,", "elif mode == self.PREV: idxs = np.arange(1, self.N) cur_n1 = self.x_prev_1[idxs, :] cur_n2", "phi_posterior = 0.5 * np.sum(logdet) phi_posterior += 0.5 * \\ np.sum(self.mu * np.linalg.solve(self.Su,", "m_diff = m_t - m_prop exp_term = -0.5 * m_diff**2 / v_sum const_term", "+ term2c + term2d) sgp_contrib = - term1 - term2 KuuinvMcav = np.einsum('ab,ndb->nda',", "Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" muhat, Suhat, SuinvMuhat, Suinvhat =", "self.lik_layer.backprop_grads( m_cav, v_cav, dm, dv, alpha, scale_logZ) energy = sgp_contrib + lik_contrib grad_all", "logZ_posterior = 0 (sign, logdet) = np.linalg.slogdet(self.Suhat) phi_cavity = 0.5 * np.sum(logdet) phi_cavity", "* from kernels import * from lik_layers import Gauss_Layer, Probit_Layer, Gauss_Emis from base_models", "self.tx1[n, :] cav_x2 = self.t02 + (1 - alpha) * self.tx2[n, :] cav_v", "axis=1) dsf2, dls, dzu = compute_kfu_derivatives( dKfu_via_logZ, kfu, np.exp(self.ls), sf2, x, self.zu) dls", "= up_1 self.x_up_2 = up_2 # deal with the dynamics factors here cav_t_m,", "np.einsum('ab,db->da', Kuuinv, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv", "- rho) * n1_ori # n2_damped = rho * n2_new + (1.0 -", "= self.Kuuinv # compute grads wrt Ahat and Bhat dAhat = np.einsum('nd,nm->ndm', dm,", "- np.sum(k_i[:, np.newaxis, :] * h, axis=2) mean_i_new = mcav - dm /", "self.beta h_si = p_i - np.einsum('dab,nb->nda', beta, k_i) variance_i = self.variances[idxs, :] mean_i", "grad_cav['vx'], cav_tm1_m, cav_tm1_v, 
cav_tm1_1, cav_tm1_2, [n], decay=decay, alpha=alpha) self.update_factor_x_sequential( self.PREV, dmt, dvt, cav_t_m,", "alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb, alpha, compute_dm2=True) #", "muhat, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Smm, Kuuinv)) - Kuuinv psi0", "(TYPE): Description mx (TYPE): Description vx (None, optional): Description alpha (float, optional): Description", "key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" # dict to hold hypers,", "= np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2.0 *", "# dm2 *= 0 # dm2 += 1e-16 # dv *= 0 self.sgp_layer.update_factor(", "# n1_ori = 1.0 / variance_i # n2_ori = mean_i / variance_i #", "= Suhat - np.einsum( 'ndab,ndbc->ndac', Suhat, np.einsum('ndab,ndbc->ndac', inner, Suhat)) Suinvnew = np.linalg.inv(Sunew) SuinvMunew", "plt.show() def sample(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\"", "cav_v, decay=0.0): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description grad_cav (TYPE): Description", "scipy.optimize import minimize import matplotlib.pyplot as plt import time import pdb from scipy.cluster.vq", "def backprop_grads_reg(self, m, v, dm, dv, extra_args, x, alpha=1.0): \"\"\"Summary Args: m (TYPE):", "= np.zeros([Dout, M, M]) # numpy variable for inducing points, Kuuinv, Kuu and", "+ key_suffix] = ls params['zu' + key_suffix] = zu return params def get_hypers(self,", "pep variables self.gamma = np.zeros([Dout, M]) self.beta = np.zeros([Dout, M, M]) # numpy", "dmx, dvx = compute_psi_derivatives( dpsi1, psi1, dpsi2, psi2, ls, sf2, mx, vx, zu)", "Description Returns: TYPE: Description \"\"\" # dict to hold hypers, inducing points and", ":] yb = self.y_train[idxs, :] for k in range(no_sweeps): if k % display_steps", "Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Smm = Suhat + np.einsum('nda,ndb->ndab', muhat, muhat) 
Bhat", "def compute_kuu(self): \"\"\"Summary Returns: TYPE: Description \"\"\" # update kuu and kuuinv ls", "variance_i # n2_ori = mean_i / variance_i # n1_damped = rho * n1_new", "* phi_post + scale_cav * phi_cav return phi def forward_prop_thru_cav(self, n, mx, vx=None,", "grad_cav = self.sgp_layer.backprop_grads_lvm( m, v, dm, dv, extra_res, cav_m, cav_v, alpha=alpha) self.sgp_layer.update_factor( idxs,", "dv_s = scale_logZ * dv dm2_s = scale_logZ * dm2 sgp_contrib, sgp_grad =", "# \"\"\" # if not self.updated: # self.sgp_layer.update_posterior() # self.updated = True #", "Args: y_train (TYPE): Description hidden_size (TYPE): Description no_pseudo (TYPE): Description lik (str, optional):", "# scale_logZ = 0 xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] energy", "logdet) = np.linalg.slogdet(self.Kuu) logZ_prior = self.Dout * 0.5 * logdet return logZ_prior def", "= self.compute_phi_prior() phi_post = self.compute_phi_posterior() phi_cav = self.compute_phi_cavity() phi = scale_prior * phi_prior", "lik, prior_mean, prior_var, x_control, gp_emi, control_to_emi) self.dyn_layer = SGP_Layer( self.N - 1, self.Din", "dpsi1 = np.einsum('nd,ndm->nm', dm_all, Ahat) dpsi2 = np.einsum('nd,ndab->nab', dv, Bhat) dsf2, dls, dzu,", "self.PREV: cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2 = self.x_up_2[idxs, :] +", "ls = np.log(np.ones((Din, )) + 0.1 * np.random.rand(Din, )) sf = np.log(np.array([1])) zu", "new hypers self.compute_kuu() # compute mu and Su for each layer self.update_posterior() class", "alpha=1.0): \"\"\"Summary Args: n (TYPE): Description alpha (float, optional): Description Returns: TYPE: Description", "self.updated = True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # return mf, vf #", "= exp_term + const_term + alpha_term dvt = -0.5 / v_sum + 0.5", "/ new_v new_p1 = new_p2 * new_m frac_t2 = new_p2 - 1.0 /", "lik='Gaussian', prior_mean=0, prior_var=1): \"\"\"Summary Args: y_train (TYPE): Description hidden_size 
(TYPE): Description no_pseudo (TYPE):", "# if not self.updated: # self.sgp_layer.update_posterior() # self.updated = True # K =", "Description extra_args (TYPE): Description x (TYPE): Description alpha (float, optional): Description Returns: TYPE:", "dmcav, 'vcav': dvcav} return grad_hyper, grad_input def backprop_grads_reg(self, m, v, dm, dv, extra_args,", "= self.lik_layer.output_probabilistic(mf, vf) # return my, vy # def update_hypers(self, params): # \"\"\"Summary", "S + dKuu_via_logZ dhyp = d_trace_MKzz_dhypers( 2*self.ls, 2*self.sf, self.zu, S, Kuu - np.diag(JITTER", "cav_v_n, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( [n], alpha, grad_cav, cav_m_n,", "Raises: NotImplementedError: Description \"\"\" super(SGPLVM, self).__init__(y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var) self.sgp_layer =", "dls grads['zu'] = dhyp[2] + dzu return sgp_contrib, grads class SGPR_rank_one(Base_SGPR): \"\"\"Summary Attributes:", "= self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] # compute", "muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat = \\ a[0], a[1], a[2], a[3],", "with the dynamics factors here cav_t_m, cav_t_v, cav_t_1, cav_t_2 = \\ self.compute_cavity_x(self.PREV, alpha)", "no_sweeps, alpha, compute_energy, display_steps) def run_pep_parallel(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size =", "(TYPE): Description alpha (TYPE): Description grad_cav (TYPE): Description extra_args (TYPE): Description decay (int,", "beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2] kfu = self.Kfu[idxs, :] variance_i = self.variances[idxs,", "= \\ self.dyn_layer.forward_prop_thru_cav( idxs, cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt, dvt =", "1.0 / cav_next_2, cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def compute_transition_tilted(self, m_prop, v_prop,", "self.x_prior_1 post_2[0, :] += 
self.x_prior_2 vx = 1.0 / post_2 mx = post_1", "= temp1 + temp2 + temp3 dKuu_via_logZ = dKuu_via_mi + dKuu_via_vi dKfu_via_mi =", "replace=False) xb = self.x_train[idxs, :] yb = self.y_train[idxs, :] # update model with", "no_epochs) if parallel: self.inf_parallel(e, alpha, decay) else: self.inf_sequential(e, alpha, decay) except KeyboardInterrupt: print", "grad_cav = self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( [n],", "/ post_2 return mx, vx def get_posterior_y(self): \"\"\"Summary Returns: TYPE: Description \"\"\" mx,", "= np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) - Kuuinv kff = np.exp(2 *", "# ssm_params = {'sn': np.log(0.001)} # init_params = dict(sgp_params) # init_params.update(lik_params) # init_params.update(ssm_params)", "n, alpha): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description", "minibatch parallel idxs = np.arange(self.N) y = self.y_train[idxs, :] cav_m, cav_v, _, _", "post_1 / post_2 return mx, vx def get_posterior_y(self): \"\"\"Summary Returns: TYPE: Description \"\"\"", "return mf, vf # def sample_f(self, inputs, no_samples=1): # \"\"\"Summary # Args: #", "self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer_rank_one(self.N, self.Din, self.Dout, self.M) def objective_function(self, params,", "Args: n (TYPE): Description mx (TYPE): Description vx (TYPE): Description alpha (TYPE): Description", "scipy import special from scipy.optimize import minimize import matplotlib.pyplot as plt import time", "Suinvhat = self.compute_cavity(n, alpha) Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Smm", "var_new_parallel # if alpha == 1: # rho = 0.5 # n1_new =", "key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix (str, optional): Description Returns: TYPE: Description", "Kuuinv = self.Kuuinv a = extra_args muhat, Suhat, 
SuinvMuhat, Suinvhat, Smm, psi1, psi2,", "zu) self.Kuu += np.diag(JITTER * np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def update_posterior(self, x_train=None,", "lik_contrib grad_all = {} for key in sgp_grad.keys(): grad_all[key] = sgp_grad[key] for key", "self.Kuuinv + T2u self.Suinv = Vinv self.Su = np.linalg.inv(Vinv) self.mu = np.einsum('dab,db->da', self.Su,", "= 0.5 * (1 - alpha) * np.log(2 * np.pi * sn2) -", "* t1_old + (1 - decay) * t1_new self.t2[n, :, :, :] =", "n2cav, idxs, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description dmcav (TYPE): Description dvcav", "- alpha) * cur_t1 + frac_t1 tx2_new = (1 - alpha) * cur_t2", "objective_function(self, params, mb_size, alpha=1.0, prop_mode=PROP_MM): \"\"\"Summary Args: params (TYPE): Description mb_size (TYPE): Description", "self.x_prev_2 + self.x_up_2 post_1[0, :] += self.x_prior_1 post_2[0, :] += self.x_prior_2 vx =", "extra_res def _forward_prop_random_thru_cav_mm(self, n, mx, vx, alpha): \"\"\"Summary Args: n (TYPE): Description mx", "+ self.Dcon_emi, self.Dout, self.M) # natural params for latent variables N = self.N", "scale_logZ * dm dv_s = scale_logZ * dv dm2_s = scale_logZ * dm2", "SGPR_rank_one(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer (TYPE): Description M", "Description \"\"\" logZ_posterior = 0 (sign, logdet) = np.linalg.slogdet(self.Suhat) phi_cavity = 0.5 *", "Args: # inputs (TYPE): Description # Returns: # TYPE: Description # \"\"\" #", "dm (TYPE): Description dv (TYPE): Description extra_args (TYPE): Description mx (TYPE): Description vx", "optional): Description prior_var (int, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPLVM, self).__init__(y_train, hidden_size,", "axis=2) wnS = np.einsum('na,dab->ndb', p_i, Su) wnSwn = np.sum(wnS * p_i[:, np.newaxis, :],", "k % display_steps == 0: print 'PEP, epoch: %d / %d' % (k,", "+= self.x_prior_2 return cav_up_1 / (cav_up_2 + 1e-16), 1.0 
/ (cav_up_2 + 1e-16),", "(TYPE): Description n2cav (TYPE): Description idxs (TYPE): Description decay (float, optional): Description alpha", "kfu, np.exp(self.ls), sf2, x, self.zu) dls = dls * np.exp(self.ls) dsf2 += np.sum(dv)", "up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) self.x_up_1 = up_1 self.x_up_2 = up_2", "dAhat = np.einsum('nd,nm->ndm', dm, kfu) dBhat = np.einsum('nd,na,nb->ndab', dv, kfu, kfu) dvcav =", "np.dot(self.Kuuinv, Kfu.T) self.Kfu = Kfu self.KuuinvKuf = KuuinvKuf self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf, x_train)", "# TYPE: Description # \"\"\" # sgp_params = self.sgp_layer.init_hypers() # lik_params = self.emi_layer.init_hypers()", "self.sn = params['sn'] # def init_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description", "KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 / self.variances) T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf) T1u", "vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], xn, alpha=alpha) logZn, dmn, dvn = \\", "self.N - 1): # deal with the dynamics factors here cav_t_m, cav_t_v, cav_t_1,", "cav_m_n, cav_v_n, decay=decay) else: # parallel update for entire dataset # TODO: minibatch", "# Args: # params (TYPE): Description # Returns: # TYPE: Description # \"\"\"", "- 1) cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :] cav_next_2 = self.x_up_2[idxs, :]", "= compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu) mout = np.einsum('nm,dm->nd', kfu,", "vx def get_posterior_y(self): \"\"\"Summary Returns: TYPE: Description \"\"\" mx, vx = self.get_posterior_x() my,", "= \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( mn, vn, dmn,", "grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( mn, vn, dmn, dvn, extra_res, cav_m_n, cav_v_n, alpha=alpha) self.sgp_layer.update_factor(", "self.x_prior_2 = 1.0 / prior_var self.UP, self.PREV, self.NEXT = 'UP', 'PREV', 'NEXT' def", 
":] cav_next_2 += (1 - alpha) * self.x_next_2[idxs, :] return cav_next_1 / cav_next_2,", "% (k, no_sweeps) find_log_lik = compute_energy and (k == no_sweeps-1) m_cav, v_cav, prop_info", "mcav) dmiKuuinvMcav = dm[:, :, np.newaxis] * KuuinvMcav dKuu_via_mi = -np.einsum('nda,nb->ab', dmiKuuinvMcav, p_i)", "entire dataset # TODO: minibatch parallel idxs = np.arange(self.N) y = self.y_train[idxs, :]", "np.linalg.slogdet(Kuu) Suinvm = np.einsum('dab,db->da', Suinv, mu) term1 = 0.5 * (np.sum(logdetSu) - Dout", "= p_i - np.einsum('ndab,nb->nda', beta_si, k_i) m_si_i = np.einsum('na,nda->nd', k_i, gamma_si) v_si_ii =", "+ 0.5 * m_diff**2 / v_sum**2 dmt = m_diff / v_sum dmprop =", "update_factor(self, n, alpha, grad_cav, extra_args, decay=0): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE):", "- N * 1.0 / alpha scale_prior = 1 phi_prior = self.compute_phi_prior() phi_post", "= self.emi_layer.init_hypers() # ssm_params = {'sn': np.log(0.001)} # init_params = dict(sgp_params) # init_params.update(lik_params)", "inner, Suhat)) Suinvnew = np.linalg.inv(Sunew) SuinvMunew = np.einsum('ndab,ndb->nda', Suinvnew, munew) t2_frac = Suinvnew", "signKuu, logdetKuu = np.linalg.slogdet(Kuu) Suinvm = np.einsum('dab,db->da', Suinv, mu) term1 = 0.5 *", "(str, optional): Description Returns: TYPE: Description \"\"\" self.ls = params['ls' + key_suffix] self.sf", "prop_info[2] kfu = self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :]", "np.newaxis] * (-p_i[:, np.newaxis, :] + KuuinvVcavKuuinvKufi) dKfu_via_logZ = np.sum(dKfu_via_mi + dKfu_via_vi, axis=1)", "= self.x_train[idxs, :] yb = self.y_train[idxs, :] for k in range(no_sweeps): if k", "= var_new_parallel self.means[idxs, :] = mean_new_parallel def backprop_grads_reg(self, idxs, m, v, dm, dm2,", "+= (1 - alpha) * self.x_prev_1[idxs, :] cav_prev_2 += (1 - alpha) *", "= (1 - alpha) * cur_t2 + frac_t2 tx1_new = decay * cur_t1", "mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = 
self.emi_layer.output_probabilistic(mf, vf) # return my,", "(mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], cav_m_n, cav_v_n, alpha=alpha) logZn, dmn, dvn", "self._forward_prop_deterministic_thru_cav(n, mx, alpha) else: return self._forward_prop_random_thru_cav_mm(n, mx, vx, alpha) def _forward_prop_deterministic_thru_cav(self, n, x,", "0 # dm2 += 1e-16 # dv *= 0 self.sgp_layer.update_factor( idxs, m_cav, v_cav,", "idxs, m_cav, v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) if find_log_lik: N", "grad_hyper, grad_cav = self.dyn_layer.backprop_grads_lvm( mprop, vprop, dmprop, dvprop, extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor(", "- alpha) * self.tx1[n, :] cav_x2 = self.t02 + (1 - alpha) *", "self.sgp_layer.sample(inputs) # return fs # def predict_y(self, inputs): # \"\"\"Summary # Args: #", "np.einsum('ndab,nab->nd', Bhat, psi2) vout = psi0 + Bhatpsi2 - mout**2 extra_res = [muhat,", "numpy variable for inducing points, Kuuinv, Kuu and its gradients self.zu = np.zeros([M,", "np.einsum('nd,nm->ndm', dm_all, psi1) dBhat = np.einsum('nd,nab->ndab', dv, psi2) # compute grads wrt psi1", "decay) * n2_new elif mode == self.PREV: idxs = np.arange(1, self.N) cur_n1 =", "= self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :] n1_new = (1 - alpha) *", "\\ self.sgp_layer.forward_prop_thru_cav( idxs, x, alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y,", "self.sgp_layer.update_factor( [idxs[i]], m_cav, v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) def update_hypers(self,", "alpha) self.x_up_1 = up_1 self.x_up_2 = up_2 # deal with the dynamics factors", "compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu) mout = np.einsum('nm,ndm->nd', kfu, Ahat)", "alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( [n], alpha, grad_cav, cav_m_n, cav_v_n, decay=decay) else: #", "and parameters of q(U) N = self.N M = 
self.M Din = self.Din", "(int, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPLVM, self).__init__(y_train, hidden_size, no_pseudo, lik, prior_mean,", "- rho) * n2_ori # var_new_parallel = 1.0 / n1_damped # mean_new_parallel =", "self.run_pep_sequential( train_idxs, no_sweeps, alpha, compute_energy, display_steps) def run_pep_parallel(self, idxs, no_sweeps, alpha, compute_energy, display_steps):", "NotImplementedError: Description \"\"\" super(SGPLVM, self).__init__(y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var) self.sgp_layer = SGP_Layer(self.N,", "Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" sn2 = np.exp(2 * self.sn)", "dv, x, alpha, prop_info): N = self.N Nb = idxs.shape[0] sf2 = np.exp(2", "0 self.sgp_layer.update_factor( idxs, m_cav, v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) if", "self.KuuinvKuf[:, idxs].T h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2] kfu = self.Kfu[idxs, :]", "* np.sum(logdet) phi_posterior += 0.5 * \\ np.sum(self.mu * np.linalg.solve(self.Su, self.mu)) return phi_posterior", "vx = self.get_posterior_x() my, vy, vyn = self.emi_layer.output_probabilistic(mx, vx) return my, vy, vyn", "extra_res, cav_tm1_m, cav_tm1_v, alpha=alpha) self.dyn_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x_sequential( self.NEXT, grad_cav['mx'],", "inf_parallel(self, epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE): Description alpha (TYPE): Description decay", "np.exp(2 * self.sf) kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)", "def update_factor_x( self, mode, dmcav, dvcav, mcav, vcav, n1cav, n2cav, decay=0.0, alpha=1.0): \"\"\"Summary", "= np.einsum('bn,nd->db', self.KuuinvKuf, self.means / self.variances) Vinv = self.Kuuinv + T2u self.Suinv =", "- np.einsum( 'ndab,ndbc->ndac', Suhat, np.einsum('ndab,ndbc->ndac', inner, Suhat)) Suinvnew = np.linalg.inv(Sunew) 
SuinvMunew = np.einsum('ndab,ndb->nda',", "Bhat dm_all = dm - 2 * dv * m dAhat = np.einsum('nd,nm->ndm',", "- 1.0 scale_cav = - N * 1.0 / alpha scale_prior = 1", "grad_hyper, grad_input def backprop_grads_reg(self, m, v, dm, dv, extra_args, x, alpha=1.0): \"\"\"Summary Args:", "self.x_prev_2[idxs, :] return cav_prev_1 / cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2 elif mode", "vyn = self.emi_layer.output_probabilistic(mx, vx) return my, vy, vyn # def predict_f(self, inputs): #", "display_steps) else: # TODO return self.run_pep_sequential( train_idxs, no_sweeps, alpha, compute_energy, display_steps) def run_pep_parallel(self,", "+= self.x_prior_2 vx = 1.0 / post_2 mx = post_1 / post_2 return", "NotImplementedError: Description \"\"\" super(SGPR, self).__init__(x_train, y_train, no_pseudo, lik) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout,", "= dmiKuuinvMcav dKfu_via_vi = 2 * dv[:, :, np.newaxis] * (-p_i[:, np.newaxis, :]", "= mcav - dm / dm2 var_new_parallel = 1 / (1 / var_i_new", "grad_all def run_pep_sequential(self, idxs, no_sweeps, alpha, compute_energy, display_steps): batch_size = idxs.shape[0] scale_logZ =", "self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' 
class", "tx1_new self.tx2[n, :] = tx2_new def get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1", "logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg(", "= True # K = no_samples # fs = np.zeros((inputs.shape[0], self.Dout, K)) #", "extra_args[2], extra_args[3] dmcav, dvcav = grad_cav['mcav'], grad_cav['vcav'] # perform Power-EP update munew =", "and kuuinv ls = self.ls sf = self.sf Dout = self.Dout M =", "= 1 / (1 / var_i_new + 1 / variance_i * (1 -", "lik, prior_mean, prior_var) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) # natural params for", "Description Suinv (TYPE): Description SuinvMu (TYPE): Description t1 (TYPE): Description t2 (TYPE): Description", "mf + np.einsum('ab,bd->ad', Lf, epsilon) return f_sample def compute_kuu(self): \"\"\"Summary Returns: TYPE: Description", "Description dmcav (TYPE): Description dvcav (TYPE): Description mcav (TYPE): Description vcav (TYPE): Description", "display_steps == 0: print 'PEP, epoch: %d / %d' % (k, no_sweeps) find_log_lik", "# dv *= 0 self.sgp_layer.update_factor( idxs, m_cav, v_cav, dm, dm2, dv, alpha, prop_info)", "* phi_cav return phi def forward_prop_thru_cav(self, n, mx, vx=None, alpha=1.0): \"\"\"Summary Args: n", "Description # Returns: # TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params) # self.lik_layer.update_hypers(params) #", "(mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( idxs, cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop,", "= params['sn'] # def init_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description #", "= dKuu_via_mi + dKuu_via_vi dKfu_via_mi = dmiKuuinvMcav dKfu_via_vi = 2 * dv[:, :,", "[n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x_sequential( self.NEXT, grad_cav['mx'], grad_cav['vx'], cav_tm1_m, cav_tm1_v, cav_tm1_1, cav_tm1_2,", "M]) # numpy variable for inducing points, Kuuinv, Kuu and its gradients 
self.zu", "else: raise NotImplementedError('unknown mode') def compute_transition_tilted(self, m_prop, v_prop, m_t, v_t, alpha): \"\"\"Summary Args:", "Su = self.Su mu = self.mu Lu = np.linalg.cholesky(Su) epsilon = np.random.randn(self.Dout, self.M)", "cav_next_2, cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def compute_cavity_x_sequential(self, mode, idxs, alpha): \"\"\"Summary", "dKuu_via_mi = -np.einsum('nda,nb->ab', dmiKuuinvMcav, p_i) VcavKuuinvKufi = np.einsum('ndab,nb->nda', Sucav, p_i) KuuinvVcavKuuinvKufi = np.einsum('ab,ndb->nda',", "= up_1 self.x_up_2 = up_2 else: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha)", "Description dm (TYPE): Description dv (TYPE): Description extra_args (TYPE): Description mx (TYPE): Description", "alpha) Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Bhat = np.einsum( 'ab,ndbc->ndac',", "Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1, x_control=None, gp_emi=False, control_to_emi=True):", "up_1 self.x_up_2 = up_2 # deal with the dynamics factors here cav_t_m, cav_t_v,", "* sn2) - 0.5 * np.log(alpha) logZ = exp_term + const_term + alpha_term", "TYPE: Description # \"\"\" # if not self.updated: # self.sgp_layer.update_posterior() # self.updated =", "Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2.0 * self.sf) psi1, psi2", "= self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2 = self.x_up_2[idxs, :] + self.x_next_2[idxs, :]", "logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb, alpha, compute_dm2=True) # dm", "M = no_pseudo self.N = N = no_train # factor variables self.t1 =", "var_new_parallel # n2_new = mean_new_parallel / var_new_parallel # n1_ori = 1.0 / variance_i", "- 1.0 / cav_v frac_t1 = new_p1 - cav_m / cav_v # neg_idxs", "grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( mn, vn, dmn, dvn, extra_res, xn, alpha=alpha) 
self.sgp_layer.update_factor( [n],", "self.NEXT: cur_n1 = self.x_next_1[idxs, :] cur_n2 = self.x_next_2[idxs, :] n1_new = (1 -", "(bool): Description x_train (TYPE): Description \"\"\" def __init__(self, x_train, y_train, no_pseudo, lik='Gaussian'): \"\"\"Summary", "parallel, compute_energy=False, display_steps=10): if parallel: return self.run_pep_parallel( train_idxs, no_sweeps, alpha, compute_energy, display_steps) else:", "np.linalg.inv(Sunew) SuinvMunew = np.einsum('ndab,ndb->nda', Suinvnew, munew) t2_frac = Suinvnew - Suinvhat t1_frac =", "{} grad_cav = {'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_cav def update_factor(self, n,", "* n1_new self.x_next_2[idxs, :] = decay * cur_n2 + (1 - decay) *", "+ frac_n2 self.x_prev_1[idxs, :] = decay * cur_n1 + (1 - decay) *", "x_train, y_train, no_pseudo, lik='Gaussian'): \"\"\"Summary Args: x_train (TYPE): Description y_train (TYPE): Description no_pseudo", "self.dyn_layer.forward_prop_thru_cav( idxs, cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt, dvt = \\ self.compute_transition_tilted(", "(TYPE): Description zu (TYPE): Description \"\"\" def __init__(self, no_train, input_size, output_size, no_pseudo): \"\"\"Summary", "cav_prev_1 / cav_prev_2, 1.0 / cav_prev_2, cav_prev_1, cav_prev_2 elif mode == self.NEXT: cav_next_1", "beta_si, k_i) m_si_i = np.einsum('na,nda->nd', k_i, gamma_si) v_si_ii = k_ii - np.einsum('na,ndab,nb->nd', k_i,", "only do this once at the begining for gaussian emission lik if isinstance(self.emi_layer,", "parallel idxs = np.arange(self.N) y = self.y_train[idxs, :] cav_m, cav_v, _, _ =", "mode (TYPE): Description dmcav (TYPE): Description dvcav (TYPE): Description mcav (TYPE): Description vcav", "lik if isinstance(self.emi_layer, Gauss_Layer) and epoch == 0: up_1, up_2 = self.emi_layer.compute_factor( cav_up_m,", "(TYPE): Description n1cav (TYPE): Description n2cav (TYPE): Description decay (float, optional): Description alpha", "m_cav, v_cav, dm, dv, alpha, scale_logZ) 
energy = sgp_contrib + lik_contrib grad_all =", "\"\"\"Summary Args: m_prop (TYPE): Description v_prop (TYPE): Description m_t (TYPE): Description v_t (TYPE):", "- alpha) * self.x_prev_2[idxs, :] return cav_prev_1 / cav_prev_2, 1.0 / cav_prev_2, cav_prev_1,", "np.log(d2imed + 1e-16) sf = np.log(np.array([0.5])) params = dict() params['sf' + key_suffix] =", "= self.Kuuinv A = np.einsum('ab,db->da', Kuuinv, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac',", "(1 - decay) * n2_new else: raise NotImplementedError('unknown mode') def update_factor_x_sequential( self, mode,", "logZ, dmprop, dvprop, dmt, dvt def update_factor_x( self, mode, dmcav, dvcav, mcav, vcav,", "logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg(", "kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu) qfu = np.dot(kfu,", "in range(K): # fs[:, :, k] = self.sgp_layer.sample(inputs) # return fs # def", "hidden_size, no_pseudo, lik, prior_mean, prior_var, x_control, gp_emi, control_to_emi) self.dyn_layer = SGP_Layer( self.N -", "= compute_kernel_diag(2*self.ls, 2*self.sf, x_train) KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 / self.variances) T2u =", "factor variables self.t1 = np.zeros([N, Dout, M]) self.t2 = np.zeros([N, Dout, M, M])", "self.Dout, self.M) def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0.5): \"\"\"Summary Args: alpha (float, optional):", "TODO self.mu = np.zeros([Dout, M, ]) self.Su = np.zeros([Dout, M, M]) self.SuinvMu =", "- post_m_mean) / post_m_std post_v = 0.1 * np.ones_like(post_m) post_2 = 1.0 /", "* phi_prior + scale_post * phi_post + scale_cav * phi_cav return phi def", "update model with new hypers self.update_hypers(params) # run power-EP and compute grads no_ep_sweeps", "= M = no_pseudo self.N = N = no_train # factor variables self.t1", "lik='Gaussian'): \"\"\"Summary Args: x_train (TYPE): Description y_train (TYPE): Description 
no_pseudo (TYPE): Description lik", "parallel = True # TODO: put this in config energy, grad_all = self.run_pep(idxs,", "Args: n (TYPE): Description alpha (TYPE): Description grad_cav (TYPE): Description extra_args (TYPE): Description", "mean_new_parallel = var_new_parallel * n2_damped self.variances[idxs, :] = var_new_parallel self.means[idxs, :] = mean_new_parallel", "M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description updated (bool): Description x_train", "yerr=np.sqrt(np.diag(self.Su[0, :, :]))) # axs[1].errorbar(self.zu[:, 0]+0.05, self.mu[1, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[1, :, :]))) #", "# n2_ori = mean_i / variance_i # n1_damped = rho * n1_new +", "(TYPE): Description Kuuinv (TYPE): Description ls (TYPE): Description M (TYPE): Description mu (TYPE):", "= np.exp(2 * self.sf) zu = self.zu Kuuinv = self.Kuuinv a = extra_args", "the posterior approximation self.Suinv = self.Kuuinv + np.sum(self.t2, axis=0) self.SuinvMu = np.sum(self.t1, axis=0)", "= -0.5 * np.log(2 * np.pi * v_sum) alpha_term = 0.5 * (1", "* self.x_next_1[idxs, :] cav_next_2 += (1 - alpha) * self.x_next_2[idxs, :] return cav_next_1", "= np.einsum('na,ndab->ndb', p_i, Sucav) wnScavwn = np.einsum('ndb,nb->nd', wnScav, p_i) wnScavSinvm = np.sum(wnScav *", "return mout, vout def backprop_grads_lvm(self, m, v, dm, dv, extra_args, mx, vx, alpha=1.0):", "compute_cavity_x_sequential(self, mode, idxs, alpha): \"\"\"Summary Args: mode (TYPE): Description idxs (TYPE): Description alpha", "0.5 # n1_new = 1.0 / var_new_parallel # n2_new = mean_new_parallel / var_new_parallel", ":, :] = t2_new # TODO: update posterior self.Su = Sunew[0, :, :,", "cav_tm1_2, decay=decay, alpha=alpha) self.update_factor_x( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, decay=decay, alpha=alpha)", "(mn, vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], xn, alpha=alpha) logZn, dmn, dvn =", ":] mean_i = self.means[idxs, :] # compute cavity covariance betacavKuu = 
np.einsum('ndab,bc->ndac', beta_si,", "= np.einsum('nd,nab->ndab', dv, psi2) # compute grads wrt psi1 and psi2 dpsi1 =", "dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, decay=decay, alpha=alpha) def inf_sequential(self, epoch, alpha, decay):", "(TYPE): Description mu (TYPE): Description N (TYPE): Description sf (int): Description Splusmm (TYPE):", "= self.sgp_layer.init_hypers() # lik_params = self.lik_layer.init_hypers() # init_params = dict(sgp_params) # init_params.update(lik_params) #", "mu + np.einsum('dab,db->da', Lu, epsilon) kff = compute_kernel(2 * self.ls, 2 * self.sf,", "# dict to hold hypers, inducing points and parameters of q(U) N =", "my, vy, vyn = self.emi_layer.output_probabilistic(mx, vx) return my, vy, vyn # def predict_f(self,", "(cav_up_2 + 1e-16), 1.0 / (cav_up_2 + 1e-16), cav_up_1, cav_up_2 elif mode ==", "variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] var_i_new = -1.0 / dm2", "np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) - Kuuinv kff = np.exp(2 * self.sf) kfu = compute_kernel(2", "* t1_old + t1_frac t2_new = (1.0 - alpha) * t2_old + t2_frac", "decay (float, optional): Description alpha (float, optional): Description Returns: TYPE: Description Raises: NotImplementedError:", "self.update_posterior() class SGPR(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer (TYPE):", "cav_v (TYPE): Description decay (float, optional): Description Returns: TYPE: Description \"\"\" dmx =", "1: # rho = 0.5 # n1_new = 1.0 / var_new_parallel # n2_new", "optional): Description key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" # dict to", "x_up_1 (TYPE): Description x_up_2 (TYPE): Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian',", "- vcav**2 * (dmcav**2 - 2 * dvcav) new_n2 = 1.0 / new_v", "gamma + hd1 beta_si = beta - hd2h # projection h = p_i", "(TYPE): Description sgp_layer (TYPE): Description sn (int): Description updated (bool): 
Description x_next_1 (TYPE):", "None: ls = np.log(np.ones((Din, )) + 0.1 * np.random.rand(Din, )) sf = np.log(np.array([1]))", "= self.compute_cavity(n, alpha) Kuuinv = self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Bhat =", "# init_params.update(ssm_params) # return init_params # def get_hypers(self): # \"\"\"Summary # Returns: #", "optional): Description prior_mean (int, optional): Description prior_var (int, optional): Description Raises: NotImplementedError: Description", "= self.y_train[idxs, :] # update model with new hypers self.update_hypers(params) # run power-EP", "(TYPE): Description input_size (TYPE): Description output_size (TYPE): Description no_pseudo (TYPE): Description \"\"\" self.Din", "def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1, x_control=None, gp_emi=False, control_to_emi=True): \"\"\"Summary Args:", "sys import math import numpy as np import scipy.linalg as npalg from scipy", "# neg_idxs = np.where(frac_t2 < 0) # frac_t2[neg_idxs] = 0 cur_t1 = self.tx1[n,", "np.transpose(temp1, [0, 1]) temp3 = np.einsum('na,ndb->ab', p_i, p_idlogZ_dvi) dKuu_via_vi = temp1 + temp2", "mout**2 return mout, vout def backprop_grads_lvm(self, m, v, dm, dv, extra_args, mx, vx,", "= 1.0 / cav_x2 cav_m = cav_v * cav_x1 return cav_m, cav_v, cav_x1,", "input_size (TYPE): Description output_size (TYPE): Description no_pseudo (TYPE): Description \"\"\" self.Din = Din", "wnS = np.einsum('na,dab->ndb', p_i, Su) wnSwn = np.sum(wnS * p_i[:, np.newaxis, :], axis=2)", "= np.triu_indices(N) ls = np.zeros((Din, )) d2imed = np.median(x_dist[triu_ind]) for i in range(Din):", "0.1 * np.ones_like(post_m) post_2 = 1.0 / post_v post_1 = post_2 * post_m", "np.einsum('nda,ndb->ndab', muhat, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Smm, Kuuinv)) - Kuuinv", "/ (cav_up_2 + 1e-16), cav_up_1, cav_up_2 elif mode == self.PREV: cav_prev_1 = self.x_up_1[idxs,", "dv, alpha, prop_info) 
self.sgp_layer.update_posterior(None, new_hypers=False) def update_hypers(self, params): \"\"\"Summary Args: params (TYPE): Description", "dmx, 'vx': dvx, 'mcav': dmcav, 'vcav': dvcav} return grad_hyper, grad_input def backprop_grads_reg(self, m,", "m, v, dm, dv, extra_res, cav_m, cav_v, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res,", "v_sum + 0.5 * m_diff**2 / v_sum**2 dvprop = -0.5 / v_sum +", "decay * cur_n2 + (1 - decay) * n2_new elif mode == self.PREV:", "= np.linalg.inv(Vinv) self.mu = np.einsum('dab,db->da', self.Su, T1u) self.gamma = np.einsum('ab,db->da', self.Kuuinv, self.mu) self.beta", "'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2 * self.sf) psi1,", "* (1 - alpha) * np.log(2 * np.pi * sn2) - 0.5 *", "hypers, inducing points and parameters of q(U) N = self.N M = self.M", "= self.Kuuinv Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac',", "term2b + term2c + term2d) sgp_contrib = - term1 - term2 KuuinvMcav =", "range(0, self.N - 1): # deal with the dynamics factors here cav_t_m, cav_t_v,", "Suinvnew, munew) t2_frac = Suinvnew - Suinvhat t1_frac = SuinvMunew - SuinvMuhat t1_old", "psi2, ls, sf2, mx, vx, zu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav", "np.einsum('dab,na,nb->nd', B, kfu, kfu) vout = kff + Bpsi2 return mout, vout #", "Smm, psi1, psi2, Ahat, Bhat = \\ a[0], a[1], a[2], a[3], a[4], a[5],", "no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1, x_control=None, gp_emi=False, control_to_emi=True): \"\"\"Summary Args: y_train (TYPE): Description hidden_size", "my, vy, vyn # def predict_f(self, inputs): # \"\"\"Summary # Args: # inputs", "kff + Bkfukuf extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat] return", "+ 1e-16) sf = np.log(np.array([0.5])) params = dict() params['sf' + key_suffix] = sf", "= self.means[idxs, :] dlogZd_dmi2 = 1.0 / (variance_i/alpha - 
np.sum(k_i[:, np.newaxis, :] *", "np.einsum('nd,nm->ndm', dm, kfu) dBhat = np.einsum('nd,na,nb->ndab', dv, kfu, kfu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv,", "scale_logZ * dm2 sgp_contrib, sgp_grad = self.sgp_layer.backprop_grads_reg( idxs, m_cav, v_cav, dm_s, dm2_s, dv_s,", "vx, alpha): \"\"\"Summary Args: n (TYPE): Description mx (TYPE): Description vx (TYPE): Description", "not self.updated: # self.sgp_layer.update_posterior() # self.updated = True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs)", "v_sum**2 dmt = m_diff / v_sum dmprop = m_diff / v_sum return logZ,", "dls, dzu = compute_kfu_derivatives( dKfu_via_logZ, kfu, np.exp(self.ls), sf2, x, self.zu) dls = dls", "raise NotImplementedError('unknown mode') def compute_transition_tilted(self, m_prop, v_prop, m_t, v_t, alpha): \"\"\"Summary Args: m_prop", "dKuu_via_mi + dKuu_via_vi dKfu_via_mi = dmiKuuinvMcav dKfu_via_vi = 2 * dv[:, :, np.newaxis]", "# compute the gradients Vmm = Su + np.einsum('da,db->dab', mu, mu) S =", "and compute grads no_ep_sweeps = 10 # TODO: put this in config parallel", "/ cav_next_2, 1.0 / cav_next_2, cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def compute_cavity_x_sequential(self,", "/ prior_var self.t02 = 1.0 / prior_var # TODO: alternatitve method for non", "a[0], a[1], a[2], a[3], a[4], a[5], a[6] Kuuinv = self.Kuuinv # compute grads", "else: # parallel update for entire dataset # TODO: minibatch parallel idxs =", "* (dmx**2 - 2 * dvx) new_p2 = 1.0 / new_v new_p1 =", "dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb[i], alpha, compute_dm2=True) self.sgp_layer.update_factor( [idxs[i]], m_cav,", "h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2] kfu = self.Kfu[idxs, :] variance_i =", "Args: epoch (TYPE): Description alpha (TYPE): Description decay (TYPE): Description Returns: TYPE: Description", "hold hypers, inducing points and parameters of q(U) N = self.N M =", "1 phi_prior = self.compute_phi_prior() phi_post = 
self.compute_phi_posterior() phi_cav = self.compute_phi_cavity() phi = scale_prior", "for key in lik_grad.keys(): grad_all[key] = lik_grad[key] energy /= N for key in", "= (post_m - post_m_mean) / post_m_std post_v = 0.1 * np.ones_like(post_m) post_2 =", "cav_tm1_1, cav_tm1_2, [n], decay=decay, alpha=alpha) self.update_factor_x_sequential( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2,", "self.sf, x, self.zu) qfu = np.dot(kfu, self.Kuuinv) mf = np.einsum('nm,dm->nd', qfu, u_sample) vf", "(-p_i[:, np.newaxis, :] + KuuinvVcavKuuinvKufi) dKfu_via_logZ = np.sum(dKfu_via_mi + dKfu_via_vi, axis=1) dsf2, dls,", "# compute grads wrt psi1 and psi2 dpsi1 = np.einsum('nd,ndm->nm', dm_all, Ahat) dpsi2", "= compute_psi_weave( 2 * self.ls, 2 * self.sf, mx, vx, self.zu) mout =", "compute_kernel_diag(2*self.ls, 2*self.sf, x_train) KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 / self.variances) T2u = np.einsum('dan,bn->dab',", "* dv[:, :, np.newaxis] temp1 = - np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi, p_idlogZ_dvi) temp2 = np.transpose(temp1,", "vx, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v (TYPE): Description dm (TYPE): Description", "psi2) # compute grads wrt psi1 and psi2 dpsi1 = np.einsum('nd,ndm->nm', dm_all, Ahat)", "optional): Description Returns: TYPE: Description \"\"\" a = extra_args muhat, Suhat, SuinvMuhat, Suinvhat,", "self.N M = self.M ls = np.exp(self.ls) sf2 = np.exp(2 * self.sf) zu", "= np.dot(kfu, self.Kuuinv) mf = np.einsum('nm,dm->nd', qfu, u_sample) vf = kff - np.dot(qfu,", ":] = t1_new self.t2[n, :, :, :] = t2_new # TODO: update posterior", "np.einsum('nda,ndb->ndab', h_si, h_si) * dlogZd_dmi2[:, :, np.newaxis, np.newaxis] gamma_si = gamma + hd1", "%d' % (k, no_sweeps) find_log_lik = compute_energy and (k == no_sweeps-1) m_cav, v_cav,", "Ahat, Bhat] return mout, vout, extra_res def forward_prop_thru_post(self, mx, vx=None): \"\"\"Summary Args: mx", "= self.N Din = self.Din self.x_prev_1 = np.zeros((N, Din)) 
self.x_prev_2 = np.zeros((N, Din))", "= self.Dout Kuu = self.Kuu Kuuinv = self.Kuuinv Su = self.Su mu =", "scale_cav = - N * 1.0 / alpha scale_prior = 1 phi_prior =", "(TYPE): Description x (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" muhat,", "0.5 * np.log(alpha) logZ = exp_term + const_term + alpha_term dvt = -0.5", "[muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat] return mout, vout, extra_res def _forward_prop_random_thru_cav_mm(self,", "- gn * tn * wnScavSinvm term2c = 0.5 * tn * mwn**2", "forward_prop_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv =", "and (k == no_sweeps-1) for i in range(batch_size): m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity(", "cav_prev_2 += (1 - alpha) * self.x_prev_2[idxs, :] return cav_prev_1 / cav_prev_2, 1.0", "as npalg from scipy import special from scipy.optimize import minimize import matplotlib.pyplot as", "extra_args[3] dmcav, dvcav = grad_cav['mcav'], grad_cav['vcav'] # perform Power-EP update munew = muhat", "(TYPE): Description vx (TYPE): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\"", "= N / Nb * np.sum(term2a + term2b + term2c + term2d) sgp_contrib", "= np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf) T1u = np.einsum('bn,nd->db', self.KuuinvKuf, self.means / self.variances) Vinv =", "Ahat) dpsi2 = np.einsum('nd,ndab->nab', dv, Bhat) dsf2, dls, dzu, dmx, dvx = compute_psi_derivatives(", "# axs[0].errorbar(self.zu[:, 0]+0.05, self.mu[0, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[0, :, :]))) # axs[1].errorbar(self.zu[:, 0]+0.05, self.mu[1,", "rho = 0.5 # n1_new = 1.0 / var_new_parallel # n2_new = mean_new_parallel", "0) # frac_t2[neg_idxs] = 0 cur_t1 = self.tx1[n, :] cur_t2 = self.tx2[n, :]", "Bpsi2 - mout**2 return mout, vout def backprop_grads_lvm(self, m, v, dm, dv, extra_args,", "= np.zeros((self.N, self.Din)) self.tx2 = np.zeros((self.N, self.Din)) self.t01 = 
prior_mean / prior_var self.t02", "- alpha) * cur_n2 + frac_n2 self.x_next_1[idxs, :] = decay * cur_n1 +", "KuuinvMcav dKuu_via_mi = -np.einsum('nda,nb->ab', dmiKuuinvMcav, p_i) VcavKuuinvKufi = np.einsum('ndab,nb->nda', Sucav, p_i) KuuinvVcavKuuinvKufi =", "epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE): Description alpha (TYPE): Description decay (TYPE):", "v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) def update_hypers(self, params): \"\"\"Summary Args:", "+ (1 - decay) * t1_new self.t2[n, :, :, :] = decay *", "/ var_i_new + mean_i / variance_i * (1 - alpha)) mean_new_parallel = mean_div_var_i_new", "= decay * cur_n1 + (1 - decay) * n1_new self.x_next_2[idxs, :] =", "np.einsum('dab,db->da', self.Su, T1u) self.gamma = np.einsum('ab,db->da', self.Kuuinv, self.mu) self.beta = self.Kuuinv - np.einsum('ab,dbc->dac',", "/ variance_i * (1 - alpha)) mean_div_var_i_new = (mean_i_new / var_i_new + mean_i", "(k == no_sweeps-1) for i in range(batch_size): m_cav, v_cav, prop_info = self.sgp_layer.compute_cavity( [idxs[i]],", "SuinvMuhat t1_old = self.t1[n, :, :] t2_old = self.t2[n, :, :, :] t1_new", "alpha=alpha) def inf_sequential(self, epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE): Description alpha (TYPE):", "< 10000: centroids, label = kmeans2(x_train, M, minit='points') else: randind = np.random.permutation(N) centroids", "S = 0.5 * S + dKuu_via_logZ dhyp = d_trace_MKzz_dhypers( 2*self.ls, 2*self.sf, self.zu,", "np.einsum('nd,ndm->nm', dm_all, Ahat) dpsi2 = np.einsum('nd,ndab->nab', dv, Bhat) dsf2, dls, dzu, dmx, dvx", "optional): Description parallel (bool, optional): Description decay (float, optional): Description Returns: TYPE: Description", "return mx, vx # def predict_f(self, inputs): # \"\"\"Summary # Args: # inputs", "self.sgp_layer.init_hypers() # lik_params = self.emi_layer.init_hypers() # ssm_params = {'sn': np.log(0.001)} # init_params =", "extra_res, cav_m_n, cav_v_n, alpha=alpha) 
self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( [n], alpha,", "- alpha)) mean_div_var_i_new = (mean_i_new / var_i_new + mean_i / variance_i * (1", "(int, optional): Description prior_var (int, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPSSM, self).__init__(", "dAhat = np.einsum('nd,nm->ndm', dm_all, psi1) dBhat = np.einsum('nd,nab->ndab', dv, psi2) # compute grads", "variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] # compute cavity covariance betacavKuu", "Description \"\"\" Su = self.Su mu = self.mu Lu = np.linalg.cholesky(Su) epsilon =", "(TYPE): Description \"\"\" def __init__(self, no_train, input_size, output_size, no_pseudo): \"\"\"Summary Args: no_train (TYPE):", "= cav_m + cav_v * dmx new_v = cav_v - cav_v**2 * (dmx**2", "= scale_logZ * np.sum(logZ) dm_s = scale_logZ * dm dv_s = scale_logZ *", "optional): Description no_epochs (int, optional): Description parallel (bool, optional): Description decay (int, optional):", "np.einsum('nd,nab->ndab', dv, psi2) # compute grads wrt psi1 and psi2 dpsi1 = np.einsum('nd,ndm->nm',", "/ var_new_parallel # n1_ori = 1.0 / variance_i # n2_ori = mean_i /", "def inference(self, alpha=1.0, no_epochs=10, parallel=False, decay=0): \"\"\"Summary Args: alpha (float, optional): Description no_epochs", "* cur_n1 + (1 - decay) * n1_new self.x_prev_2[idxs, :] = decay *", "# mean_new_parallel = var_new_parallel * n2_damped self.variances[idxs, :] = var_new_parallel self.means[idxs, :] =", "np.zeros([N, Dout, M, M]) # TODO self.mu = np.zeros([Dout, M, ]) self.Su =", ":, np.newaxis] * KuuinvMcav dKuu_via_mi = -np.einsum('nda,nb->ab', dmiKuuinvMcav, p_i) VcavKuuinvKufi = np.einsum('ndab,nb->nda', Sucav,", "kmeans2 from utils import * from kernels import * from lik_layers import Gauss_Layer,", "cav_next_1 = self.x_up_1[idxs, :] + self.x_prev_1[idxs, :] cav_next_2 = self.x_up_2[idxs, :] + self.x_prev_2[idxs,", "\"\"\" super(SGPSSM, 
self).__init__( y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var, x_control, gp_emi, control_to_emi) self.dyn_layer", "'UP', 'PREV', 'NEXT' def inf_parallel(self, epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE): Description", "0.5 * m_diff**2 / v_sum**2 dvprop = -0.5 / v_sum + 0.5 *", "Description t2 (TYPE): Description zu (TYPE): Description \"\"\" def __init__(self, no_train, input_size, output_size,", "self.dyn_layer.forward_prop_thru_cav( [n], cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt, dvt = \\ self.compute_transition_tilted(", "np.zeros([Din, ]) self.sf = 0 def forward_prop_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description", "Splusmm (TYPE): Description Su (TYPE): Description Suinv (TYPE): Description SuinvMu (TYPE): Description t1", "np.sum(mu * p_i[:, np.newaxis, :], axis=2) oneminuswnSwn = 1 - alpha * tn", "{'sn': np.log(0.001)} # init_params = dict(sgp_params) # init_params.update(lik_params) # init_params.update(ssm_params) # return init_params", "= sf params['ls' + key_suffix] = ls params['zu' + key_suffix] = zu return", "mu and Su for each layer self.update_posterior(x_train, new_hypers=True) def compute_cavity(self, idxs, alpha): #", "Sucav = Kuu - np.einsum('ab,ndbc->ndac', Kuu, betacavKuu) signSu, logdetSu = np.linalg.slogdet(Su) signKuu, logdetKuu", "decay * cur_n2 + (1 - decay) * n2_new else: raise NotImplementedError('unknown mode')", "(TYPE): Description Dout (TYPE): Description Kuu (TYPE): Description Kuuinv (TYPE): Description ls (TYPE):", "# \"\"\"Summary # Args: # params (TYPE): Description # Returns: # TYPE: Description", "M, ]) self.Suinv = np.zeros([Dout, M, M]) self.Splusmm = np.zeros([Dout, M, M]) #", "= self.lik_layer.backprop_grads( m_cav, v_cav, dm, dv, alpha, scale_logZ) energy = sgp_contrib + lik_contrib", "np.random.permutation(N) X1 = X[randind[:5000], :] x_dist = cdist(X1, X1, 'euclidean') triu_ind = np.triu_indices(N)", "= self.emi_layer.compute_factor( cav_up_m, cav_up_v, alpha) 
self.x_up_1 = up_1 self.x_up_2 = up_2 # deal", "vout = kff + Bpsi2 return mout, vout def sample(self, x): \"\"\"Summary Args:", "np.ones_like(post_m) post_2 = 1.0 / post_v post_1 = post_2 * post_m self.tx1 =", "- decay) * t1_new self.t2[n, :, :, :] = decay * t2_old +", "np.log(0.001)} # init_params = dict(sgp_params) # init_params.update(lik_params) # init_params.update(ssm_params) # return init_params #", "logdetKuu = np.linalg.slogdet(Kuu) Suinvm = np.einsum('dab,db->da', Suinv, mu) term1 = 0.5 * (np.sum(logdetSu)", "= 2*dhyp[1] + dls grads['zu'] = dhyp[2] + dzu return sgp_contrib, grads class", "= - np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi, p_idlogZ_dvi) temp2 = np.transpose(temp1, [0, 1]) temp3 = np.einsum('na,ndb->ab',", "= np.einsum('ab,ndb->nda', Kuuinv, VcavKuuinvKufi) p_idlogZ_dvi = p_i[:, np.newaxis, :] * dv[:, :, np.newaxis]", "= self.x_up_1[idxs, :] + self.x_prev_1[idxs, :] cav_next_2 = self.x_up_2[idxs, :] + self.x_prev_2[idxs, :]", "cav_m, cav_v, decay=0.0): \"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description grad_cav (TYPE):", "ssm_params = {'sn': self.sn} # params = dict(sgp_params) # params.update(emi_params) # params.update(ssm_params) #", "projection h = p_i - np.einsum('ndab,nb->nda', beta_si, k_i) m_si_i = np.einsum('na,nda->nd', k_i, gamma_si)", "k in range(no_sweeps): # if k % display_steps == 0: # print 'PEP,", "Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) - Kuuinv kff = np.exp(2", "Returns: # TYPE: Description # \"\"\" # self.sgp_layer.update_hypers(params) # self.emi_layer.update_hypers(params) # self.sn =", "extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( idxs, cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt, dvt", "\"\"\"Summary Args: m (TYPE): Description v (TYPE): Description dm (TYPE): Description dv (TYPE):", "n (TYPE): Description x (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\"", "frac_n1 = new_n1 - n1cav if mode == 
self.NEXT: cur_n1 = self.x_next_1[idxs, :]", "alpha (float, optional): Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\" new_m =", "self.lik_layer.compute_log_Z( m_cav, v_cav, yb[i], alpha, compute_dm2=True) self.sgp_layer.update_factor( [idxs[i]], m_cav, v_cav, dm, dm2, dv,", "- alpha) * self.x_next_2[idxs, :] return cav_next_1 / cav_next_2, 1.0 / cav_next_2, cav_next_1,", "cav_t_1, cav_t_2, decay=decay, alpha=alpha) def inf_sequential(self, epoch, alpha, decay): \"\"\"Summary Args: epoch (TYPE):", "cav_t_2, [n + 1], decay=decay, alpha=alpha) def inference(self, alpha=1.0, no_epochs=10, parallel=True, decay=0): \"\"\"Summary", "Description lik (str, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPR_rank_one, self).__init__(x_train, y_train, no_pseudo,", "alpha (TYPE): Description Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\" if mode ==", "SuinvMuhat, Suinvhat = \\ extra_args[0], extra_args[1], extra_args[2], extra_args[3] dmcav, dvcav = grad_cav['mcav'], grad_cav['vcav']", "self.t02 + self.tx2 vx = 1.0 / post_2 mx = post_1 / post_2", "beta, k_i) variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] dlogZd_dmi2 = 1.0", "Description decay (float, optional): Description Returns: TYPE: Description \"\"\" dmx = grad_cav['mx'] dvx", "is not None: Kfu = compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu) KuuinvKuf = np.dot(self.Kuuinv, Kfu.T)", "print 'Caught KeyboardInterrupt ...' 
def compute_cavity_x(self, mode, alpha): \"\"\"Summary Args: mode (TYPE): Description", "/ cav_next_2, cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def compute_transition_tilted(self, m_prop, v_prop, m_t,", "compute the posterior approximation if new_hypers and x_train is not None: Kfu =", "cav_up_2 = self.x_prev_2 + self.x_next_2 + \\ (1 - alpha) * self.x_up_2 cav_up_1[0,", "np.einsum('dab,db->da', self.Su, self.SuinvMu) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: x_train (None, optional): Description", "alpha, grad_cav, extra_res) else: # parallel update for entire dataset # TODO: minibatch", "cav_t_v, cav_t_1, cav_t_2, [n + 1], decay=decay, alpha=alpha) def inference(self, alpha=1.0, no_epochs=10, parallel=True,", "(TYPE): Description t1 (TYPE): Description t2 (TYPE): Description zu (TYPE): Description \"\"\" def", "grad_cav = self.sgp_layer.backprop_grads_reg( mn, vn, dmn, dvn, extra_res, xn, alpha=alpha) self.sgp_layer.update_factor( [n], alpha,", "sgp_grad = self.sgp_layer.backprop_grads_reg( idxs, m_cav, v_cav, dm_s, dm2_s, dv_s, xb, alpha, prop_info) lik_grad", ":].reshape([1, self.Dout]) cav_m_n, cav_v_n, _, _ = self.compute_cavity_x([ n], alpha) (mn, vn, extra_res)", "self.sgp_layer.compute_cavity( [idxs[i]], alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb[i], alpha,", "= np.einsum('ab,ndb->nda', Kuu, gamma_si) Sucav = Kuu - np.einsum('ab,ndbc->ndac', Kuu, betacavKuu) signSu, logdetSu", "self.PREV, self.NEXT = 'UP', 'PREV', 'NEXT' def inf_parallel(self, epoch, alpha, decay): \"\"\"Summary Args:", "np.einsum('dab,db->da', Lu, epsilon) kff = compute_kernel(2 * self.ls, 2 * self.sf, x, x)", "parallel, compute_energy=True) return energy, grad_all def run_pep(self, train_idxs, no_sweeps, alpha, parallel, compute_energy=False, display_steps=10):", "optional): Description decay (int, optional): Description Returns: TYPE: Description \"\"\" try: for e", "n (TYPE): Description alpha 
(TYPE): Description Returns: TYPE: Description \"\"\" # prior factor", "= self.sgp_layer.init_hypers() # lik_params = self.emi_layer.init_hypers() # ssm_params = {'sn': np.log(0.001)} # init_params", "p_i[:, np.newaxis, :], axis=2) mwn = np.sum(mu * p_i[:, np.newaxis, :], axis=2) oneminuswnSwn", "(dmx**2 - 2 * dvx) new_p2 = 1.0 / new_v new_p1 = new_p2", "kfu, kfu) vout = kff + Bkfukuf extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat,", "* self.tx1[n, :] cav_x2 = self.t02 + (1 - alpha) * self.tx2[n, :]", "(int, optional): Description prior_var (int, optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPLVM, self).__init__(y_train,", "_ = self.compute_cavity_x(self.UP, alpha) if not self.gp_emi: # only do this once at", "compute grads wrt Ahat and Bhat dAhat = np.einsum('nd,nm->ndm', dm, kfu) dBhat =", "= self.KuuinvKuf[:, idxs].T[:, np.newaxis, :] k_i = self.Kfu[idxs, :] k_ii = self.Kff_diag[idxs][:, np.newaxis]", "= self.Kfu[idxs, :] k_ii = self.Kff_diag[idxs][:, np.newaxis] gamma = self.gamma beta = self.beta", ":] cav_prev_1 += (1 - alpha) * self.x_prev_1[idxs, :] cav_prev_2 += (1 -", "__init__(self, no_train, input_size, output_size, no_pseudo): \"\"\"Summary Args: no_train (TYPE): Description input_size (TYPE): Description", "= scale_logZ * dm2 sgp_contrib, sgp_grad = self.sgp_layer.backprop_grads_reg( idxs, m_cav, v_cav, dm_s, dm2_s,", "self.t01 + (1 - alpha) * self.tx1[n, :] cav_x2 = self.t02 + (1", "alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb[i], alpha, compute_dm2=True) self.sgp_layer.update_factor(", "= 0 cur_t1 = self.tx1[n, :] cur_t2 = self.tx2[n, :] tx1_new = (1", "self.means[idxs, :] var_i_new = -1.0 / dm2 - np.sum(k_i[:, np.newaxis, :] * h,", "dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( mn, vn,", "= kff + Bkfukuf extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat]", "Description \"\"\" self.Din = Din = input_size 
self.Dout = Dout = output_size self.M", "sf = np.log(np.array([1])) zu = np.tile(np.linspace(-1, 1, M).reshape((M, 1)), (1, Din)) else: if", "= np.copy(x_train) else: randind = np.random.permutation(N) X1 = X[randind[:5000], :] x_dist = cdist(X1,", "+ Bhatpsi2 - mout**2 extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, Smm, psi1, psi2,", "= np.linalg.inv(self.Suinv) self.mu = np.einsum('dab,db->da', self.Su, self.SuinvMu) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args:", "\\ (1 - alpha) * self.x_up_1 cav_up_2 = self.x_prev_2 + self.x_next_2 + \\", "range(K): # fs[:, :, k] = self.sgp_layer.sample(inputs) # return fs # def predict_y(self,", "= (1 - alpha) * cur_n2 + frac_n2 self.x_next_1[idxs, :] = decay *", "grad_cav, extra_res) else: # parallel update for entire dataset # TODO: minibatch parallel", "self).__init__( y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var, x_control, gp_emi, control_to_emi) self.dyn_layer = SGP_Layer(", "x, alpha): \"\"\"Summary Args: n (TYPE): Description x (TYPE): Description alpha (TYPE): Description", "k % display_steps == 0: # print 'PEP, epoch: %d / %d' %", "self.sf) kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu) mout =", "[n], alpha) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( [n], cav_tm1_m, cav_tm1_v, alpha=alpha) logZ,", "inputs, no_samples=1): # \"\"\"Summary # Args: # inputs (TYPE): Description # no_samples (int,", "prior_var self.UP, self.PREV, self.NEXT = 'UP', 'PREV', 'NEXT' def inf_parallel(self, epoch, alpha, decay):", "Din = self.Din self.x_prev_1 = np.zeros((N, Din)) self.x_prev_2 = np.zeros((N, Din)) self.x_next_1 =", "# self.sgp_layer.update_posterior() # self.updated = True # mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # return", "cav_tm1_v, alpha=alpha) logZ, dmprop, dvprop, dmt, dvt = \\ self.compute_transition_tilted( mprop, vprop, cav_t_m,", "prior_var=1): \"\"\"Summary Args: y_train (TYPE): Description hidden_size (TYPE): Description 
no_pseudo (TYPE): Description lik", "self.y_train[idxs, :] energy = {} grad_all = {} for k in range(no_sweeps): #", "self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.emi_layer.output_probabilistic(mf, vf) # return my, vy # def", "temp3 = np.einsum('na,ndb->ab', p_i, p_idlogZ_dvi) dKuu_via_vi = temp1 + temp2 + temp3 dKuu_via_logZ", "Description \"\"\" if vx is None: return self._forward_prop_deterministic_thru_post(mx) else: return self._forward_prop_random_thru_post_mm(mx, vx) def", "variance_i * (1 - alpha)) mean_div_var_i_new = (mean_i_new / var_i_new + mean_i /", "dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, [n + 1], decay=decay, alpha=alpha) def inference(self,", "Description Dout (TYPE): Description Kuu (TYPE): Description Kuuinv (TYPE): Description ls (TYPE): Description", "/ dm2 var_new_parallel = 1 / (1 / var_i_new + 1 / variance_i", "sf2, mx, vx, zu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv) dmcav = 2", "alpha m_diff = m_t - m_prop exp_term = -0.5 * m_diff**2 / v_sum", "= N = no_train # factor variables self.variances = np.zeros([N, Dout]) self.variances.fill(1e20) self.means", "h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2] k_i = self.Kfu[idxs, :] variance_i =", "* t2n SuinvMuhat = self.SuinvMu - alpha * t1n Suhat = np.linalg.inv(Suinvhat) muhat", "= dict(sgp_params) # init_params.update(lik_params) # init_params.update(ssm_params) # return init_params # def get_hypers(self): #", "cav_up_2 elif mode == self.PREV: idxs = np.arange(1, self.N) cav_prev_1 = self.x_up_1[idxs, :]", "+ const_term + alpha_term dvt = -0.5 / v_sum + 0.5 * m_diff**2", "1) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( idxs, cav_tm1_m, cav_tm1_v, alpha=alpha) logZ, dmprop,", "return mout, vout, extra_res def forward_prop_thru_post(self, mx, vx=None): \"\"\"Summary Args: mx (TYPE): Description", "* cur_t2 + (1 - decay) * tx2_new self.tx1[n, :] = tx1_new self.tx2[n,", "mu (TYPE): Description N (TYPE): Description 
sf (int): Description Splusmm (TYPE): Description Su", "Description updated (bool): Description x_train (TYPE): Description \"\"\" def __init__(self, x_train, y_train, no_pseudo,", "dmt = m_diff / v_sum dmprop = m_diff / v_sum return logZ, dmprop,", "(1 - alpha) * self.tx2[n, :] cav_v = 1.0 / cav_x2 cav_m =", "self.sgp_layer.forward_prop_thru_post(inputs) # return mf, vf # def sample_f(self, inputs, no_samples=1): # \"\"\"Summary #", "\"\"\"Summary Args: n (TYPE): Description alpha (TYPE): Description grad_cav (TYPE): Description cav_m (TYPE):", "self.x_prior_2 vx = 1.0 / post_2 mx = post_1 / post_2 return mx,", "cur_n2 + (1 - decay) * n2_new else: raise NotImplementedError('unknown mode') def update_factor_x_sequential(", "grad_hyper = {} grad_input = {'mx': dmx, 'vx': dvx, 'mcav': dmcav, 'vcav': dvcav}", "self.Din self.x_prev_1 = np.zeros((N, Din)) self.x_prev_2 = np.zeros((N, Din)) self.x_next_1 = np.zeros((N, Din))", "x (TYPE): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" a =", "np.einsum('da,db->dab', self.mu, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv)) - Kuuinv", "decay) * n2_new elif mode == self.PREV: cur_n1 = self.x_prev_1[idxs, :] cur_n2 =", "no_pseudo, lik) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) def inference(self, alpha=1.0, no_epochs=10, parallel=False,", "== 1: # TODO: do damping here? 
self.t1[n, :, :] = t1_new self.t2[n,", ":] k_i = self.Kfu[idxs, :] k_ii = self.Kff_diag[idxs][:, np.newaxis] gamma = self.gamma beta", ":]))) # axs[1].errorbar(self.zu[:, 0]+0.05, self.mu[1, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[1, :, :]))) # axs[0].set_title('n =", "post_1 = post_2 * post_m self.tx1 = post_1 - self.t01 self.tx2 = post_2", "cav_v, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) self.update_factor_x( idxs, alpha, grad_cav, cav_m,", "Description dm (TYPE): Description dv (TYPE): Description extra_args (TYPE): Description x (TYPE): Description", "self.sgp_layer.backprop_grads_reg( idxs, m_cav, v_cav, dm_s, dm2_s, dv_s, xb, alpha, prop_info) lik_grad = self.lik_layer.backprop_grads(", "Din)) self.x_up_2 = np.zeros((N, Din)) self.x_prior_1 = prior_mean / prior_var self.x_prior_2 = 1.0", "'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) - Kuuinv kff = np.exp(2 * self.sf) kfu", "cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2 = self.x_up_2[idxs, :] + self.x_next_2[idxs,", "(TYPE): Description tx2 (TYPE): Description updated (bool): Description \"\"\" def __init__(self, y_train, hidden_size,", "update posterior self.Su = Sunew[0, :, :, :] self.mu = munew[0, :, :]", "psi2) vout = psi0 + Bpsi2 - mout**2 return mout, vout def backprop_grads_lvm(self,", "* logdet return logZ_prior def compute_phi_posterior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet)", "+ 0.5 * m_diff**2 / v_sum**2 dvprop = -0.5 / v_sum + 0.5", "= vcav - vcav**2 * (dmcav**2 - 2 * dvcav) new_n2 = 1.0", "Description \"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1): \"\"\"Summary Args: y_train", "k_ii - np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i) return m_si_i, v_si_ii, [h, beta_si, gamma_si] def", "A = np.einsum('ab,db->da', Kuuinv, self.mu) Smm = self.Su + np.einsum('da,db->dab', self.mu, self.mu) B", "zu) self.Kuu += 
np.diag(JITTER * np.ones((M, ))) self.Kuuinv = np.linalg.inv(self.Kuu) def compute_cavity(self, n,", "alpha=1.0, no_epochs=10, parallel=False, decay=0): \"\"\"Summary Args: alpha (float, optional): Description no_epochs (int, optional):", "var_i_new + 1 / variance_i * (1 - alpha)) mean_div_var_i_new = (mean_i_new /", "mf, vf = self.sgp_layer.forward_prop_thru_post(inputs) # my, vy = self.lik_layer.output_probabilistic(mf, vf) # return my,", "+ np.einsum('da,db->dab', mu, mu) S = self.Dout * Kuuinv - np.sum(np.einsum('ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac',", "Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer (TYPE): Description M (TYPE): Description", "= self.zu self.Kuu = compute_kernel(2 * ls, 2 * sf, zu, zu) self.Kuu", "get_posterior_x(self): \"\"\"Summary Returns: TYPE: Description \"\"\" post_1 = self.x_next_1 + self.x_prev_1 + self.x_up_1", "self.mu, self.mu) B = np.einsum( 'ab,dbc->dac', Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv)) - Kuuinv psi0", "*= 0 # dm2 += 1e-16 # dv *= 0 self.sgp_layer.update_factor( idxs, m_cav,", "Description sgp_layer (TYPE): Description sn (int): Description updated (bool): Description x_next_1 (TYPE): Description", "alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_lvm( m, v, dm, dv, extra_res, cav_m, cav_v, alpha=alpha)", "alpha): \"\"\"Summary Args: n (TYPE): Description x (TYPE): Description alpha (TYPE): Description Returns:", "Description key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" self.ls = params['ls' +", "mout, vout # TODO def _forward_prop_random_thru_post_mm(self, mx, vx): \"\"\"Summary Args: mx (TYPE): Description", "for k in range(no_sweeps): # if k % display_steps == 0: # print", "Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu) vout = kff + Bkfukuf extra_res =", "alpha=1.0): \"\"\"Summary Args: n (TYPE): Description mx (TYPE): Description vx (None, optional): Description", "kfu, kfu) vout = kff + Bpsi2 return mout, vout def sample(self, x):", 
"\"\"\" Su = self.Su mu = self.mu Lu = np.linalg.cholesky(Su) epsilon = np.random.randn(self.Dout,", "1e-16 # dv *= 0 self.sgp_layer.update_factor( idxs, m_cav, v_cav, dm, dm2, dv, alpha,", "no_train # factor variables self.t1 = np.zeros([N, Dout, M]) self.t2 = np.zeros([N, Dout,", "self.PREV: idxs = np.arange(1, self.N) cav_prev_1 = self.x_up_1[idxs, :] + self.x_next_1[idxs, :] cav_prev_2", "* tx2_new self.tx1[n, :] = tx1_new self.tx2[n, :] = tx2_new def get_posterior_x(self): \"\"\"Summary", "* (dmcav**2 - 2 * dvcav) new_n2 = 1.0 / new_v new_n1 =", "\"\"\"Summary Args: key_suffix (str, optional): Description Returns: TYPE: Description \"\"\" params = {}", "compute_cavity(self, n, alpha=1.0): \"\"\"Summary Args: n (TYPE): Description alpha (float, optional): Description Returns:", "Description vx (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\" muhat, Suhat,", "dmx = grad_cav['mx'] dvx = grad_cav['vx'] new_m = cav_m + cav_v * dmx", "init_params = dict(sgp_params) # init_params.update(lik_params) # init_params.update(ssm_params) # return init_params # def get_hypers(self):", "dls, dzu, dmx, dvx = compute_psi_derivatives( dpsi1, psi1, dpsi2, psi2, ls, sf2, mx,", "v_prop + sn2 / alpha m_diff = m_t - m_prop exp_term = -0.5", "phi = scale_prior * phi_prior + scale_post * phi_post + scale_cav * phi_cav", "alpha, prop_info) lik_grad = self.lik_layer.backprop_grads( m_cav, v_cav, dm, dv, alpha, scale_logZ) energy =", "v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, cav_m, cav_v, alpha=alpha) logZ, dm, dv =", "sample(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\" Su =", "(TYPE): Description grad_cav (TYPE): Description cav_m (TYPE): Description cav_v (TYPE): Description decay (float,", "x (TYPE): Description Returns: TYPE: Description \"\"\" Su = self.Su mu = self.mu", "= self.x_prev_1[idxs, :] cur_n2 = self.x_prev_2[idxs, :] n1_new = (1 - alpha) *", "\\ self.lik_layer.compute_log_Z(m, v, 
y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( m, v, dm, dv,", "2*self.sf, x_train, self.zu) KuuinvKuf = np.dot(self.Kuuinv, Kfu.T) self.Kfu = Kfu self.KuuinvKuf = KuuinvKuf", "self.x_prev_1 + self.x_next_1 + \\ (1 - alpha) * self.x_up_1 cav_up_2 = self.x_prev_2", "self.gamma = np.zeros([Dout, M]) self.beta = np.zeros([Dout, M, M]) # numpy variable for", "{} for k in range(no_sweeps): # if k % display_steps == 0: #", "beta_si = beta - hd2h # projection h = p_i - np.einsum('ndab,nb->nda', beta_si,", "def get_hypers(self, key_suffix=''): \"\"\"Summary Args: key_suffix (str, optional): Description Returns: TYPE: Description \"\"\"", "dvt = \\ self.compute_transition_tilted( mprop, vprop, cav_t_m, cav_t_v, alpha) grad_hyper, grad_cav = self.dyn_layer.backprop_grads_lvm(", "def update_hypers(self, params): # \"\"\"Summary # Args: # params (TYPE): Description # Returns:", "# def init_hypers(self): # \"\"\"Summary # Returns: # TYPE: Description # \"\"\" #", "post_v post_1 = post_2 * post_m self.tx1 = post_1 - self.t01 self.tx2 =", "= Kuu - np.einsum('ab,ndbc->ndac', Kuu, betacavKuu) signSu, logdetSu = np.linalg.slogdet(Su) signKuu, logdetKuu =", "self.NEXT = 'UP', 'PREV', 'NEXT' def inf_parallel(self, epoch, alpha, decay): \"\"\"Summary Args: epoch", "cav_next_1, cav_next_2 else: raise NotImplementedError('unknown mode') def compute_transition_tilted(self, m_prop, v_prop, m_t, v_t, alpha):", "Args: mx (TYPE): Description vx (None, optional): Description Returns: TYPE: Description \"\"\" if", "(None, optional): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" if vx", "= self.Dout params['ls' + key_suffix] = self.ls params['sf' + key_suffix] = self.sf params_zu_i", "TODO: this should reuse base models! 
\"\"\" import sys import math import numpy", "self.x_train[idxs, :] (m, v, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( idxs, x, alpha=alpha) logZ, dm,", "(float, optional): Description no_epochs (int, optional): Description parallel (bool, optional): Description decay (float,", "for n in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) xn = self.x_train[n, :].reshape([1,", "= self.lik_layer.compute_log_Z( m_cav, v_cav, yb, alpha, compute_dm2=True) # dm *= 0 # dm2", "post_m_std = np.std(post_m, axis=0) post_m = (post_m - post_m_mean) / post_m_std post_v =", "- Kuuinv psi0 = np.exp(2 * self.sf) psi1, psi2 = compute_psi_weave( 2 *", "forward_prop_thru_post(self, mx, vx=None): \"\"\"Summary Args: mx (TYPE): Description vx (None, optional): Description Returns:", ":, :, :] = decay * t2_old + (1 - decay) * t2_new", "sgp_params = self.sgp_layer.init_hypers() # lik_params = self.emi_layer.init_hypers() # ssm_params = {'sn': np.log(0.001)} #", "= self.sgp_layer.compute_cavity(idxs, alpha) logZ, dm, dv, dm2 = self.lik_layer.compute_log_Z( m_cav, v_cav, yb, alpha,", "grad_all[key] /= N return energy, grad_all def run_pep_sequential(self, idxs, no_sweeps, alpha, compute_energy, display_steps):", "Description mx (TYPE): Description vx (None, optional): Description alpha (float, optional): Description Returns:", "new_p2 = 1.0 / new_v new_p1 = new_p2 * new_m frac_t2 = new_p2", "+ dzu return sgp_contrib, grads class SGPR_rank_one(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout", "special from scipy.optimize import minimize import matplotlib.pyplot as plt import time import pdb", "* self.sf, mx, vx, self.zu) mout = np.einsum('nm,dm->nd', psi1, A) Bpsi2 = np.einsum('dab,nab->nd',", "* \\ np.sum(self.mu * np.linalg.solve(self.Su, self.mu)) return phi_posterior def compute_phi_cavity(self): \"\"\"Summary Returns: TYPE:", "np.newaxis] * KuuinvMcav dKuu_via_mi = -np.einsum('nda,nb->ab', dmiKuuinvMcav, p_i) VcavKuuinvKufi = 
np.einsum('ndab,nb->nda', Sucav, p_i)", "import numpy as np import scipy.linalg as npalg from scipy import special from", "N = self.N M = self.M ls = np.exp(self.ls) sf2 = np.exp(2 *", "frac_t2 = new_p2 - 1.0 / cav_v frac_t1 = new_p1 - cav_m /", "= 0 def compute_phi_prior(self): \"\"\"Summary Returns: TYPE: Description \"\"\" (sign, logdet) = np.linalg.slogdet(self.Kuu)", "(1 - alpha) * self.x_prev_2[idxs, :] return cav_prev_1 / cav_prev_2, 1.0 / cav_prev_2,", "n2_damped self.variances[idxs, :] = var_new_parallel self.means[idxs, :] = mean_new_parallel def backprop_grads_reg(self, idxs, m,", "kfu) dBhat = np.einsum('nd,na,nb->ndab', dv, kfu, kfu) dvcav = np.einsum('ab,ndbc,ce->ndae', Kuuinv, dBhat, Kuuinv)", ":, np.newaxis] temp1 = - np.einsum('nda,ndb->ab', KuuinvVcavKuuinvKufi, p_idlogZ_dvi) temp2 = np.transpose(temp1, [0, 1])", "control_to_emi) self.dyn_layer = SGP_Layer( self.N - 1, self.Din + self.Dcon_dyn, self.Din, self.M) if", "[n], decay=decay, alpha=alpha) self.update_factor_x_sequential( self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, [n +", "m_prop, v_prop, m_t, v_t, alpha): \"\"\"Summary Args: m_prop (TYPE): Description v_prop (TYPE): Description", "Suhat + np.einsum('nda,ndb->ndab', muhat, muhat) Bhat = np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Smm, Kuuinv))", "self.zu return params def update_hypers(self, params, key_suffix=''): \"\"\"Summary Args: params (TYPE): Description key_suffix", "v_t, alpha): \"\"\"Summary Args: m_prop (TYPE): Description v_prop (TYPE): Description m_t (TYPE): Description", "Su for each layer self.update_posterior() class SGPR(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout", "no_epochs (int, optional): Description parallel (bool, optional): Description decay (float, optional): Description Returns:", "self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( m, v, dm, dv, extra_res,", "mu = self.mu Lu = np.linalg.cholesky(Su) 
epsilon = np.random.randn(self.Dout, self.M) u_sample = mu", "frac_n2 self.x_next_1[idxs, :] = decay * cur_n1 + (1 - decay) * n1_new", "np.zeros([N, Dout]) self.variances.fill(1e20) self.means = np.zeros([N, Dout]) # pep variables self.gamma = np.zeros([Dout,", "self.sgp_layer.update_factor( idxs, m_cav, v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) if find_log_lik:", "mode') def compute_transition_tilted(self, m_prop, v_prop, m_t, v_t, alpha): \"\"\"Summary Args: m_prop (TYPE): Description", "mcav, vcav, n1cav, n2cav, idxs, decay=0.0, alpha=1.0): \"\"\"Summary Args: mode (TYPE): Description dmcav", "mode, idxs, alpha): \"\"\"Summary Args: mode (TYPE): Description idxs (TYPE): Description alpha (TYPE):", "self.sf, x, self.zu) mout = np.einsum('nm,ndm->nd', kfu, Ahat) Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat, kfu,", "from utils import * from kernels import * from lik_layers import Gauss_Layer, Probit_Layer,", "inner = np.einsum('nda,ndb->ndab', dmcav, dmcav) - 2 * dvcav Sunew = Suhat -", "Description \"\"\" super(SGPLVM, self).__init__(y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var) self.sgp_layer = SGP_Layer(self.N, self.Din,", "self.x_up_2[idxs, :] + self.x_prev_2[idxs, :] cav_next_1 += (1 - alpha) * self.x_next_1[idxs, :]", "np.einsum('dab,bc->dac', self.Su, self.Kuuinv)) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: x_train (None, optional): Description", "sf2 * dsf2 # compute the gradients Vmm = Su + np.einsum('da,db->dab', mu,", "= self.KuuinvKuf[:, idxs].T h, beta_si, gamma_si = prop_info[0], prop_info[1], prop_info[2] kfu = self.Kfu[idxs,", "1.0 / (cav_up_2 + 1e-16), cav_up_1, cav_up_2 elif mode == self.PREV: cav_prev_1 =", "(TYPE): Description dm (TYPE): Description dv (TYPE): Description extra_args (TYPE): Description x (TYPE):", "for entire dataset # TODO: minibatch parallel idxs = np.arange(self.N) y = self.y_train[idxs,", "self.Suinv = Vinv self.Su = 
np.linalg.inv(Vinv) self.mu = np.einsum('dab,db->da', self.Su, T1u) self.gamma =", "self.Dout]) cav_m_n, cav_v_n, _, _ = self.compute_cavity_x([ n], alpha) (mn, vn, extra_res) =", "prop_mode=PROP_MM): \"\"\"Summary Args: params (TYPE): Description mb_size (TYPE): Description alpha (float, optional): Description", "\\ self.sgp_layer.forward_prop_thru_cav( [n], xn, alpha=alpha) logZn, dmn, dvn = \\ self.lik_layer.compute_log_Z(mn, vn, yn,", "2 * self.sf, x, self.zu) mout = np.einsum('nm,dm->nd', kfu, A) Bpsi2 = np.einsum('dab,na,nb->nd',", "Suinvm, axis=2) wnS = np.einsum('na,dab->ndb', p_i, Su) wnSwn = np.sum(wnS * p_i[:, np.newaxis,", "= lik_grad[key] energy /= N for key in grad_all.keys(): grad_all[key] /= N return", "+ self.Dcon_dyn, self.Din, self.M) if gp_emi: self.emi_layer = SGP_Layer( self.N, self.Din + self.Dcon_emi,", "2*self.sf, x_train) KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 / self.variances) T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var,", "optional): Description Raises: NotImplementedError: Description \"\"\" super(SGPLVM, self).__init__(y_train, hidden_size, no_pseudo, lik, prior_mean, prior_var)", "np.linalg.inv(self.Suinv) self.mu = np.einsum('dab,db->da', self.Su, self.SuinvMu) def init_hypers(self, x_train=None, key_suffix=''): \"\"\"Summary Args: x_train", "alpha) grad_hyper, grad_cav = self.sgp_layer.backprop_grads_reg( m, v, dm, dv, extra_res, x, alpha=alpha) self.sgp_layer.update_factor(", "(TYPE): Description dv (TYPE): Description extra_args (TYPE): Description mx (TYPE): Description vx (TYPE):", "\"\"\" def __init__(self, y_train, hidden_size, no_pseudo, lik='Gaussian', prior_mean=0, prior_var=1): \"\"\"Summary Args: y_train (TYPE):", "mout = np.einsum('nm,dm->nd', psi1, A) Bpsi2 = np.einsum('dab,nab->nd', B, psi2) vout = psi0", "= no_pseudo self.N = N = no_train # factor variables self.variances = np.zeros([N,", "cav_t_m, cav_t_v, cav_t_1, cav_t_2, decay=decay, alpha=alpha) def inf_sequential(self, epoch, alpha, 
decay): \"\"\"Summary Args:", "self.variances[idxs, :] mean_i = self.means[idxs, :] dlogZd_dmi2 = 1.0 / (variance_i/alpha - np.sum(k_i[:,", "= 1.0 / post_2 mx = post_1 / post_2 return mx, vx #", "# return params class SGP_Layer_rank_one(object): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description", "Description x_next_2 (TYPE): Description x_prev_1 (TYPE): Description x_prev_2 (TYPE): Description x_prior_1 (TYPE): Description", "= np.zeros((Din, )) d2imed = np.median(x_dist[triu_ind]) for i in range(Din): ls[i] = np.log(d2imed", "alpha=alpha) logZ, dm, dv = \\ self.lik_layer.compute_log_Z(m, v, y, alpha) grad_hyper, grad_cav =", "post_2 mx = post_1 / post_2 return mx, vx # def predict_f(self, inputs):", "vn, extra_res) = \\ self.sgp_layer.forward_prop_thru_cav( [n], cav_m_n, cav_v_n, alpha=alpha) logZn, dmn, dvn =", "= muhat + np.einsum('ndab,ndb->nda', Suhat, dmcav) inner = np.einsum('nda,ndb->ndab', dmcav, dmcav) - 2", "no_pseudo, lik, prior_mean, prior_var) self.sgp_layer = SGP_Layer(self.N, self.Din, self.Dout, self.M) # natural params", "decay) * n2_new else: raise NotImplementedError('unknown mode') def update_factor_x_sequential( self, mode, dmcav, dvcav,", "else: self.inf_sequential(e, alpha, decay) except KeyboardInterrupt: print 'Caught KeyboardInterrupt ...' 
def compute_cavity_x(self, mode,", "kfu = self.Kfu[idxs, :] variance_i = self.variances[idxs, :] mean_i = self.means[idxs, :] #", "[idxs[i]], m_cav, v_cav, dm, dm2, dv, alpha, prop_info) self.sgp_layer.update_posterior(None, new_hypers=False) def update_hypers(self, params):", "(TYPE): Description sn (int): Description updated (bool): Description x_next_1 (TYPE): Description x_next_2 (TYPE):", "= np.sum(self.t1, axis=0) self.Su = np.linalg.inv(self.Suinv) self.mu = np.einsum('dab,db->da', self.Su, self.SuinvMu) def init_hypers(self,", "Returns: TYPE: Description Raises: NotImplementedError: Description \"\"\" new_m = mcav + vcav *", "if alpha == 1: # rho = 0.5 # n1_new = 1.0 /", "post_m_mean) / post_m_std post_v = 0.1 * np.ones_like(post_m) post_2 = 1.0 / post_v", "alpha): \"\"\"Summary Args: n (TYPE): Description mx (TYPE): Description vx (TYPE): Description alpha", "numpy as np import scipy.linalg as npalg from scipy import special from scipy.optimize", "extra_res, x, alpha=alpha) self.sgp_layer.update_factor( idxs, alpha, grad_cav, extra_res, decay=decay) except KeyboardInterrupt: print 'Caught", "Kuuinv, Kuu and its gradients self.zu = np.zeros([M, Din]) self.Kuu = np.zeros([M, M])", "/ var_new_parallel # n2_new = mean_new_parallel / var_new_parallel # n1_ori = 1.0 /", "vx (None, optional): Description alpha (float, optional): Description Returns: TYPE: Description \"\"\" if", "M]) self.Splusmm = np.zeros([Dout, M, M]) # numpy variable for inducing points, Kuuinv,", "v, dm, dv, extra_args, mx, vx, alpha=1.0): \"\"\"Summary Args: m (TYPE): Description v", "np.sum(dv) dsf = 2 * sf2 * dsf2 # compute the gradients Vmm", "self.x_prev_1 = np.zeros((N, Din)) self.x_prev_2 = np.zeros((N, Din)) self.x_next_1 = np.zeros((N, Din)) self.x_next_2", "mb_size, alpha=1.0, prop_mode=PROP_MM): \"\"\"Summary Args: params (TYPE): Description mb_size (TYPE): Description alpha (float,", "alpha)) mean_new_parallel = mean_div_var_i_new * var_new_parallel # if alpha == 1: # rho", "(1 - 
alpha) * cur_n1 + frac_n1 n2_new = (1 - alpha) *", "self.compute_cavity_x_sequential(self.NEXT, [n], alpha) (mprop, vprop, extra_res) = \\ self.dyn_layer.forward_prop_thru_cav( [n], cav_tm1_m, cav_tm1_v, alpha=alpha)", "dKfu_via_vi, axis=1) dsf2, dls, dzu = compute_kfu_derivatives( dKfu_via_logZ, kfu, np.exp(self.ls), sf2, x, self.zu)", "# Args: # inputs (TYPE): Description # Returns: # TYPE: Description # \"\"\"", "self.Kuuinv # compute grads wrt Ahat and Bhat dAhat = np.einsum('nd,nm->ndm', dm, kfu)", "n1_new = (1 - alpha) * cur_n1 + frac_n1 n2_new = (1 -", "var_new_parallel * n2_damped self.variances[idxs, :] = var_new_parallel self.means[idxs, :] = mean_new_parallel def backprop_grads_reg(self,", "and Su for each layer self.update_posterior() class SGPR(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description", "_forward_prop_deterministic_thru_post(self, x): \"\"\"Summary Args: x (TYPE): Description Returns: TYPE: Description \"\"\" Kuuinv =", "= np.zeros((N, Din)) self.x_prior_1 = prior_mean / prior_var self.x_prior_2 = 1.0 / prior_var", "m_t (TYPE): Description v_t (TYPE): Description alpha (TYPE): Description Returns: TYPE: Description \"\"\"", "key_suffix] = self.sf params_zu_i = self.zu params['zu' + key_suffix] = self.zu return params", "compute mu and Su for each layer self.update_posterior(x_train, new_hypers=True) def compute_cavity(self, idxs, alpha):", "def objective_function(self, params, mb_size, alpha=1.0, prop_mode=PROP_MM): \"\"\"Summary Args: params (TYPE): Description mb_size (TYPE):", "dv_s, xb, alpha, prop_info) lik_grad = self.lik_layer.backprop_grads( m_cav, v_cav, dm, dv, alpha, scale_logZ)", "np.sum(logdet) phi_posterior += 0.5 * \\ np.sum(self.mu * np.linalg.solve(self.Su, self.mu)) return phi_posterior def", "- decay) * n2_new elif mode == self.PREV: cur_n1 = self.x_prev_1[idxs, :] cur_n2", "dm *= 0 # dm2 *= 0 # dm2 += 1e-16 # dv", "cur_n2 + (1 - decay) * n2_new else: raise NotImplementedError('unknown mode') def 
get_posterior_x(self):", "Din)) else: if N < 10000: centroids, label = kmeans2(x_train, M, minit='points') else:", "energy = sgp_contrib + lik_contrib grad_all = {} for key in sgp_grad.keys(): grad_all[key]", "= 1 phi_prior = self.compute_phi_prior() phi_post = self.compute_phi_posterior() phi_cav = self.compute_phi_cavity() phi =", "self.t1 = np.zeros([N, Dout, M]) self.t2 = np.zeros([N, Dout, M, M]) # TODO", "v (TYPE): Description dm (TYPE): Description dv (TYPE): Description extra_args (TYPE): Description mx", "alpha * t2n SuinvMuhat = self.SuinvMu - alpha * t1n Suhat = np.linalg.inv(Suinvhat)", "= np.sum(wnS * p_i[:, np.newaxis, :], axis=2) mwn = np.sum(mu * p_i[:, np.newaxis,", "1.0 / post_v post_1 = post_2 * post_m self.tx1 = post_1 - self.t01", "'vcav': dvcav} return grad_hyper, grad_cav def update_factor(self, n, alpha, grad_cav, extra_args, decay=0): \"\"\"Summary", "elif mode == self.PREV: idxs = np.arange(1, self.N) cav_prev_1 = self.x_up_1[idxs, :] +", "TODO: update posterior self.Su = Sunew[0, :, :, :] self.mu = munew[0, :,", "for n in range(self.N): yn = self.y_train[n, :].reshape([1, self.Dout]) cav_m_n, cav_v_n, _, _", "Description grad_cav (TYPE): Description extra_args (TYPE): Description decay (int, optional): Description Returns: TYPE:", "self.PREV, dmt, dvt, cav_t_m, cav_t_v, cav_t_1, cav_t_2, decay=decay, alpha=alpha) def inf_sequential(self, epoch, alpha,", "update_posterior(self, x_train=None, new_hypers=False): \"\"\"Summary Returns: TYPE: Description \"\"\" # compute the posterior approximation", "+ (1.0 - rho) * n2_ori # var_new_parallel = 1.0 / n1_damped #", ":], fmt='+r', yerr=np.sqrt(np.diag(self.Su[0, :, :]))) # axs[1].errorbar(self.zu[:, 0]+0.05, self.mu[1, :], fmt='+r', yerr=np.sqrt(np.diag(self.Su[1, :,", "self.x_prior_1 cav_up_2[0, :] += self.x_prior_2 return cav_up_1 / (cav_up_2 + 1e-16), 1.0 /", "self.Kuuinv Su = self.Su mu = self.mu Suinv = self.Suinv p_i = self.KuuinvKuf[:,", "sn2 = np.exp(2 * self.sn) v_sum = v_t + v_prop + 
sn2 /", "compute_energy=False, display_steps=10): if parallel: return self.run_pep_parallel( train_idxs, no_sweeps, alpha, compute_energy, display_steps) else: #", "sf (int): Description Splusmm (TYPE): Description Su (TYPE): Description Suinv (TYPE): Description SuinvMu", "(e, no_epochs) if not parallel: for n in range(self.N): yn = self.y_train[n, :].reshape([1,", "mn, vn, dmn, dvn, extra_res, xn, alpha=alpha) self.sgp_layer.update_factor( [n], alpha, grad_cav, extra_res) else:", "self.Dout, self.M) # natural params for latent variables self.tx1 = np.zeros((self.N, self.Din)) self.tx2", "grads class SGPR_rank_one(Base_SGPR): \"\"\"Summary Attributes: Din (TYPE): Description Dout (TYPE): Description lik_layer (TYPE):", "Description lik (TYPE): Description M (TYPE): Description N (TYPE): Description sgp_layer (TYPE): Description", "= mean_i / variance_i # n1_damped = rho * n1_new + (1.0 -", "M]) self.Kuuinv = np.zeros([M, M]) # variables for the hyperparameters self.ls = np.zeros([Din,", "= np.einsum( 'ab,ndbc->ndac', Kuuinv, np.einsum('ndab,bc->ndac', Smm, Kuuinv)) - Kuuinv psi0 = np.exp(2 *", "Description # Returns: # TYPE: Description # \"\"\" # if not self.updated: #", "import sys import math import numpy as np import scipy.linalg as npalg from", "2*dhyp[1] + dls grads['zu'] = dhyp[2] + dzu return sgp_contrib, grads class SGPR_rank_one(Base_SGPR):" ]
[ "CheckBox((10, 10, -10, 22), \"Open .py files directly in DrawBot.\", value=getExtensionDefault(\"com.drawBot.openPyFileDirectly\", False), callback=self.openPythonFilesInDrawBotCallback)", "Window((250, 45), \"DrawBot Settings\") self.w.openPythonFilesInDrawBot = CheckBox((10, 10, -10, 22), \"Open .py files", ".py files directly in DrawBot.\", value=getExtensionDefault(\"com.drawBot.openPyFileDirectly\", False), callback=self.openPythonFilesInDrawBotCallback) self.w.open() def openPythonFilesInDrawBotCallback(self, sender): setExtensionDefault(\"com.drawBot.openPyFileDirectly\",", "getExtensionDefault, setExtensionDefault class DrawBotSettingsController(object): def __init__(self): self.w = Window((250, 45), \"DrawBot Settings\") self.w.openPythonFilesInDrawBot", "self.w = Window((250, 45), \"DrawBot Settings\") self.w.openPythonFilesInDrawBot = CheckBox((10, 10, -10, 22), \"Open", "class DrawBotSettingsController(object): def __init__(self): self.w = Window((250, 45), \"DrawBot Settings\") self.w.openPythonFilesInDrawBot = CheckBox((10,", "def __init__(self): self.w = Window((250, 45), \"DrawBot Settings\") self.w.openPythonFilesInDrawBot = CheckBox((10, 10, -10,", "import getExtensionDefault, setExtensionDefault class DrawBotSettingsController(object): def __init__(self): self.w = Window((250, 45), \"DrawBot Settings\")", "* from mojo.extensions import getExtensionDefault, setExtensionDefault class DrawBotSettingsController(object): def __init__(self): self.w = Window((250,", "import * from mojo.extensions import getExtensionDefault, setExtensionDefault class DrawBotSettingsController(object): def __init__(self): self.w =", "= Window((250, 45), \"DrawBot Settings\") self.w.openPythonFilesInDrawBot = CheckBox((10, 10, -10, 22), \"Open .py", "10, -10, 22), \"Open .py files directly in DrawBot.\", value=getExtensionDefault(\"com.drawBot.openPyFileDirectly\", False), callback=self.openPythonFilesInDrawBotCallback) self.w.open()", "self.w.openPythonFilesInDrawBot = 
CheckBox((10, 10, -10, 22), \"Open .py files directly in DrawBot.\", value=getExtensionDefault(\"com.drawBot.openPyFileDirectly\",", "files directly in DrawBot.\", value=getExtensionDefault(\"com.drawBot.openPyFileDirectly\", False), callback=self.openPythonFilesInDrawBotCallback) self.w.open() def openPythonFilesInDrawBotCallback(self, sender): setExtensionDefault(\"com.drawBot.openPyFileDirectly\", sender.get())", "= CheckBox((10, 10, -10, 22), \"Open .py files directly in DrawBot.\", value=getExtensionDefault(\"com.drawBot.openPyFileDirectly\", False),", "45), \"DrawBot Settings\") self.w.openPythonFilesInDrawBot = CheckBox((10, 10, -10, 22), \"Open .py files directly", "from mojo.extensions import getExtensionDefault, setExtensionDefault class DrawBotSettingsController(object): def __init__(self): self.w = Window((250, 45),", "vanilla import * from mojo.extensions import getExtensionDefault, setExtensionDefault class DrawBotSettingsController(object): def __init__(self): self.w", "22), \"Open .py files directly in DrawBot.\", value=getExtensionDefault(\"com.drawBot.openPyFileDirectly\", False), callback=self.openPythonFilesInDrawBotCallback) self.w.open() def openPythonFilesInDrawBotCallback(self,", "__init__(self): self.w = Window((250, 45), \"DrawBot Settings\") self.w.openPythonFilesInDrawBot = CheckBox((10, 10, -10, 22),", "directly in DrawBot.\", value=getExtensionDefault(\"com.drawBot.openPyFileDirectly\", False), callback=self.openPythonFilesInDrawBotCallback) self.w.open() def openPythonFilesInDrawBotCallback(self, sender): setExtensionDefault(\"com.drawBot.openPyFileDirectly\", sender.get()) DrawBotSettingsController()", "setExtensionDefault class DrawBotSettingsController(object): def __init__(self): self.w = Window((250, 45), \"DrawBot Settings\") self.w.openPythonFilesInDrawBot =", "from vanilla import * from mojo.extensions import getExtensionDefault, setExtensionDefault class DrawBotSettingsController(object): def __init__(self):", "Settings\") 
self.w.openPythonFilesInDrawBot = CheckBox((10, 10, -10, 22), \"Open .py files directly in DrawBot.\",", "\"DrawBot Settings\") self.w.openPythonFilesInDrawBot = CheckBox((10, 10, -10, 22), \"Open .py files directly in", "DrawBotSettingsController(object): def __init__(self): self.w = Window((250, 45), \"DrawBot Settings\") self.w.openPythonFilesInDrawBot = CheckBox((10, 10,", "\"Open .py files directly in DrawBot.\", value=getExtensionDefault(\"com.drawBot.openPyFileDirectly\", False), callback=self.openPythonFilesInDrawBotCallback) self.w.open() def openPythonFilesInDrawBotCallback(self, sender):", "mojo.extensions import getExtensionDefault, setExtensionDefault class DrawBotSettingsController(object): def __init__(self): self.w = Window((250, 45), \"DrawBot", "-10, 22), \"Open .py files directly in DrawBot.\", value=getExtensionDefault(\"com.drawBot.openPyFileDirectly\", False), callback=self.openPythonFilesInDrawBotCallback) self.w.open() def" ]
[ "slow=60->fast=60*0.05=3 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 四次及以上拟合可以表达:fast=slow *", "self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) # import logging # logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast, self.ma_slow))", "if self.ma_fast >= self.ma_slow: # 慢线周期必须大于快线 raise ValueError('ma_fast >= self.ma_slow !') # xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉", "动态慢线可设置参数重采样周期最大值,默认90 self.resample_max = kwargs.pop('resample_max', 100) # 动态慢线可设置参数重采样周期最小值,默认10 self.resample_min = kwargs.pop('resample_min', 10) # 动态慢线可设置参数代表慢线的选取阀值,默认0.12", "动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100 kwargs中可选参数:resample_min: 动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10 kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12 \"\"\" # 均线快线周期,默认使用5天均线 self.ma_fast = kwargs.pop('fast', -1) self.dynamic_fast =", "# 动态慢线可设置参数重采样周期最大值,默认90 self.resample_max = kwargs.pop('resample_max', 100) # 动态慢线可设置参数重采样周期最小值,默认10 self.resample_min = kwargs.pop('resample_min', 10) #", "slow=60->fast=60*0.15=9 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 \"\"\" #", "\"\"\" # 均线快线周期,默认使用5天均线 self.ma_fast = kwargs.pop('fast', -1) self.dynamic_fast = False if self.ma_fast ==", "# 要拿大盘最近一个月的走势,准备切片的start,end end_key = int(benchmark_today.iloc[0].key) start_key = end_key - 20 if start_key <", "昨天的慢线值 slow_yesterday = slow_line[-2] if slow_yesterday >= fast_yesterday and fast_today > slow_today: #", "0.038 2015-04-01 0.100 2015-05-13 0.153 2015-06-24 0.060 2015-08-05 0.083 2015-09-16 0.033 2015-10-28 0.060", "slow_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow), min_periods=1) if len(fast_line) >= 2 and len(slow_line) >= 2:", "返回第一个大于change_threshold的slow, change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间 \"\"\" return slow # 迭代np.arange(min, max, 5)都不符合就返回max return 
self.resample_max def fit_month(self,", "fit_month即在回测策略中每一个月执行一次的方法 if self.dynamic_slow: # 一定要先动态算ma_slow,因为动态计算fast依赖slow self.ma_slow = self._dynamic_calc_slow(today) if self.dynamic_fast: # 动态计算快线 self.ma_fast", "np from .ABuFactorBuyBase import AbuFactorBuyXD, BuyCallMixin from ..IndicatorBu.ABuNDMa import calc_ma_from_prices from ..CoreBu.ABuPdHelper import", "0.202 2016-03-02 0.280 2016-04-13 0.016 2016-05-25 0.060 2016-07-06 0.027 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean(): 0.080", "fast_line[-2] # 今天的慢线值 slow_today = slow_line[-1] # 昨天的慢线值 slow_yesterday = slow_line[-2] if slow_yesterday", "+ 1] # 通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象 benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark month line') # 计算这个月最少需要几次拟合才能代表走势曲线 least", "= '{}D'.format(slow) change = abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean() \"\"\" eg: pd_resample(last_kl.close, rule, how='mean') 2014-07-23", "2015-04-01 217.9791 2015-05-13 251.3640 2015-06-24 266.4511 2015-08-05 244.3334 2015-09-16 236.2250 2015-10-28 222.0441 2015-12-09", "2015-05-13 0.153 2015-06-24 0.060 2015-08-05 0.083 2015-09-16 0.033 2015-10-28 0.060 2015-12-09 0.000 2016-01-20", "today): \"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\" # 计算快线 fast_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1) # 计算慢线 slow_line =", "last_kl = self.past_today_kl(today, self.resample_max) if last_kl.empty: # 返回慢线默认值60 return 60 for slow in", "2015-02-18 0.038 2015-04-01 0.100 2015-05-13 0.153 2015-06-24 0.060 2015-08-05 0.083 2015-09-16 0.033 2015-10-28", "division import math import numpy as np from .ABuFactorBuyBase import AbuFactorBuyXD, BuyCallMixin from", "if len(fast_line) >= 2 and len(slow_line) >= 2: # 今天的快线值 fast_today = fast_line[-1]", "258.3640 2014-10-15 240.8663 2014-11-26 220.1552 2015-01-07 206.0070 2015-02-18 198.0932 2015-04-01 217.9791 2015-05-13 251.3640", "默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 要拿大盘最近一个月的走势,准备切片的start,end 
end_key = int(benchmark_today.iloc[0].key) start_key = end_key", "today): \"\"\" 根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下: 如果大盘最近一个月走势使用: 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 二次拟合可以表达:fast=slow * 0.15 eg:", "calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1) # 计算慢线 slow_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow), min_periods=1) if len(fast_line) >=", "BuyCallMixin from ..IndicatorBu.ABuNDMa import calc_ma_from_prices from ..CoreBu.ABuPdHelper import pd_resample from ..TLineBu.ABuTL import AbuTLine", "slow_today = slow_line[-1] # 昨天的慢线值 slow_yesterday = slow_line[-2] if slow_yesterday >= fast_yesterday and", "2016-01-20 177.2303 2016-03-02 226.8766 2016-04-13 230.6000 2016-05-25 216.7596 2016-07-06 222.6420 abs(pd_resample(last_kl.close, rule, how='mean').pct_change())", "self.ma_slow == -1: self.ma_slow = 60 self.dynamic_slow = True # 动态慢线可设置参数重采样周期最大值,默认90 self.resample_max =", "216.7596 2016-07-06 222.6420 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()) 2014-09-03 0.037 2014-10-15 0.068 2014-11-26 0.086 2015-01-07", "fast_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1) # 计算慢线 slow_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow), min_periods=1) if", "2015-02-18 198.0932 2015-04-01 217.9791 2015-05-13 251.3640 2015-06-24 266.4511 2015-08-05 244.3334 2015-09-16 236.2250 2015-10-28", "\"\"\" return slow # 迭代np.arange(min, max, 5)都不符合就返回max return self.resample_max def fit_month(self, today): #", "230.6000 2016-05-25 216.7596 2016-07-06 222.6420 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()) 2014-09-03 0.037 2014-10-15 0.068 2014-11-26", "'benchmark month line') # 计算这个月最少需要几次拟合才能代表走势曲线 least = benchmark_month_line.show_least_valid_poly(show=False) if least == 1: #", "60 for slow in np.arange(self.resample_min, self.resample_max, 5): rule = '{}D'.format(slow) change = abs(pd_resample(last_kl.close,", "拿出大盘的今天 benchmark_today = 
benchmark_df[benchmark_df.date == today.date] if benchmark_today.empty: # 默认值为慢线的0.15 return math.ceil(self.ma_slow *", "# logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast, self.ma_slow)) def fit_day(self, today): \"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\" # 计算快线 fast_line =", "benchmark_df[start_key:end_key + 1] # 通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象 benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark month line') # 计算这个月最少需要几次拟合才能代表走势曲线", "in np.arange(self.resample_min, self.resample_max, 5): rule = '{}D'.format(slow) change = abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean() \"\"\"", "动态慢线可设置参数代表慢线的选取阀值,默认0.12 self.change_threshold = kwargs.pop('change_threshold', 0.12) if self.ma_fast >= self.ma_slow: # 慢线周期必须大于快线 raise ValueError('ma_fast", "month line') # 计算这个月最少需要几次拟合才能代表走势曲线 least = benchmark_month_line.show_least_valid_poly(show=False) if least == 1: # 一次拟合可以表达:fast=slow", "0.5 eg: slow=60->fast=60*0.5=30 \"\"\" # 策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势 benchmark_df = self.benchmark.kl_pd # 拿出大盘的今天 benchmark_today =", "= kwargs.pop('resample_min', 10) # 动态慢线可设置参数代表慢线的选取阀值,默认0.12 self.change_threshold = kwargs.pop('change_threshold', 0.12) if self.ma_fast >= self.ma_slow:", "3: # 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 return math.ceil(self.ma_slow * 0.3) else: #", "benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark month line') # 计算这个月最少需要几次拟合才能代表走势曲线 least = benchmark_month_line.show_least_valid_poly(show=False) if least", "int(self.ma_fast), min_periods=1) # 计算慢线 slow_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow), min_periods=1) if len(fast_line) >= 2", "快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入 return self.buy_tomorrow() \"\"\"可以选择是否覆盖AbuFactorBuyXD中的buy_tomorrow来增大交易频率,默认基类中self.skip_days = self.xd降低了频率\"\"\" # def buy_tomorrow(self): # return self.make_buy_order(self.today_ind)", "pd_resample(last_kl.close, rule, how='mean') 
2014-07-23 249.0728 2014-09-03 258.3640 2014-10-15 240.8663 2014-11-26 220.1552 2015-01-07 206.0070", "self.dynamic_fast = False if self.ma_fast == -1: self.ma_fast = 5 self.dynamic_fast = True", "2014-07-23 249.0728 2014-09-03 258.3640 2014-10-15 240.8663 2014-11-26 220.1552 2015-01-07 206.0070 2015-02-18 198.0932 2015-04-01", "!') # xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉 kwargs['xd'] = self.ma_slow + 1 # 设置好xd后可以直接使用基类针对xd的初始化 super(AbuDoubleMaBuy, self)._init_self(**kwargs) #", ".ABuFactorBuyBase import AbuFactorBuyXD, BuyCallMixin from ..IndicatorBu.ABuNDMa import calc_ma_from_prices from ..CoreBu.ABuPdHelper import pd_resample from", "* 0.15 eg: slow=60->fast=60*0.15=9 return math.ceil(self.ma_slow * 0.15) elif least == 3: #", "# 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 要拿大盘最近一个月的走势,准备切片的start,end end_key = int(benchmark_today.iloc[0].key) start_key =", "math.ceil(self.ma_slow * 0.5) def _dynamic_calc_slow(self, today): \"\"\" 动态决策慢线的值,规则如下: 切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样, 对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值, 对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度, 上述的变换周期由10, 15,20,30....进行迭代,", "slow=60->fast=60*0.5=30 \"\"\" # 策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势 benchmark_df = self.benchmark.kl_pd # 拿出大盘的今天 benchmark_today = benchmark_df[benchmark_df.date ==", "fast_today > slow_today: # 快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入 return self.buy_tomorrow() \"\"\"可以选择是否覆盖AbuFactorBuyXD中的buy_tomorrow来增大交易频率,默认基类中self.skip_days = self.xd降低了频率\"\"\" # def", "0.05) elif least == 2: # 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 return math.ceil(self.ma_slow", "= '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) # import logging # logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast, self.ma_slow)) def", "249.0728 2014-09-03 258.3640 2014-10-15 240.8663 2014-11-26 220.1552 2015-01-07 206.0070 2015-02-18 198.0932 2015-04-01 217.9791", "import AbuTLine 
__author__ = '阿布' __weixin__ = 'abu_quant' # noinspection PyAttributeOutsideInit class AbuDoubleMaBuy(AbuFactorBuyXD,", "fit_day(self, today): \"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\" # 计算快线 fast_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1) # 计算慢线 slow_line", "动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10 kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12 \"\"\" # 均线快线周期,默认使用5天均线 self.ma_fast = kwargs.pop('fast', -1) self.dynamic_fast = False if", "计算这个月最少需要几次拟合才能代表走势曲线 least = benchmark_month_line.show_least_valid_poly(show=False) if least == 1: # 一次拟合可以表达:fast=slow * 0.05 eg:", "对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值, 对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度, 上述的变换周期由10, 15,20,30....进行迭代, 直到计算出第一个重新 采样的周期内的平均变化幅度 > 0.12的周期做为slow的取值 \"\"\" last_kl = self.past_today_kl(today, self.resample_max)", "self.resample_max) if last_kl.empty: # 返回慢线默认值60 return 60 for slow in np.arange(self.resample_min, self.resample_max, 5):", "206.0070 2015-02-18 198.0932 2015-04-01 217.9791 2015-05-13 251.3640 2015-06-24 266.4511 2015-08-05 244.3334 2015-09-16 236.2250", "# 迭代np.arange(min, max, 5)都不符合就返回max return self.resample_max def fit_month(self, today): # fit_month即在回测策略中每一个月执行一次的方法 if self.dynamic_slow:", "\"\"\" if change > self.change_threshold: \"\"\" 返回第一个大于change_threshold的slow, change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间 \"\"\" return slow # 迭代np.arange(min,", "return math.ceil(self.ma_slow * 0.5) def _dynamic_calc_slow(self, today): \"\"\" 动态决策慢线的值,规则如下: 切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样, 对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值, 对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度, 上述的变换周期由10,", "\"\"\" 返回第一个大于change_threshold的slow, change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间 \"\"\" return slow # 迭代np.arange(min, max, 5)都不符合就返回max return self.resample_max def", "== 3: # 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 return math.ceil(self.ma_slow * 0.3) else:", "today.date, self.ma_fast, self.ma_slow)) def fit_day(self, 
today): \"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\" # 计算快线 fast_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast),", "super(AbuDoubleMaBuy, self)._init_self(**kwargs) # 在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) def _dynamic_calc_fast(self, today): \"\"\"", "if start_key < 0: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 使用切片切出从今天开始向前20天的数据 benchmark_month", "import logging # logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast, self.ma_slow)) def fit_day(self, today): \"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\" # 计算快线", "__future__ import absolute_import from __future__ import print_function from __future__ import division import math", "eg: slow=60->fast=60*0.15=9 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 \"\"\"", "self.ma_fast, self.ma_slow)) def fit_day(self, today): \"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\" # 计算快线 fast_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1)", "0.5 eg: slow=60->fast=60*0.5=30 return math.ceil(self.ma_slow * 0.5) def _dynamic_calc_slow(self, today): \"\"\" 动态决策慢线的值,规则如下: 切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样,", "= int(benchmark_today.iloc[0].key) start_key = end_key - 20 if start_key < 0: # 默认值为慢线的0.15", "-1) self.dynamic_slow = False if self.ma_slow == -1: self.ma_slow = 60 self.dynamic_slow =", "= False if self.ma_slow == -1: self.ma_slow = 60 self.dynamic_slow = True #", "self.ma_fast, self.ma_slow) def _dynamic_calc_fast(self, today): \"\"\" 根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下: 如果大盘最近一个月走势使用: 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3", "slow_yesterday >= fast_yesterday and fast_today > slow_today: # 快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入 return self.buy_tomorrow() \"\"\"可以选择是否覆盖AbuFactorBuyXD中的buy_tomorrow来增大交易频率,默认基类中self.skip_days", "kwargs.pop('slow', -1) self.dynamic_slow = False if self.ma_slow == -1: self.ma_slow 
= 60 self.dynamic_slow", "= '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) def _dynamic_calc_fast(self, today): \"\"\" 根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下: 如果大盘最近一个月走势使用: 一次拟合可以表达:fast=slow * 0.05", "== 1: # 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 return math.ceil(self.ma_slow * 0.05) elif", "== -1: self.ma_fast = 5 self.dynamic_fast = True # 均线慢线周期,默认使用60天均线 self.ma_slow = kwargs.pop('slow',", "# 今天的慢线值 slow_today = slow_line[-1] # 昨天的慢线值 slow_yesterday = slow_line[-2] if slow_yesterday >=", "# 在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) def _dynamic_calc_fast(self, today): \"\"\" 根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下: 如果大盘最近一个月走势使用:", "= benchmark_df[benchmark_df.date == today.date] if benchmark_today.empty: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) #", "import AbuFactorBuyXD, BuyCallMixin from ..IndicatorBu.ABuNDMa import calc_ma_from_prices from ..CoreBu.ABuPdHelper import pd_resample from ..TLineBu.ABuTL", "self.resample_max = kwargs.pop('resample_max', 100) # 动态慢线可设置参数重采样周期最小值,默认10 self.resample_min = kwargs.pop('resample_min', 10) # 动态慢线可设置参数代表慢线的选取阀值,默认0.12 self.change_threshold", "fit_month(self, today): # fit_month即在回测策略中每一个月执行一次的方法 if self.dynamic_slow: # 一定要先动态算ma_slow,因为动态计算fast依赖slow self.ma_slow = self._dynamic_calc_slow(today) if self.dynamic_fast:", "0.083 2015-09-16 0.033 2015-10-28 0.060 2015-12-09 0.000 2016-01-20 0.202 2016-03-02 0.280 2016-04-13 0.016", "0.15) # 使用切片切出从今天开始向前20天的数据 benchmark_month = benchmark_df[start_key:end_key + 1] # 通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象 benchmark_month_line = AbuTLine(benchmark_month.close,", "0.3) else: # 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 return math.ceil(self.ma_slow * 0.5) def", "对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度, 上述的变换周期由10, 15,20,30....进行迭代, 直到计算出第一个重新 采样的周期内的平均变化幅度 > 0.12的周期做为slow的取值 \"\"\" last_kl = self.past_today_kl(today, self.resample_max) if", "# 
fit_month即在回测策略中每一个月执行一次的方法 if self.dynamic_slow: # 一定要先动态算ma_slow,因为动态计算fast依赖slow self.ma_slow = self._dynamic_calc_slow(today) if self.dynamic_fast: # 动态计算快线", "__future__ import print_function from __future__ import division import math import numpy as np", "返回慢线默认值60 return 60 for slow in np.arange(self.resample_min, self.resample_max, 5): rule = '{}D'.format(slow) change", "self.dynamic_slow = False if self.ma_slow == -1: self.ma_slow = 60 self.dynamic_slow = True", ">= self.ma_slow: # 慢线周期必须大于快线 raise ValueError('ma_fast >= self.ma_slow !') # xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉 kwargs['xd'] =", "\"\"\" from __future__ import absolute_import from __future__ import print_function from __future__ import division", "251.3640 2015-06-24 266.4511 2015-08-05 244.3334 2015-09-16 236.2250 2015-10-28 222.0441 2015-12-09 222.0574 2016-01-20 177.2303", "today): \"\"\" 动态决策慢线的值,规则如下: 切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样, 对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值, 对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度, 上述的变换周期由10, 15,20,30....进行迭代, 直到计算出第一个重新 采样的周期内的平均变化幅度 > 0.12的周期做为slow的取值 \"\"\"", "self.ma_fast == -1: self.ma_fast = 5 self.dynamic_fast = True # 均线慢线周期,默认使用60天均线 self.ma_slow =", "and fast_today > slow_today: # 快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入 return self.buy_tomorrow() \"\"\"可以选择是否覆盖AbuFactorBuyXD中的buy_tomorrow来增大交易频率,默认基类中self.skip_days = self.xd降低了频率\"\"\" #", "222.6420 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()) 2014-09-03 0.037 2014-10-15 0.068 2014-11-26 0.086 2015-01-07 0.064 2015-02-18", "math.ceil(self.ma_slow * 0.15) # 要拿大盘最近一个月的走势,准备切片的start,end end_key = int(benchmark_today.iloc[0].key) start_key = end_key - 20", "2015-12-09 222.0574 2016-01-20 177.2303 2016-03-02 226.8766 2016-04-13 230.6000 2016-05-25 216.7596 2016-07-06 222.6420 abs(pd_resample(last_kl.close,", "四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 return math.ceil(self.ma_slow * 0.5) def _dynamic_calc_slow(self, today): \"\"\"", "均线快线周期,默认不设置,使用自适应动态快线 
kwargs中可选参数:slow: 均线慢线周期,默认不设置,使用自适应动态慢线 kwargs中可选参数:resample_max: 动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100 kwargs中可选参数:resample_min: 动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10 kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12 \"\"\" # 均线快线周期,默认使用5天均线 self.ma_fast =", "slow_line[-1] # 昨天的慢线值 slow_yesterday = slow_line[-2] if slow_yesterday >= fast_yesterday and fast_today >", "import print_function from __future__ import division import math import numpy as np from", "# 拿出大盘的今天 benchmark_today = benchmark_df[benchmark_df.date == today.date] if benchmark_today.empty: # 默认值为慢线的0.15 return math.ceil(self.ma_slow", "* 0.05) elif least == 2: # 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 return", "迭代np.arange(min, max, 5)都不符合就返回max return self.resample_max def fit_month(self, today): # fit_month即在回测策略中每一个月执行一次的方法 if self.dynamic_slow: #", "fast_today = fast_line[-1] # 昨天的快线值 fast_yesterday = fast_line[-2] # 今天的慢线值 slow_today = slow_line[-1]", "2014-10-15 240.8663 2014-11-26 220.1552 2015-01-07 206.0070 2015-02-18 198.0932 2015-04-01 217.9791 2015-05-13 251.3640 2015-06-24", "直到计算出第一个重新 采样的周期内的平均变化幅度 > 0.12的周期做为slow的取值 \"\"\" last_kl = self.past_today_kl(today, self.resample_max) if last_kl.empty: # 返回慢线默认值60", "0.153 2015-06-24 0.060 2015-08-05 0.083 2015-09-16 0.033 2015-10-28 0.060 2015-12-09 0.000 2016-01-20 0.202", "kwargs['xd'] = self.ma_slow + 1 # 设置好xd后可以直接使用基类针对xd的初始化 super(AbuDoubleMaBuy, self)._init_self(**kwargs) # 在输出生成的orders_pd中显示的名字 self.factor_name =", "end_key = int(benchmark_today.iloc[0].key) start_key = end_key - 20 if start_key < 0: #", "# noinspection PyAttributeOutsideInit class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin): \"\"\"示例买入动态自适应双均线策略\"\"\" def _init_self(self, **kwargs): \"\"\" kwargs中可选参数:fast: 均线快线周期,默认不设置,使用自适应动态快线", "line') # 计算这个月最少需要几次拟合才能代表走势曲线 least = benchmark_month_line.show_least_valid_poly(show=False) if least == 1: # 一次拟合可以表达:fast=slow *", "eg: slow=60->fast=60*0.05=3 return math.ceil(self.ma_slow * 0.05) elif least == 2: # 
二次拟合可以表达:fast=slow *", "226.8766 2016-04-13 230.6000 2016-05-25 216.7596 2016-07-06 222.6420 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()) 2014-09-03 0.037 2014-10-15", "logging # logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast, self.ma_slow)) def fit_day(self, today): \"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\" # 计算快线 fast_line", "math.ceil(self.ma_slow * 0.3) else: # 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 return math.ceil(self.ma_slow *", "0.12) if self.ma_fast >= self.ma_slow: # 慢线周期必须大于快线 raise ValueError('ma_fast >= self.ma_slow !') #", "self.dynamic_slow: # 一定要先动态算ma_slow,因为动态计算fast依赖slow self.ma_slow = self._dynamic_calc_slow(today) if self.dynamic_fast: # 动态计算快线 self.ma_fast = self._dynamic_calc_fast(today)", "# 使用切片切出从今天开始向前20天的数据 benchmark_month = benchmark_df[start_key:end_key + 1] # 通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象 benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark", "2015-01-07 0.064 2015-02-18 0.038 2015-04-01 0.100 2015-05-13 0.153 2015-06-24 0.060 2015-08-05 0.083 2015-09-16", "设置好xd后可以直接使用基类针对xd的初始化 super(AbuDoubleMaBuy, self)._init_self(**kwargs) # 在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) def _dynamic_calc_fast(self, today):", "def _dynamic_calc_slow(self, today): \"\"\" 动态决策慢线的值,规则如下: 切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样, 对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值, 对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度, 上述的变换周期由10, 15,20,30....进行迭代, 直到计算出第一个重新 采样的周期内的平均变化幅度 >", "> self.change_threshold: \"\"\" 返回第一个大于change_threshold的slow, change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间 \"\"\" return slow # 迭代np.arange(min, max, 5)都不符合就返回max return", "* 0.3) else: # 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 return math.ceil(self.ma_slow * 0.5)", "_dynamic_calc_slow(self, today): \"\"\" 动态决策慢线的值,规则如下: 切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样, 对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值, 
对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度, 上述的变换周期由10, 15,20,30....进行迭代, 直到计算出第一个重新 采样的周期内的平均变化幅度 > 0.12的周期做为slow的取值", "self.ma_fast = self._dynamic_calc_fast(today) # 动态重新计算后,改变在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) # import logging", "return math.ceil(self.ma_slow * 0.15) # 要拿大盘最近一个月的走势,准备切片的start,end end_key = int(benchmark_today.iloc[0].key) start_key = end_key -", "self._dynamic_calc_fast(today) # 动态重新计算后,改变在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) # import logging # logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name,", "return slow # 迭代np.arange(min, max, 5)都不符合就返回max return self.resample_max def fit_month(self, today): # fit_month即在回测策略中每一个月执行一次的方法", ">= 2: # 今天的快线值 fast_today = fast_line[-1] # 昨天的快线值 fast_yesterday = fast_line[-2] #", "self.change_threshold: \"\"\" 返回第一个大于change_threshold的slow, change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间 \"\"\" return slow # 迭代np.arange(min, max, 5)都不符合就返回max return self.resample_max", "least == 3: # 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 return math.ceil(self.ma_slow * 0.3)", "benchmark_df[benchmark_df.date == today.date] if benchmark_today.empty: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 要拿大盘最近一个月的走势,准备切片的start,end", "2016-07-06 222.6420 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()) 2014-09-03 0.037 2014-10-15 0.068 2014-11-26 0.086 2015-01-07 0.064", "5): rule = '{}D'.format(slow) change = abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean() \"\"\" eg: pd_resample(last_kl.close, rule,", "print_function from __future__ import division import math import numpy as np from .ABuFactorBuyBase", "2016-05-25 0.060 2016-07-06 0.027 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean(): 0.080 \"\"\" if change > self.change_threshold:", "# 一定要先动态算ma_slow,因为动态计算fast依赖slow 
self.ma_slow = self._dynamic_calc_slow(today) if self.dynamic_fast: # 动态计算快线 self.ma_fast = self._dynamic_calc_fast(today) #", "slow=60->fast=60*0.05=3 return math.ceil(self.ma_slow * 0.05) elif least == 2: # 二次拟合可以表达:fast=slow * 0.15", "15,20,30....进行迭代, 直到计算出第一个重新 采样的周期内的平均变化幅度 > 0.12的周期做为slow的取值 \"\"\" last_kl = self.past_today_kl(today, self.resample_max) if last_kl.empty: #", "False if self.ma_fast == -1: self.ma_fast = 5 self.dynamic_fast = True # 均线慢线周期,默认使用60天均线", "222.0441 2015-12-09 222.0574 2016-01-20 177.2303 2016-03-02 226.8766 2016-04-13 230.6000 2016-05-25 216.7596 2016-07-06 222.6420", "min_periods=1) if len(fast_line) >= 2 and len(slow_line) >= 2: # 今天的快线值 fast_today =", "= AbuTLine(benchmark_month.close, 'benchmark month line') # 计算这个月最少需要几次拟合才能代表走势曲线 least = benchmark_month_line.show_least_valid_poly(show=False) if least ==", "动态计算快线 self.ma_fast = self._dynamic_calc_fast(today) # 动态重新计算后,改变在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) # import", "eg: slow=60->fast=60*0.05=3 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 四次及以上拟合可以表达:fast=slow", "_dynamic_calc_fast(self, today): \"\"\" 根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下: 如果大盘最近一个月走势使用: 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 二次拟合可以表达:fast=slow * 0.15", "self.ma_fast, self.ma_slow) # import logging # logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast, self.ma_slow)) def fit_day(self, today):", "self.ma_slow) def _dynamic_calc_fast(self, today): \"\"\" 根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下: 如果大盘最近一个月走势使用: 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 二次拟合可以表达:fast=slow", "class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin): \"\"\"示例买入动态自适应双均线策略\"\"\" def _init_self(self, **kwargs): \"\"\" kwargs中可选参数:fast: 均线快线周期,默认不设置,使用自适应动态快线 kwargs中可选参数:slow: 均线慢线周期,默认不设置,使用自适应动态慢线 kwargs中可选参数:resample_max:", "# 昨天的快线值 fast_yesterday = 
fast_line[-2] # 今天的慢线值 slow_today = slow_line[-1] # 昨天的慢线值 slow_yesterday", "self)._init_self(**kwargs) # 在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) def _dynamic_calc_fast(self, today): \"\"\" 根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下:", "if self.dynamic_fast: # 动态计算快线 self.ma_fast = self._dynamic_calc_fast(today) # 动态重新计算后,改变在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast,", "self.resample_max def fit_month(self, today): # fit_month即在回测策略中每一个月执行一次的方法 if self.dynamic_slow: # 一定要先动态算ma_slow,因为动态计算fast依赖slow self.ma_slow = self._dynamic_calc_slow(today)", "# 计算这个月最少需要几次拟合才能代表走势曲线 least = benchmark_month_line.show_least_valid_poly(show=False) if least == 1: # 一次拟合可以表达:fast=slow * 0.05", "0.016 2016-05-25 0.060 2016-07-06 0.027 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean(): 0.080 \"\"\" if change >", "AbuFactorBuyXD, BuyCallMixin from ..IndicatorBu.ABuNDMa import calc_ma_from_prices from ..CoreBu.ABuPdHelper import pd_resample from ..TLineBu.ABuTL import", "> slow_today: # 快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入 return self.buy_tomorrow() \"\"\"可以选择是否覆盖AbuFactorBuyXD中的buy_tomorrow来增大交易频率,默认基类中self.skip_days = self.xd降低了频率\"\"\" # def buy_tomorrow(self):", "change = abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean() \"\"\" eg: pd_resample(last_kl.close, rule, how='mean') 2014-07-23 249.0728 2014-09-03", "计算慢线 slow_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow), min_periods=1) if len(fast_line) >= 2 and len(slow_line) >=", "..TLineBu.ABuTL import AbuTLine __author__ = '阿布' __weixin__ = 'abu_quant' # noinspection PyAttributeOutsideInit class", "how='mean') 2014-07-23 249.0728 2014-09-03 258.3640 2014-10-15 240.8663 2014-11-26 220.1552 2015-01-07 206.0070 2015-02-18 198.0932", "* 0.15 eg: slow=60->fast=60*0.15=9 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 四次及以上拟合可以表达:fast=slow * 0.5 eg:", 
"0.3 eg: slow=60->fast=60*0.3=18 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 \"\"\" # 策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势 benchmark_df =", "slow in np.arange(self.resample_min, self.resample_max, 5): rule = '{}D'.format(slow) change = abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean()", "60 self.dynamic_slow = True # 动态慢线可设置参数重采样周期最大值,默认90 self.resample_max = kwargs.pop('resample_max', 100) # 动态慢线可设置参数重采样周期最小值,默认10 self.resample_min", "= 5 self.dynamic_fast = True # 均线慢线周期,默认使用60天均线 self.ma_slow = kwargs.pop('slow', -1) self.dynamic_slow =", "benchmark_today.empty: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 要拿大盘最近一个月的走势,准备切片的start,end end_key = int(benchmark_today.iloc[0].key) start_key", "0.100 2015-05-13 0.153 2015-06-24 0.060 2015-08-05 0.083 2015-09-16 0.033 2015-10-28 0.060 2015-12-09 0.000", "慢线周期必须大于快线 raise ValueError('ma_fast >= self.ma_slow !') # xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉 kwargs['xd'] = self.ma_slow + 1", "动态慢线可设置参数重采样周期最小值,默认10 self.resample_min = kwargs.pop('resample_min', 10) # 动态慢线可设置参数代表慢线的选取阀值,默认0.12 self.change_threshold = kwargs.pop('change_threshold', 0.12) if self.ma_fast", "and len(slow_line) >= 2: # 今天的快线值 fast_today = fast_line[-1] # 昨天的快线值 fast_yesterday =", "__author__ = '阿布' __weixin__ = 'abu_quant' # noinspection PyAttributeOutsideInit class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin): \"\"\"示例买入动态自适应双均线策略\"\"\"", "elif least == 3: # 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 return math.ceil(self.ma_slow *", "0.080 \"\"\" if change > self.change_threshold: \"\"\" 返回第一个大于change_threshold的slow, change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间 \"\"\" return slow #", "-*- \"\"\" 买入择时示例因子:动态自适应双均线策略 \"\"\" from __future__ import absolute_import from __future__ import print_function from", "slow=60->fast=60*0.5=30 return math.ceil(self.ma_slow * 0.5) def _dynamic_calc_slow(self, today): \"\"\" 动态决策慢线的值,规则如下: 
切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样, 对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值, 对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度,", "**kwargs): \"\"\" kwargs中可选参数:fast: 均线快线周期,默认不设置,使用自适应动态快线 kwargs中可选参数:slow: 均线慢线周期,默认不设置,使用自适应动态慢线 kwargs中可选参数:resample_max: 动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100 kwargs中可选参数:resample_min: 动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10 kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12 \"\"\" #", "# 动态慢线可设置参数代表慢线的选取阀值,默认0.12 self.change_threshold = kwargs.pop('change_threshold', 0.12) if self.ma_fast >= self.ma_slow: # 慢线周期必须大于快线 raise", "根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下: 如果大盘最近一个月走势使用: 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 三次拟合可以表达:fast=slow", "kwargs.pop('change_threshold', 0.12) if self.ma_fast >= self.ma_slow: # 慢线周期必须大于快线 raise ValueError('ma_fast >= self.ma_slow !')", "2015-08-05 244.3334 2015-09-16 236.2250 2015-10-28 222.0441 2015-12-09 222.0574 2016-01-20 177.2303 2016-03-02 226.8766 2016-04-13", "2015-06-24 0.060 2015-08-05 0.083 2015-09-16 0.033 2015-10-28 0.060 2015-12-09 0.000 2016-01-20 0.202 2016-03-02", "slow=60->fast=60*0.3=18 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 \"\"\" # 策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势 benchmark_df = self.benchmark.kl_pd #", "least == 1: # 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 return math.ceil(self.ma_slow * 0.05)", "= kwargs.pop('change_threshold', 0.12) if self.ma_fast >= self.ma_slow: # 慢线周期必须大于快线 raise ValueError('ma_fast >= self.ma_slow", "math.ceil(self.ma_slow * 0.15) elif least == 3: # 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18", "2014-09-03 258.3640 2014-10-15 240.8663 2014-11-26 220.1552 2015-01-07 206.0070 2015-02-18 198.0932 2015-04-01 217.9791 2015-05-13", "# 策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势 benchmark_df = self.benchmark.kl_pd # 拿出大盘的今天 benchmark_today = benchmark_df[benchmark_df.date == today.date] if", "= 
self._dynamic_calc_fast(today) # 动态重新计算后,改变在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) # import logging #", "- 20 if start_key < 0: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) #", "..IndicatorBu.ABuNDMa import calc_ma_from_prices from ..CoreBu.ABuPdHelper import pd_resample from ..TLineBu.ABuTL import AbuTLine __author__ =", "kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12 \"\"\" # 均线快线周期,默认使用5天均线 self.ma_fast = kwargs.pop('fast', -1) self.dynamic_fast = False if self.ma_fast", "* 0.15) elif least == 3: # 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 return", "# 快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入 return self.buy_tomorrow() \"\"\"可以选择是否覆盖AbuFactorBuyXD中的buy_tomorrow来增大交易频率,默认基类中self.skip_days = self.xd降低了频率\"\"\" # def buy_tomorrow(self): # return", "0.000 2016-01-20 0.202 2016-03-02 0.280 2016-04-13 0.016 2016-05-25 0.060 2016-07-06 0.027 abs(pd_resample(last_kl.close, rule,", "slow_today: # 快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入 return self.buy_tomorrow() \"\"\"可以选择是否覆盖AbuFactorBuyXD中的buy_tomorrow来增大交易频率,默认基类中self.skip_days = self.xd降低了频率\"\"\" # def buy_tomorrow(self): #", ">= fast_yesterday and fast_today > slow_today: # 快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入 return self.buy_tomorrow() \"\"\"可以选择是否覆盖AbuFactorBuyXD中的buy_tomorrow来增大交易频率,默认基类中self.skip_days =", "kwargs中可选参数:fast: 均线快线周期,默认不设置,使用自适应动态快线 kwargs中可选参数:slow: 均线慢线周期,默认不设置,使用自适应动态慢线 kwargs中可选参数:resample_max: 动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100 kwargs中可选参数:resample_min: 动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10 kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12 \"\"\" # 均线快线周期,默认使用5天均线 self.ma_fast", "how='mean').pct_change()).mean() \"\"\" eg: pd_resample(last_kl.close, rule, how='mean') 2014-07-23 249.0728 2014-09-03 258.3640 2014-10-15 240.8663 2014-11-26", "2015-06-24 266.4511 2015-08-05 244.3334 2015-09-16 236.2250 2015-10-28 222.0441 2015-12-09 222.0574 2016-01-20 177.2303 2016-03-02", "0.060 2016-07-06 0.027 
abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean(): 0.080 \"\"\" if change > self.change_threshold: \"\"\"", "一定要先动态算ma_slow,因为动态计算fast依赖slow self.ma_slow = self._dynamic_calc_slow(today) if self.dynamic_fast: # 动态计算快线 self.ma_fast = self._dynamic_calc_fast(today) # 动态重新计算后,改变在输出生成的orders_pd中显示的名字", "kwargs.pop('resample_min', 10) # 动态慢线可设置参数代表慢线的选取阀值,默认0.12 self.change_threshold = kwargs.pop('change_threshold', 0.12) if self.ma_fast >= self.ma_slow: #", "AbuTLine __author__ = '阿布' __weixin__ = 'abu_quant' # noinspection PyAttributeOutsideInit class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin):", "0.12的周期做为slow的取值 \"\"\" last_kl = self.past_today_kl(today, self.resample_max) if last_kl.empty: # 返回慢线默认值60 return 60 for", "def _init_self(self, **kwargs): \"\"\" kwargs中可选参数:fast: 均线快线周期,默认不设置,使用自适应动态快线 kwargs中可选参数:slow: 均线慢线周期,默认不设置,使用自适应动态慢线 kwargs中可选参数:resample_max: 动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100 kwargs中可选参数:resample_min: 动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10 kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12", "min_periods=1) # 计算慢线 slow_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow), min_periods=1) if len(fast_line) >= 2 and", "xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉 kwargs['xd'] = self.ma_slow + 1 # 设置好xd后可以直接使用基类针对xd的初始化 super(AbuDoubleMaBuy, self)._init_self(**kwargs) # 在输出生成的orders_pd中显示的名字 self.factor_name", "'阿布' __weixin__ = 'abu_quant' # noinspection PyAttributeOutsideInit class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin): \"\"\"示例买入动态自适应双均线策略\"\"\" def _init_self(self,", "2015-10-28 0.060 2015-12-09 0.000 2016-01-20 0.202 2016-03-02 0.280 2016-04-13 0.016 2016-05-25 0.060 2016-07-06", "2014-11-26 0.086 2015-01-07 0.064 2015-02-18 0.038 2015-04-01 0.100 2015-05-13 0.153 2015-06-24 0.060 2015-08-05", "# 通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象 benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark month line') # 计算这个月最少需要几次拟合才能代表走势曲线 least = benchmark_month_line.show_least_valid_poly(show=False)", "0.060 
2015-08-05 0.083 2015-09-16 0.033 2015-10-28 0.060 2015-12-09 0.000 2016-01-20 0.202 2016-03-02 0.280", "start_key < 0: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 使用切片切出从今天开始向前20天的数据 benchmark_month =", "abs(pd_resample(last_kl.close, rule, how='mean').pct_change()) 2014-09-03 0.037 2014-10-15 0.068 2014-11-26 0.086 2015-01-07 0.064 2015-02-18 0.038", "self.ma_slow = kwargs.pop('slow', -1) self.dynamic_slow = False if self.ma_slow == -1: self.ma_slow =", "self.ma_slow = self._dynamic_calc_slow(today) if self.dynamic_fast: # 动态计算快线 self.ma_fast = self._dynamic_calc_fast(today) # 动态重新计算后,改变在输出生成的orders_pd中显示的名字 self.factor_name", "'abu_quant' # noinspection PyAttributeOutsideInit class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin): \"\"\"示例买入动态自适应双均线策略\"\"\" def _init_self(self, **kwargs): \"\"\" kwargs中可选参数:fast:", "2016-03-02 226.8766 2016-04-13 230.6000 2016-05-25 216.7596 2016-07-06 222.6420 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()) 2014-09-03 0.037", "# xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉 kwargs['xd'] = self.ma_slow + 1 # 设置好xd后可以直接使用基类针对xd的初始化 super(AbuDoubleMaBuy, self)._init_self(**kwargs) # 在输出生成的orders_pd中显示的名字", "return 60 for slow in np.arange(self.resample_min, self.resample_max, 5): rule = '{}D'.format(slow) change =", "\"\"\" eg: pd_resample(last_kl.close, rule, how='mean') 2014-07-23 249.0728 2014-09-03 258.3640 2014-10-15 240.8663 2014-11-26 220.1552", "198.0932 2015-04-01 217.9791 2015-05-13 251.3640 2015-06-24 266.4511 2015-08-05 244.3334 2015-09-16 236.2250 2015-10-28 222.0441", "self.ma_slow + 1 # 设置好xd后可以直接使用基类针对xd的初始化 super(AbuDoubleMaBuy, self)._init_self(**kwargs) # 在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast,", "\"\"\" 动态决策慢线的值,规则如下: 切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样, 对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值, 对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度, 上述的变换周期由10, 15,20,30....进行迭代, 直到计算出第一个重新 采样的周期内的平均变化幅度 > 0.12的周期做为slow的取值 \"\"\" 
last_kl", "# import logging # logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast, self.ma_slow)) def fit_day(self, today): \"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\" #", "if benchmark_today.empty: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 要拿大盘最近一个月的走势,准备切片的start,end end_key = int(benchmark_today.iloc[0].key)", "= fast_line[-2] # 今天的慢线值 slow_today = slow_line[-1] # 昨天的慢线值 slow_yesterday = slow_line[-2] if", "abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean() \"\"\" eg: pd_resample(last_kl.close, rule, how='mean') 2014-07-23 249.0728 2014-09-03 258.3640 2014-10-15", "2016-04-13 230.6000 2016-05-25 216.7596 2016-07-06 222.6420 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()) 2014-09-03 0.037 2014-10-15 0.068", "1: # 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 return math.ceil(self.ma_slow * 0.05) elif least", "def fit_month(self, today): # fit_month即在回测策略中每一个月执行一次的方法 if self.dynamic_slow: # 一定要先动态算ma_slow,因为动态计算fast依赖slow self.ma_slow = self._dynamic_calc_slow(today) if", "benchmark_month = benchmark_df[start_key:end_key + 1] # 通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象 benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark month line')", "-1) self.dynamic_fast = False if self.ma_fast == -1: self.ma_fast = 5 self.dynamic_fast =", "__weixin__ = 'abu_quant' # noinspection PyAttributeOutsideInit class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin): \"\"\"示例买入动态自适应双均线策略\"\"\" def _init_self(self, **kwargs):", "last_kl.empty: # 返回慢线默认值60 return 60 for slow in np.arange(self.resample_min, self.resample_max, 5): rule =", "= fast_line[-1] # 昨天的快线值 fast_yesterday = fast_line[-2] # 今天的慢线值 slow_today = slow_line[-1] #", "return math.ceil(self.ma_slow * 0.3) else: # 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 return math.ceil(self.ma_slow", "2015-09-16 236.2250 2015-10-28 222.0441 2015-12-09 222.0574 2016-01-20 177.2303 2016-03-02 226.8766 2016-04-13 230.6000 2016-05-25", 
"一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 return math.ceil(self.ma_slow * 0.05) elif least == 2:", "2: # 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 return math.ceil(self.ma_slow * 0.15) elif least", "else: # 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 return math.ceil(self.ma_slow * 0.5) def _dynamic_calc_slow(self,", "* 0.5 eg: slow=60->fast=60*0.5=30 return math.ceil(self.ma_slow * 0.5) def _dynamic_calc_slow(self, today): \"\"\" 动态决策慢线的值,规则如下:", "raise ValueError('ma_fast >= self.ma_slow !') # xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉 kwargs['xd'] = self.ma_slow + 1 #", "math import numpy as np from .ABuFactorBuyBase import AbuFactorBuyXD, BuyCallMixin from ..IndicatorBu.ABuNDMa import", "self.dynamic_fast: # 动态计算快线 self.ma_fast = self._dynamic_calc_fast(today) # 动态重新计算后,改变在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow)", "self.benchmark.kl_pd # 拿出大盘的今天 benchmark_today = benchmark_df[benchmark_df.date == today.date] if benchmark_today.empty: # 默认值为慢线的0.15 return", "self.ma_slow: # 慢线周期必须大于快线 raise ValueError('ma_fast >= self.ma_slow !') # xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉 kwargs['xd'] = self.ma_slow", "0.15 eg: slow=60->fast=60*0.15=9 return math.ceil(self.ma_slow * 0.15) elif least == 3: # 三次拟合可以表达:fast=slow", "2015-08-05 0.083 2015-09-16 0.033 2015-10-28 0.060 2015-12-09 0.000 2016-01-20 0.202 2016-03-02 0.280 2016-04-13", "self._dynamic_calc_slow(today) if self.dynamic_fast: # 动态计算快线 self.ma_fast = self._dynamic_calc_fast(today) # 动态重新计算后,改变在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__,", "= False if self.ma_fast == -1: self.ma_fast = 5 self.dynamic_fast = True #", "# -*- encoding:utf-8 -*- \"\"\" 买入择时示例因子:动态自适应双均线策略 \"\"\" from __future__ import absolute_import from __future__", "四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 \"\"\" # 
策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势 benchmark_df = self.benchmark.kl_pd # 拿出大盘的今天", "kwargs中可选参数:resample_max: 动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100 kwargs中可选参数:resample_min: 动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10 kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12 \"\"\" # 均线快线周期,默认使用5天均线 self.ma_fast = kwargs.pop('fast', -1) self.dynamic_fast", "+ 1 # 设置好xd后可以直接使用基类针对xd的初始化 super(AbuDoubleMaBuy, self)._init_self(**kwargs) # 在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow)", "0.5) def _dynamic_calc_slow(self, today): \"\"\" 动态决策慢线的值,规则如下: 切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样, 对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值, 对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度, 上述的变换周期由10, 15,20,30....进行迭代, 直到计算出第一个重新 采样的周期内的平均变化幅度", "math.ceil(self.ma_slow * 0.05) elif least == 2: # 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9", "logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast, self.ma_slow)) def fit_day(self, today): \"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\" # 计算快线 fast_line = calc_ma_from_prices(self.xd_kl.close,", "rule, how='mean').pct_change()).mean() \"\"\" eg: pd_resample(last_kl.close, rule, how='mean') 2014-07-23 249.0728 2014-09-03 258.3640 2014-10-15 240.8663", "'{}D'.format(slow) change = abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean() \"\"\" eg: pd_resample(last_kl.close, rule, how='mean') 2014-07-23 249.0728", "from ..IndicatorBu.ABuNDMa import calc_ma_from_prices from ..CoreBu.ABuPdHelper import pd_resample from ..TLineBu.ABuTL import AbuTLine __author__", "# 动态计算快线 self.ma_fast = self._dynamic_calc_fast(today) # 动态重新计算后,改变在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) #", "self.resample_min = kwargs.pop('resample_min', 10) # 动态慢线可设置参数代表慢线的选取阀值,默认0.12 self.change_threshold = kwargs.pop('change_threshold', 0.12) if self.ma_fast 
>=", "self.ma_slow !') # xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉 kwargs['xd'] = self.ma_slow + 1 # 设置好xd后可以直接使用基类针对xd的初始化 super(AbuDoubleMaBuy, self)._init_self(**kwargs)", "= 'abu_quant' # noinspection PyAttributeOutsideInit class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin): \"\"\"示例买入动态自适应双均线策略\"\"\" def _init_self(self, **kwargs): \"\"\"", "self.resample_max, 5): rule = '{}D'.format(slow) change = abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean() \"\"\" eg: pd_resample(last_kl.close,", "通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象 benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark month line') # 计算这个月最少需要几次拟合才能代表走势曲线 least = benchmark_month_line.show_least_valid_poly(show=False) if", "2 and len(slow_line) >= 2: # 今天的快线值 fast_today = fast_line[-1] # 昨天的快线值 fast_yesterday", "0.15 eg: slow=60->fast=60*0.15=9 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30", "self.past_today_kl(today, self.resample_max) if last_kl.empty: # 返回慢线默认值60 return 60 for slow in np.arange(self.resample_min, self.resample_max,", "2015-10-28 222.0441 2015-12-09 222.0574 2016-01-20 177.2303 2016-03-02 226.8766 2016-04-13 230.6000 2016-05-25 216.7596 2016-07-06", ">= self.ma_slow !') # xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉 kwargs['xd'] = self.ma_slow + 1 # 设置好xd后可以直接使用基类针对xd的初始化 super(AbuDoubleMaBuy,", "= kwargs.pop('resample_max', 100) # 动态慢线可设置参数重采样周期最小值,默认10 self.resample_min = kwargs.pop('resample_min', 10) # 动态慢线可设置参数代表慢线的选取阀值,默认0.12 self.change_threshold =", "100) # 动态慢线可设置参数重采样周期最小值,默认10 self.resample_min = kwargs.pop('resample_min', 10) # 动态慢线可设置参数代表慢线的选取阀值,默认0.12 self.change_threshold = kwargs.pop('change_threshold', 0.12)", "if least == 1: # 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 return math.ceil(self.ma_slow *", "import pd_resample from ..TLineBu.ABuTL import AbuTLine __author__ = '阿布' __weixin__ = 'abu_quant' #", "= benchmark_df[start_key:end_key + 1] # 
通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象 benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark month line') #", "slow_line[-2] if slow_yesterday >= fast_yesterday and fast_today > slow_today: # 快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入 return", "AbuTLine(benchmark_month.close, 'benchmark month line') # 计算这个月最少需要几次拟合才能代表走势曲线 least = benchmark_month_line.show_least_valid_poly(show=False) if least == 1:", "0.15) # 要拿大盘最近一个月的走势,准备切片的start,end end_key = int(benchmark_today.iloc[0].key) start_key = end_key - 20 if start_key", "# 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 return math.ceil(self.ma_slow * 0.05) elif least ==", "if last_kl.empty: # 返回慢线默认值60 return 60 for slow in np.arange(self.resample_min, self.resample_max, 5): rule", "if slow_yesterday >= fast_yesterday and fast_today > slow_today: # 快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入 return self.buy_tomorrow()", "\"\"\" last_kl = self.past_today_kl(today, self.resample_max) if last_kl.empty: # 返回慢线默认值60 return 60 for slow", "return math.ceil(self.ma_slow * 0.15) elif least == 3: # 三次拟合可以表达:fast=slow * 0.3 eg:", "177.2303 2016-03-02 226.8766 2016-04-13 230.6000 2016-05-25 216.7596 2016-07-06 222.6420 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()) 2014-09-03", "要拿大盘最近一个月的走势,准备切片的start,end end_key = int(benchmark_today.iloc[0].key) start_key = end_key - 20 if start_key < 0:", "* 0.05 eg: slow=60->fast=60*0.05=3 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 三次拟合可以表达:fast=slow * 0.3 eg:", "= '阿布' __weixin__ = 'abu_quant' # noinspection PyAttributeOutsideInit class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin): \"\"\"示例买入动态自适应双均线策略\"\"\" def", "== today.date] if benchmark_today.empty: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 要拿大盘最近一个月的走势,准备切片的start,end end_key", "kwargs中可选参数:resample_min: 动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10 kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12 \"\"\" # 均线快线周期,默认使用5天均线 self.ma_fast = kwargs.pop('fast', -1) self.dynamic_fast = False", "1 # 设置好xd后可以直接使用基类针对xd的初始化 
super(AbuDoubleMaBuy, self)._init_self(**kwargs) # 在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) def", "# 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 使用切片切出从今天开始向前20天的数据 benchmark_month = benchmark_df[start_key:end_key + 1]", "2014-09-03 0.037 2014-10-15 0.068 2014-11-26 0.086 2015-01-07 0.064 2015-02-18 0.038 2015-04-01 0.100 2015-05-13", "# 均线慢线周期,默认使用60天均线 self.ma_slow = kwargs.pop('slow', -1) self.dynamic_slow = False if self.ma_slow == -1:", "slow_yesterday = slow_line[-2] if slow_yesterday >= fast_yesterday and fast_today > slow_today: # 快线上穿慢线,", "如果大盘最近一个月走势使用: 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 三次拟合可以表达:fast=slow *", "2015-01-07 206.0070 2015-02-18 198.0932 2015-04-01 217.9791 2015-05-13 251.3640 2015-06-24 266.4511 2015-08-05 244.3334 2015-09-16", "numpy as np from .ABuFactorBuyBase import AbuFactorBuyXD, BuyCallMixin from ..IndicatorBu.ABuNDMa import calc_ma_from_prices from", "= True # 动态慢线可设置参数重采样周期最大值,默认90 self.resample_max = kwargs.pop('resample_max', 100) # 动态慢线可设置参数重采样周期最小值,默认10 self.resample_min = kwargs.pop('resample_min',", "= self.past_today_kl(today, self.resample_max) if last_kl.empty: # 返回慢线默认值60 return 60 for slow in np.arange(self.resample_min,", "fast_yesterday = fast_line[-2] # 今天的慢线值 slow_today = slow_line[-1] # 昨天的慢线值 slow_yesterday = slow_line[-2]", "= slow_line[-2] if slow_yesterday >= fast_yesterday and fast_today > slow_today: # 快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入", "266.4511 2015-08-05 244.3334 2015-09-16 236.2250 2015-10-28 222.0441 2015-12-09 222.0574 2016-01-20 177.2303 2016-03-02 226.8766", "= calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow), min_periods=1) if len(fast_line) >= 2 and len(slow_line) >= 2: #", "len(slow_line) >= 2: # 今天的快线值 fast_today = fast_line[-1] # 昨天的快线值 fast_yesterday = fast_line[-2]", "2015-09-16 0.033 2015-10-28 0.060 2015-12-09 0.000 2016-01-20 
0.202 2016-03-02 0.280 2016-04-13 0.016 2016-05-25", "if change > self.change_threshold: \"\"\" 返回第一个大于change_threshold的slow, change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间 \"\"\" return slow # 迭代np.arange(min, max,", "* 0.05 eg: slow=60->fast=60*0.05=3 return math.ceil(self.ma_slow * 0.05) elif least == 2: #", "5)都不符合就返回max return self.resample_max def fit_month(self, today): # fit_month即在回测策略中每一个月执行一次的方法 if self.dynamic_slow: # 一定要先动态算ma_slow,因为动态计算fast依赖slow self.ma_slow", "# 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 return math.ceil(self.ma_slow * 0.3) else: # 四次及以上拟合可以表达:fast=slow", "\"\"\" # 策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势 benchmark_df = self.benchmark.kl_pd # 拿出大盘的今天 benchmark_today = benchmark_df[benchmark_df.date == today.date]", "import numpy as np from .ABuFactorBuyBase import AbuFactorBuyXD, BuyCallMixin from ..IndicatorBu.ABuNDMa import calc_ma_from_prices", "# 昨天的慢线值 slow_yesterday = slow_line[-2] if slow_yesterday >= fast_yesterday and fast_today > slow_today:", "动态决策慢线的值,规则如下: 切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样, 对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值, 对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度, 上述的变换周期由10, 15,20,30....进行迭代, 直到计算出第一个重新 采样的周期内的平均变化幅度 > 0.12的周期做为slow的取值 \"\"\" last_kl =", "import absolute_import from __future__ import print_function from __future__ import division import math import", "self.change_threshold = kwargs.pop('change_threshold', 0.12) if self.ma_fast >= self.ma_slow: # 慢线周期必须大于快线 raise ValueError('ma_fast >=", "使用切片切出从今天开始向前20天的数据 benchmark_month = benchmark_df[start_key:end_key + 1] # 通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象 benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark month", "change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间 \"\"\" return slow # 迭代np.arange(min, max, 5)都不符合就返回max return self.resample_max def fit_month(self, today):", "* 0.5) def _dynamic_calc_slow(self, today): \"\"\" 动态决策慢线的值,规则如下: 切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样, 
对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值, 对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度, 上述的变换周期由10, 15,20,30....进行迭代, 直到计算出第一个重新", "# 动态重新计算后,改变在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) # import logging # logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date,", "0.064 2015-02-18 0.038 2015-04-01 0.100 2015-05-13 0.153 2015-06-24 0.060 2015-08-05 0.083 2015-09-16 0.033", "均线慢线周期,默认不设置,使用自适应动态慢线 kwargs中可选参数:resample_max: 动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100 kwargs中可选参数:resample_min: 动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10 kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12 \"\"\" # 均线快线周期,默认使用5天均线 self.ma_fast = kwargs.pop('fast', -1)", "for slow in np.arange(self.resample_min, self.resample_max, 5): rule = '{}D'.format(slow) change = abs(pd_resample(last_kl.close, rule,", "rule, how='mean').pct_change()) 2014-09-03 0.037 2014-10-15 0.068 2014-11-26 0.086 2015-01-07 0.064 2015-02-18 0.038 2015-04-01", "today.date] if benchmark_today.empty: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 要拿大盘最近一个月的走势,准备切片的start,end end_key =", "= kwargs.pop('fast', -1) self.dynamic_fast = False if self.ma_fast == -1: self.ma_fast = 5", "math.ceil(self.ma_slow * 0.15) # 使用切片切出从今天开始向前20天的数据 benchmark_month = benchmark_df[start_key:end_key + 1] # 通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象 benchmark_month_line", "True # 动态慢线可设置参数重采样周期最大值,默认90 self.resample_max = kwargs.pop('resample_max', 100) # 动态慢线可设置参数重采样周期最小值,默认10 self.resample_min = kwargs.pop('resample_min', 10)", "= self.ma_slow + 1 # 设置好xd后可以直接使用基类针对xd的初始化 super(AbuDoubleMaBuy, self)._init_self(**kwargs) # 在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__,", "217.9791 2015-05-13 251.3640 2015-06-24 266.4511 2015-08-05 244.3334 2015-09-16 236.2250 2015-10-28 222.0441 2015-12-09 222.0574", "from ..TLineBu.ABuTL import AbuTLine __author__ = '阿布' __weixin__ = 'abu_quant' # noinspection PyAttributeOutsideInit", 
"self.dynamic_slow = True # 动态慢线可设置参数重采样周期最大值,默认90 self.resample_max = kwargs.pop('resample_max', 100) # 动态慢线可设置参数重采样周期最小值,默认10 self.resample_min =", "as np from .ABuFactorBuyBase import AbuFactorBuyXD, BuyCallMixin from ..IndicatorBu.ABuNDMa import calc_ma_from_prices from ..CoreBu.ABuPdHelper", "ValueError('ma_fast >= self.ma_slow !') # xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉 kwargs['xd'] = self.ma_slow + 1 # 设置好xd后可以直接使用基类针对xd的初始化", "0.027 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean(): 0.080 \"\"\" if change > self.change_threshold: \"\"\" 返回第一个大于change_threshold的slow, change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间", "rule, how='mean').pct_change()).mean(): 0.080 \"\"\" if change > self.change_threshold: \"\"\" 返回第一个大于change_threshold的slow, change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间 \"\"\" return", "0.086 2015-01-07 0.064 2015-02-18 0.038 2015-04-01 0.100 2015-05-13 0.153 2015-06-24 0.060 2015-08-05 0.083", "'{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) def _dynamic_calc_fast(self, today): \"\"\" 根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下: 如果大盘最近一个月走势使用: 一次拟合可以表达:fast=slow * 0.05 eg:", "244.3334 2015-09-16 236.2250 2015-10-28 222.0441 2015-12-09 222.0574 2016-01-20 177.2303 2016-03-02 226.8766 2016-04-13 230.6000", "how='mean').pct_change()) 2014-09-03 0.037 2014-10-15 0.068 2014-11-26 0.086 2015-01-07 0.064 2015-02-18 0.038 2015-04-01 0.100", "236.2250 2015-10-28 222.0441 2015-12-09 222.0574 2016-01-20 177.2303 2016-03-02 226.8766 2016-04-13 230.6000 2016-05-25 216.7596", "* 0.3 eg: slow=60->fast=60*0.3=18 return math.ceil(self.ma_slow * 0.3) else: # 四次及以上拟合可以表达:fast=slow * 0.5", "计算快线 fast_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1) # 计算慢线 slow_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow), min_periods=1)", "AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin): \"\"\"示例买入动态自适应双均线策略\"\"\" def _init_self(self, **kwargs): \"\"\" 
kwargs中可选参数:fast: 均线快线周期,默认不设置,使用自适应动态快线 kwargs中可选参数:slow: 均线慢线周期,默认不设置,使用自适应动态慢线 kwargs中可选参数:resample_max: 动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100", "\"\"\"示例买入动态自适应双均线策略\"\"\" def _init_self(self, **kwargs): \"\"\" kwargs中可选参数:fast: 均线快线周期,默认不设置,使用自适应动态快线 kwargs中可选参数:slow: 均线慢线周期,默认不设置,使用自适应动态慢线 kwargs中可选参数:resample_max: 动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100 kwargs中可选参数:resample_min: 动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10", "= calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1) # 计算慢线 slow_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow), min_periods=1) if len(fast_line)", "-1: self.ma_slow = 60 self.dynamic_slow = True # 动态慢线可设置参数重采样周期最大值,默认90 self.resample_max = kwargs.pop('resample_max', 100)", "采样的周期内的平均变化幅度 > 0.12的周期做为slow的取值 \"\"\" last_kl = self.past_today_kl(today, self.resample_max) if last_kl.empty: # 返回慢线默认值60 return", "benchmark_df = self.benchmark.kl_pd # 拿出大盘的今天 benchmark_today = benchmark_df[benchmark_df.date == today.date] if benchmark_today.empty: #", "2016-05-25 216.7596 2016-07-06 222.6420 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()) 2014-09-03 0.037 2014-10-15 0.068 2014-11-26 0.086", "0.05 eg: slow=60->fast=60*0.05=3 return math.ceil(self.ma_slow * 0.05) elif least == 2: # 二次拟合可以表达:fast=slow", "均线慢线周期,默认使用60天均线 self.ma_slow = kwargs.pop('slow', -1) self.dynamic_slow = False if self.ma_slow == -1: self.ma_slow", "def fit_day(self, today): \"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\" # 计算快线 fast_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1) # 计算慢线", "2014-10-15 0.068 2014-11-26 0.086 2015-01-07 0.064 2015-02-18 0.038 2015-04-01 0.100 2015-05-13 0.153 2015-06-24", "\"\"\" 根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下: 如果大盘最近一个月走势使用: 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9", "least == 2: # 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 return math.ceil(self.ma_slow * 0.15)", "self.ma_slow)) def fit_day(self, today): 
\"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\" # 计算快线 fast_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1) #", "2015-04-01 0.100 2015-05-13 0.153 2015-06-24 0.060 2015-08-05 0.083 2015-09-16 0.033 2015-10-28 0.060 2015-12-09", ">= 2 and len(slow_line) >= 2: # 今天的快线值 fast_today = fast_line[-1] # 昨天的快线值", "均线快线周期,默认使用5天均线 self.ma_fast = kwargs.pop('fast', -1) self.dynamic_fast = False if self.ma_fast == -1: self.ma_fast", "# 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 return math.ceil(self.ma_slow * 0.15) elif least ==", "== -1: self.ma_slow = 60 self.dynamic_slow = True # 动态慢线可设置参数重采样周期最大值,默认90 self.resample_max = kwargs.pop('resample_max',", "np.arange(self.resample_min, self.resample_max, 5): rule = '{}D'.format(slow) change = abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean() \"\"\" eg:", "import division import math import numpy as np from .ABuFactorBuyBase import AbuFactorBuyXD, BuyCallMixin", "_init_self(self, **kwargs): \"\"\" kwargs中可选参数:fast: 均线快线周期,默认不设置,使用自适应动态快线 kwargs中可选参数:slow: 均线慢线周期,默认不设置,使用自适应动态慢线 kwargs中可选参数:resample_max: 动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100 kwargs中可选参数:resample_min: 动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10 kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12 \"\"\"", "> 0.12的周期做为slow的取值 \"\"\" last_kl = self.past_today_kl(today, self.resample_max) if last_kl.empty: # 返回慢线默认值60 return 60", "2015-12-09 0.000 2016-01-20 0.202 2016-03-02 0.280 2016-04-13 0.016 2016-05-25 0.060 2016-07-06 0.027 abs(pd_resample(last_kl.close,", "20 if start_key < 0: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 使用切片切出从今天开始向前20天的数据", "* 0.15) # 要拿大盘最近一个月的走势,准备切片的start,end end_key = int(benchmark_today.iloc[0].key) start_key = end_key - 20 if", "\"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\" # 计算快线 fast_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1) # 计算慢线 slow_line = calc_ma_from_prices(self.xd_kl.close,", "if self.dynamic_slow: # 一定要先动态算ma_slow,因为动态计算fast依赖slow self.ma_slow 
= self._dynamic_calc_slow(today) if self.dynamic_fast: # 动态计算快线 self.ma_fast =", "# 设置好xd后可以直接使用基类针对xd的初始化 super(AbuDoubleMaBuy, self)._init_self(**kwargs) # 在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) def _dynamic_calc_fast(self,", "import math import numpy as np from .ABuFactorBuyBase import AbuFactorBuyXD, BuyCallMixin from ..IndicatorBu.ABuNDMa", "int(benchmark_today.iloc[0].key) start_key = end_key - 20 if start_key < 0: # 默认值为慢线的0.15 return", "eg: slow=60->fast=60*0.5=30 \"\"\" # 策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势 benchmark_df = self.benchmark.kl_pd # 拿出大盘的今天 benchmark_today = benchmark_df[benchmark_df.date", "benchmark_month_line.show_least_valid_poly(show=False) if least == 1: # 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 return math.ceil(self.ma_slow", "0.3 eg: slow=60->fast=60*0.3=18 return math.ceil(self.ma_slow * 0.3) else: # 四次及以上拟合可以表达:fast=slow * 0.5 eg:", "= self._dynamic_calc_slow(today) if self.dynamic_fast: # 动态计算快线 self.ma_fast = self._dynamic_calc_fast(today) # 动态重新计算后,改变在输出生成的orders_pd中显示的名字 self.factor_name =", "noinspection PyAttributeOutsideInit class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin): \"\"\"示例买入动态自适应双均线策略\"\"\" def _init_self(self, **kwargs): \"\"\" kwargs中可选参数:fast: 均线快线周期,默认不设置,使用自适应动态快线 kwargs中可选参数:slow:", "def _dynamic_calc_fast(self, today): \"\"\" 根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下: 如果大盘最近一个月走势使用: 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 二次拟合可以表达:fast=slow *", "0.280 2016-04-13 0.016 2016-05-25 0.060 2016-07-06 0.027 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean(): 0.080 \"\"\" if", "len(fast_line) >= 2 and len(slow_line) >= 2: # 今天的快线值 fast_today = fast_line[-1] #", "from __future__ import absolute_import from __future__ import print_function from __future__ import division import", "* 0.15) # 使用切片切出从今天开始向前20天的数据 benchmark_month = benchmark_df[start_key:end_key + 1] # 
通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象 benchmark_month_line =", "今天的快线值 fast_today = fast_line[-1] # 昨天的快线值 fast_yesterday = fast_line[-2] # 今天的慢线值 slow_today =", "昨天的快线值 fast_yesterday = fast_line[-2] # 今天的慢线值 slow_today = slow_line[-1] # 昨天的慢线值 slow_yesterday =", "0.15) elif least == 3: # 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 return math.ceil(self.ma_slow", "0.033 2015-10-28 0.060 2015-12-09 0.000 2016-01-20 0.202 2016-03-02 0.280 2016-04-13 0.016 2016-05-25 0.060", "max, 5)都不符合就返回max return self.resample_max def fit_month(self, today): # fit_month即在回测策略中每一个月执行一次的方法 if self.dynamic_slow: # 一定要先动态算ma_slow,因为动态计算fast依赖slow", "买入择时示例因子:动态自适应双均线策略 \"\"\" from __future__ import absolute_import from __future__ import print_function from __future__ import", "# 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 return math.ceil(self.ma_slow * 0.5) def _dynamic_calc_slow(self, today):", "if self.ma_fast == -1: self.ma_fast = 5 self.dynamic_fast = True # 均线慢线周期,默认使用60天均线 self.ma_slow", "return math.ceil(self.ma_slow * 0.15) # 使用切片切出从今天开始向前20天的数据 benchmark_month = benchmark_df[start_key:end_key + 1] # 通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象", "eg: pd_resample(last_kl.close, rule, how='mean') 2014-07-23 249.0728 2014-09-03 258.3640 2014-10-15 240.8663 2014-11-26 220.1552 2015-01-07", "eg: slow=60->fast=60*0.3=18 return math.ceil(self.ma_slow * 0.3) else: # 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30", "\"\"\" kwargs中可选参数:fast: 均线快线周期,默认不设置,使用自适应动态快线 kwargs中可选参数:slow: 均线慢线周期,默认不设置,使用自适应动态慢线 kwargs中可选参数:resample_max: 动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100 kwargs中可选参数:resample_min: 动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10 kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12 \"\"\" # 均线快线周期,默认使用5天均线", "'{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) # import logging # logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast, self.ma_slow)) def fit_day(self,", "calc_ma_from_prices(self.xd_kl.close, 
int(self.ma_slow), min_periods=1) if len(fast_line) >= 2 and len(slow_line) >= 2: # 今天的快线值", "absolute_import from __future__ import print_function from __future__ import division import math import numpy", "切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样, 对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值, 对pct_change绝对值序列取平均,即算出重新采样的周期内的平均变化幅度, 上述的变换周期由10, 15,20,30....进行迭代, 直到计算出第一个重新 采样的周期内的平均变化幅度 > 0.12的周期做为slow的取值 \"\"\" last_kl = self.past_today_kl(today,", "== 2: # 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 return math.ceil(self.ma_slow * 0.15) elif", "least = benchmark_month_line.show_least_valid_poly(show=False) if least == 1: # 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3", "* 0.3 eg: slow=60->fast=60*0.3=18 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 \"\"\" # 策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势 benchmark_df", "eg: slow=60->fast=60*0.3=18 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 \"\"\" # 策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势 benchmark_df = self.benchmark.kl_pd", "from __future__ import print_function from __future__ import division import math import numpy as", "= slow_line[-1] # 昨天的慢线值 slow_yesterday = slow_line[-2] if slow_yesterday >= fast_yesterday and fast_today", "self.ma_fast = 5 self.dynamic_fast = True # 均线慢线周期,默认使用60天均线 self.ma_slow = kwargs.pop('slow', -1) self.dynamic_slow", "if self.ma_slow == -1: self.ma_slow = 60 self.dynamic_slow = True # 动态慢线可设置参数重采样周期最大值,默认90 self.resample_max", "rule, how='mean') 2014-07-23 249.0728 2014-09-03 258.3640 2014-10-15 240.8663 2014-11-26 220.1552 2015-01-07 206.0070 2015-02-18", "kwargs.pop('fast', -1) self.dynamic_fast = False if self.ma_fast == -1: self.ma_fast = 5 self.dynamic_fast", "self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) def _dynamic_calc_fast(self, today): \"\"\" 根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下: 如果大盘最近一个月走势使用: 一次拟合可以表达:fast=slow *", 
"slow=60->fast=60*0.3=18 return math.ceil(self.ma_slow * 0.3) else: # 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 return", "self.ma_slow) # import logging # logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast, self.ma_slow)) def fit_day(self, today): \"\"\"双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号\"\"\"", "fast_yesterday and fast_today > slow_today: # 快线上穿慢线, 形成买入金叉,使用了今天收盘价格,明天买入 return self.buy_tomorrow() \"\"\"可以选择是否覆盖AbuFactorBuyXD中的buy_tomorrow来增大交易频率,默认基类中self.skip_days = self.xd降低了频率\"\"\"", "-*- encoding:utf-8 -*- \"\"\" 买入择时示例因子:动态自适应双均线策略 \"\"\" from __future__ import absolute_import from __future__ import", "= benchmark_month_line.show_least_valid_poly(show=False) if least == 1: # 一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 return", "# 均线快线周期,默认使用5天均线 self.ma_fast = kwargs.pop('fast', -1) self.dynamic_fast = False if self.ma_fast == -1:", "self.dynamic_fast = True # 均线慢线周期,默认使用60天均线 self.ma_slow = kwargs.pop('slow', -1) self.dynamic_slow = False if", "0.037 2014-10-15 0.068 2014-11-26 0.086 2015-01-07 0.064 2015-02-18 0.038 2015-04-01 0.100 2015-05-13 0.153", "动态重新计算后,改变在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) # import logging # logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast,", "5 self.dynamic_fast = True # 均线慢线周期,默认使用60天均线 self.ma_slow = kwargs.pop('slow', -1) self.dynamic_slow = False", "三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 四次及以上拟合可以表达:fast=slow * 0.5 eg: slow=60->fast=60*0.5=30 \"\"\" # 策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势", "__future__ import division import math import numpy as np from .ABuFactorBuyBase import AbuFactorBuyXD,", "10) # 动态慢线可设置参数代表慢线的选取阀值,默认0.12 self.change_threshold = kwargs.pop('change_threshold', 0.12) if self.ma_fast >= self.ma_slow: # 慢线周期必须大于快线", "= True # 均线慢线周期,默认使用60天均线 self.ma_slow = kwargs.pop('slow', -1) 
self.dynamic_slow = False if self.ma_slow", "今天的慢线值 slow_today = slow_line[-1] # 昨天的慢线值 slow_yesterday = slow_line[-2] if slow_yesterday >= fast_yesterday", "# 今天的快线值 fast_today = fast_line[-1] # 昨天的快线值 fast_yesterday = fast_line[-2] # 今天的慢线值 slow_today", "BuyCallMixin): \"\"\"示例买入动态自适应双均线策略\"\"\" def _init_self(self, **kwargs): \"\"\" kwargs中可选参数:fast: 均线快线周期,默认不设置,使用自适应动态快线 kwargs中可选参数:slow: 均线慢线周期,默认不设置,使用自适应动态慢线 kwargs中可选参数:resample_max: 动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100 kwargs中可选参数:resample_min:", "默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 使用切片切出从今天开始向前20天的数据 benchmark_month = benchmark_df[start_key:end_key + 1] #", "* 0.5 eg: slow=60->fast=60*0.5=30 \"\"\" # 策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势 benchmark_df = self.benchmark.kl_pd # 拿出大盘的今天 benchmark_today", "2016-04-13 0.016 2016-05-25 0.060 2016-07-06 0.027 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean(): 0.080 \"\"\" if change", "2016-07-06 0.027 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean(): 0.080 \"\"\" if change > self.change_threshold: \"\"\" 返回第一个大于change_threshold的slow,", "encoding:utf-8 -*- \"\"\" 买入择时示例因子:动态自适应双均线策略 \"\"\" from __future__ import absolute_import from __future__ import print_function", "self.ma_fast >= self.ma_slow: # 慢线周期必须大于快线 raise ValueError('ma_fast >= self.ma_slow !') # xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉 kwargs['xd']", "二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 四次及以上拟合可以表达:fast=slow * 0.5", "# 计算快线 fast_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1) # 计算慢线 slow_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow),", "2014-11-26 220.1552 2015-01-07 206.0070 2015-02-18 198.0932 2015-04-01 217.9791 2015-05-13 251.3640 2015-06-24 266.4511 2015-08-05", "eg: slow=60->fast=60*0.5=30 return math.ceil(self.ma_slow * 0.5) def _dynamic_calc_slow(self, today): \"\"\" 
动态决策慢线的值,规则如下: 切片最近一段时间的金融时间序列,对金融时间序列进行变换周期重新采样, 对重新采样的结果进行pct_change处理,对pct_change序列取abs绝对值,", "True # 均线慢线周期,默认使用60天均线 self.ma_slow = kwargs.pop('slow', -1) self.dynamic_slow = False if self.ma_slow ==", "= self.benchmark.kl_pd # 拿出大盘的今天 benchmark_today = benchmark_df[benchmark_df.date == today.date] if benchmark_today.empty: # 默认值为慢线的0.15", "elif least == 2: # 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 return math.ceil(self.ma_slow *", "二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 return math.ceil(self.ma_slow * 0.15) elif least == 3:", "abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean(): 0.080 \"\"\" if change > self.change_threshold: \"\"\" 返回第一个大于change_threshold的slow, change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间 \"\"\"", "2016-01-20 0.202 2016-03-02 0.280 2016-04-13 0.016 2016-05-25 0.060 2016-07-06 0.027 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean():", "一次拟合可以表达:fast=slow * 0.05 eg: slow=60->fast=60*0.05=3 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 三次拟合可以表达:fast=slow * 0.3", "slow # 迭代np.arange(min, max, 5)都不符合就返回max return self.resample_max def fit_month(self, today): # fit_month即在回测策略中每一个月执行一次的方法 if", "benchmark_today = benchmark_df[benchmark_df.date == today.date] if benchmark_today.empty: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15)", "from __future__ import division import math import numpy as np from .ABuFactorBuyBase import", "kwargs中可选参数:slow: 均线慢线周期,默认不设置,使用自适应动态慢线 kwargs中可选参数:resample_max: 动态慢线可设置参数重采样周期最大值,默认100,即动态慢线最大100 kwargs中可选参数:resample_min: 动态慢线可设置参数重采样周期最小值,默认10,即动态慢线最小10 kwargs中可选参数:change_threshold:动态慢线可设置参数代表慢线的选取阀值,默认0.12 \"\"\" # 均线快线周期,默认使用5天均线 self.ma_fast = kwargs.pop('fast',", "2015-05-13 251.3640 2015-06-24 266.4511 2015-08-05 244.3334 2015-09-16 236.2250 2015-10-28 222.0441 2015-12-09 222.0574 2016-01-20", "# 动态慢线可设置参数重采样周期最小值,默认10 self.resample_min = kwargs.pop('resample_min', 10) # 动态慢线可设置参数代表慢线的选取阀值,默认0.12 self.change_threshold = 
kwargs.pop('change_threshold', 0.12) if", "# 返回慢线默认值60 return 60 for slow in np.arange(self.resample_min, self.resample_max, 5): rule = '{}D'.format(slow)", "220.1552 2015-01-07 206.0070 2015-02-18 198.0932 2015-04-01 217.9791 2015-05-13 251.3640 2015-06-24 266.4511 2015-08-05 244.3334", "int(self.ma_slow), min_periods=1) if len(fast_line) >= 2 and len(slow_line) >= 2: # 今天的快线值 fast_today", "fast_line[-1] # 昨天的快线值 fast_yesterday = fast_line[-2] # 今天的慢线值 slow_today = slow_line[-1] # 昨天的慢线值", "240.8663 2014-11-26 220.1552 2015-01-07 206.0070 2015-02-18 198.0932 2015-04-01 217.9791 2015-05-13 251.3640 2015-06-24 266.4511", "= kwargs.pop('slow', -1) self.dynamic_slow = False if self.ma_slow == -1: self.ma_slow = 60", "-1: self.ma_fast = 5 self.dynamic_fast = True # 均线慢线周期,默认使用60天均线 self.ma_slow = kwargs.pop('slow', -1)", "= abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean() \"\"\" eg: pd_resample(last_kl.close, rule, how='mean') 2014-07-23 249.0728 2014-09-03 258.3640", "0.060 2015-12-09 0.000 2016-01-20 0.202 2016-03-02 0.280 2016-04-13 0.016 2016-05-25 0.060 2016-07-06 0.027", "end_key - 20 if start_key < 0: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15)", "self.ma_fast = kwargs.pop('fast', -1) self.dynamic_fast = False if self.ma_fast == -1: self.ma_fast =", "222.0574 2016-01-20 177.2303 2016-03-02 226.8766 2016-04-13 230.6000 2016-05-25 216.7596 2016-07-06 222.6420 abs(pd_resample(last_kl.close, rule,", "kwargs.pop('resample_max', 100) # 动态慢线可设置参数重采样周期最小值,默认10 self.resample_min = kwargs.pop('resample_min', 10) # 动态慢线可设置参数代表慢线的选取阀值,默认0.12 self.change_threshold = kwargs.pop('change_threshold',", "在输出生成的orders_pd中显示的名字 self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow) def _dynamic_calc_fast(self, today): \"\"\" 根据大盘最近一个月走势震荡程度,动态决策快线的值,规则如下: 如果大盘最近一个月走势使用: 一次拟合可以表达:fast=slow", "from ..CoreBu.ABuPdHelper import pd_resample from ..TLineBu.ABuTL import AbuTLine __author__ = '阿布' __weixin__ =", 
"0.068 2014-11-26 0.086 2015-01-07 0.064 2015-02-18 0.038 2015-04-01 0.100 2015-05-13 0.153 2015-06-24 0.060", "< 0: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 使用切片切出从今天开始向前20天的数据 benchmark_month = benchmark_df[start_key:end_key", "today): # fit_month即在回测策略中每一个月执行一次的方法 if self.dynamic_slow: # 一定要先动态算ma_slow,因为动态计算fast依赖slow self.ma_slow = self._dynamic_calc_slow(today) if self.dynamic_fast: #", "pd_resample from ..TLineBu.ABuTL import AbuTLine __author__ = '阿布' __weixin__ = 'abu_quant' # noinspection", "import calc_ma_from_prices from ..CoreBu.ABuPdHelper import pd_resample from ..TLineBu.ABuTL import AbuTLine __author__ = '阿布'", "how='mean').pct_change()).mean(): 0.080 \"\"\" if change > self.change_threshold: \"\"\" 返回第一个大于change_threshold的slow, change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间 \"\"\" return slow", "return self.resample_max def fit_month(self, today): # fit_month即在回测策略中每一个月执行一次的方法 if self.dynamic_slow: # 一定要先动态算ma_slow,因为动态计算fast依赖slow self.ma_slow =", "rule = '{}D'.format(slow) change = abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean() \"\"\" eg: pd_resample(last_kl.close, rule, how='mean')", "start_key = end_key - 20 if start_key < 0: # 默认值为慢线的0.15 return math.ceil(self.ma_slow", "# 计算慢线 slow_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow), min_periods=1) if len(fast_line) >= 2 and len(slow_line)", "2016-03-02 0.280 2016-04-13 0.016 2016-05-25 0.060 2016-07-06 0.027 abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean(): 0.080 \"\"\"", "# 慢线周期必须大于快线 raise ValueError('ma_fast >= self.ma_slow !') # xd周期数据需要比ma_slow大一天,这样计算ma就可以拿到今天和昨天两天的ma,用来判断金叉,死叉 kwargs['xd'] = self.ma_slow +", "return math.ceil(self.ma_slow * 0.05) elif least == 2: # 二次拟合可以表达:fast=slow * 0.15 eg:", "slow=60->fast=60*0.15=9 return math.ceil(self.ma_slow * 0.15) elif least == 3: # 三次拟合可以表达:fast=slow * 0.3", "False if self.ma_slow == -1: self.ma_slow = 60 self.dynamic_slow = True # 动态慢线可设置参数重采样周期最大值,默认90", "= 
end_key - 20 if start_key < 0: # 默认值为慢线的0.15 return math.ceil(self.ma_slow *", "from .ABuFactorBuyBase import AbuFactorBuyXD, BuyCallMixin from ..IndicatorBu.ABuNDMa import calc_ma_from_prices from ..CoreBu.ABuPdHelper import pd_resample", "三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18 return math.ceil(self.ma_slow * 0.3) else: # 四次及以上拟合可以表达:fast=slow *", "上述的变换周期由10, 15,20,30....进行迭代, 直到计算出第一个重新 采样的周期内的平均变化幅度 > 0.12的周期做为slow的取值 \"\"\" last_kl = self.past_today_kl(today, self.resample_max) if last_kl.empty:", "change > self.change_threshold: \"\"\" 返回第一个大于change_threshold的slow, change_threshold默认为0.12,以周期突破的策略一般需要在0.08以上,0.12是为快线留出套利空间 \"\"\" return slow # 迭代np.arange(min, max, 5)都不符合就返回max", "calc_ma_from_prices from ..CoreBu.ABuPdHelper import pd_resample from ..TLineBu.ABuTL import AbuTLine __author__ = '阿布' __weixin__", "0.05 eg: slow=60->fast=60*0.05=3 二次拟合可以表达:fast=slow * 0.15 eg: slow=60->fast=60*0.15=9 三次拟合可以表达:fast=slow * 0.3 eg: slow=60->fast=60*0.3=18", "2: # 今天的快线值 fast_today = fast_line[-1] # 昨天的快线值 fast_yesterday = fast_line[-2] # 今天的慢线值", "= 60 self.dynamic_slow = True # 动态慢线可设置参数重采样周期最大值,默认90 self.resample_max = kwargs.pop('resample_max', 100) # 动态慢线可设置参数重采样周期最小值,默认10", "eg: slow=60->fast=60*0.15=9 return math.ceil(self.ma_slow * 0.15) elif least == 3: # 三次拟合可以表达:fast=slow *", "1] # 通过大盘最近一个月的收盘价格做为参数构造AbuTLine对象 benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark month line') # 计算这个月最少需要几次拟合才能代表走势曲线 least =", "\"\"\" 买入择时示例因子:动态自适应双均线策略 \"\"\" from __future__ import absolute_import from __future__ import print_function from __future__", "策略中拥有self.benchmark,即交易基准对象,AbuBenchmark实例对象,benchmark.kl_pd即对应的市场大盘走势 benchmark_df = self.benchmark.kl_pd # 拿出大盘的今天 benchmark_today = benchmark_df[benchmark_df.date == today.date] if benchmark_today.empty:", "..CoreBu.ABuPdHelper import pd_resample from ..TLineBu.ABuTL import AbuTLine __author__ = '阿布' __weixin__ = 'abu_quant'", "0: # 默认值为慢线的0.15 return math.ceil(self.ma_slow * 0.15) # 
使用切片切出从今天开始向前20天的数据 benchmark_month = benchmark_df[start_key:end_key +", "PyAttributeOutsideInit class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin): \"\"\"示例买入动态自适应双均线策略\"\"\" def _init_self(self, **kwargs): \"\"\" kwargs中可选参数:fast: 均线快线周期,默认不设置,使用自适应动态快线 kwargs中可选参数:slow: 均线慢线周期,默认不设置,使用自适应动态慢线", "self.ma_slow = 60 self.dynamic_slow = True # 动态慢线可设置参数重采样周期最大值,默认90 self.resample_max = kwargs.pop('resample_max', 100) #" ]
[ "hasattr(module, member): old_member = None else: old_member = getattr(module, member) setattr(module, member, new_value)", "in sys.modules: return None module = sys.modules[module] if not hasattr(module, member): old_member =", "member) else: setattr(module, member, old_member) @contextlib.contextmanager def patch(*args): \"\"\"Manager a patch in a", "[] for idx in range(0, len(args), 3): module, member, new_value = args[idx :", "len(args), 3): module, member, new_value = args[idx : idx + 3] tokens.append(begin_patch(module, member,", "member, old_member = token if old_member is None: delattr(module, member) else: setattr(module, member,", "None: return module, member, old_member = token if old_member is None: delattr(module, member)", "member): old_member = None else: old_member = getattr(module, member) setattr(module, member, new_value) return", "\"\"\"Manager a patch in a contextmanager\"\"\" tokens = [] for idx in range(0,", "new_value = args[idx : idx + 3] tokens.append(begin_patch(module, member, new_value)) try: yield finally:", "def end_patch(token): if token is None: return module, member, old_member = token if", "in range(0, len(args), 3): module, member, new_value = args[idx : idx + 3]", "member, new_value) return module, member, old_member def end_patch(token): if token is None: return", "and objects\"\"\" import contextlib import sys def begin_patch(module, member, new_value): if isinstance(module, str):", "return module, member, old_member def end_patch(token): if token is None: return module, member,", "if module not in sys.modules: return None module = sys.modules[module] if not hasattr(module,", ": idx + 3] tokens.append(begin_patch(module, member, new_value)) try: yield finally: for token in", "member) setattr(module, member, new_value) return module, member, old_member def end_patch(token): if token is", "None else: old_member = getattr(module, member) setattr(module, member, new_value) return module, member, old_member", "sys.modules: return 
None module = sys.modules[module] if not hasattr(module, member): old_member = None", "str): if module not in sys.modules: return None module = sys.modules[module] if not", "member, new_value = args[idx : idx + 3] tokens.append(begin_patch(module, member, new_value)) try: yield", "if not hasattr(module, member): old_member = None else: old_member = getattr(module, member) setattr(module,", "= args[idx : idx + 3] tokens.append(begin_patch(module, member, new_value)) try: yield finally: for", "patch in a contextmanager\"\"\" tokens = [] for idx in range(0, len(args), 3):", "return module, member, old_member = token if old_member is None: delattr(module, member) else:", "module, member, new_value = args[idx : idx + 3] tokens.append(begin_patch(module, member, new_value)) try:", "isinstance(module, str): if module not in sys.modules: return None module = sys.modules[module] if", "def begin_patch(module, member, new_value): if isinstance(module, str): if module not in sys.modules: return", "old_member = token if old_member is None: delattr(module, member) else: setattr(module, member, old_member)", "member, old_member def end_patch(token): if token is None: return module, member, old_member =", "if token is None: return module, member, old_member = token if old_member is", "idx + 3] tokens.append(begin_patch(module, member, new_value)) try: yield finally: for token in tokens[::-1]:", "token is None: return module, member, old_member = token if old_member is None:", "objects\"\"\" import contextlib import sys def begin_patch(module, member, new_value): if isinstance(module, str): if", "a contextmanager\"\"\" tokens = [] for idx in range(0, len(args), 3): module, member,", "begin_patch(module, member, new_value): if isinstance(module, str): if module not in sys.modules: return None", "contextlib import sys def begin_patch(module, member, new_value): if isinstance(module, str): if module not", "tokens = [] for idx in range(0, len(args), 3): module, member, new_value =", 
"old_member def end_patch(token): if token is None: return module, member, old_member = token", "is None: return module, member, old_member = token if old_member is None: delattr(module,", "import sys def begin_patch(module, member, new_value): if isinstance(module, str): if module not in", "3): module, member, new_value = args[idx : idx + 3] tokens.append(begin_patch(module, member, new_value))", "modules and objects\"\"\" import contextlib import sys def begin_patch(module, member, new_value): if isinstance(module,", "args[idx : idx + 3] tokens.append(begin_patch(module, member, new_value)) try: yield finally: for token", "new_value): if isinstance(module, str): if module not in sys.modules: return None module =", "token if old_member is None: delattr(module, member) else: setattr(module, member, old_member) @contextlib.contextmanager def", "old_member) @contextlib.contextmanager def patch(*args): \"\"\"Manager a patch in a contextmanager\"\"\" tokens = []", "setattr(module, member, old_member) @contextlib.contextmanager def patch(*args): \"\"\"Manager a patch in a contextmanager\"\"\" tokens", "sys def begin_patch(module, member, new_value): if isinstance(module, str): if module not in sys.modules:", "getattr(module, member) setattr(module, member, new_value) return module, member, old_member def end_patch(token): if token", "member, old_member) @contextlib.contextmanager def patch(*args): \"\"\"Manager a patch in a contextmanager\"\"\" tokens =", "patch(*args): \"\"\"Manager a patch in a contextmanager\"\"\" tokens = [] for idx in", "\"\"\"Patching modules and objects\"\"\" import contextlib import sys def begin_patch(module, member, new_value): if", "old_member is None: delattr(module, member) else: setattr(module, member, old_member) @contextlib.contextmanager def patch(*args): \"\"\"Manager", "= None else: old_member = getattr(module, member) setattr(module, member, new_value) return module, member,", "else: setattr(module, member, old_member) 
@contextlib.contextmanager def patch(*args): \"\"\"Manager a patch in a contextmanager\"\"\"", "= [] for idx in range(0, len(args), 3): module, member, new_value = args[idx", "contextmanager\"\"\" tokens = [] for idx in range(0, len(args), 3): module, member, new_value", "+ 3] tokens.append(begin_patch(module, member, new_value)) try: yield finally: for token in tokens[::-1]: end_patch(token)", "a patch in a contextmanager\"\"\" tokens = [] for idx in range(0, len(args),", "idx in range(0, len(args), 3): module, member, new_value = args[idx : idx +", "@contextlib.contextmanager def patch(*args): \"\"\"Manager a patch in a contextmanager\"\"\" tokens = [] for", "sys.modules[module] if not hasattr(module, member): old_member = None else: old_member = getattr(module, member)", "delattr(module, member) else: setattr(module, member, old_member) @contextlib.contextmanager def patch(*args): \"\"\"Manager a patch in", "in a contextmanager\"\"\" tokens = [] for idx in range(0, len(args), 3): module,", "None module = sys.modules[module] if not hasattr(module, member): old_member = None else: old_member", "not in sys.modules: return None module = sys.modules[module] if not hasattr(module, member): old_member", "if isinstance(module, str): if module not in sys.modules: return None module = sys.modules[module]", "is None: delattr(module, member) else: setattr(module, member, old_member) @contextlib.contextmanager def patch(*args): \"\"\"Manager a", "return None module = sys.modules[module] if not hasattr(module, member): old_member = None else:", "import contextlib import sys def begin_patch(module, member, new_value): if isinstance(module, str): if module", "module, member, old_member def end_patch(token): if token is None: return module, member, old_member", "old_member = None else: old_member = getattr(module, member) setattr(module, member, new_value) return module,", "module = sys.modules[module] if not hasattr(module, member): old_member = None else: old_member =", "= 
sys.modules[module] if not hasattr(module, member): old_member = None else: old_member = getattr(module,", "module, member, old_member = token if old_member is None: delattr(module, member) else: setattr(module,", "new_value) return module, member, old_member def end_patch(token): if token is None: return module,", "end_patch(token): if token is None: return module, member, old_member = token if old_member", "= getattr(module, member) setattr(module, member, new_value) return module, member, old_member def end_patch(token): if", "None: delattr(module, member) else: setattr(module, member, old_member) @contextlib.contextmanager def patch(*args): \"\"\"Manager a patch", "setattr(module, member, new_value) return module, member, old_member def end_patch(token): if token is None:", "if old_member is None: delattr(module, member) else: setattr(module, member, old_member) @contextlib.contextmanager def patch(*args):", "else: old_member = getattr(module, member) setattr(module, member, new_value) return module, member, old_member def", "range(0, len(args), 3): module, member, new_value = args[idx : idx + 3] tokens.append(begin_patch(module,", "not hasattr(module, member): old_member = None else: old_member = getattr(module, member) setattr(module, member,", "for idx in range(0, len(args), 3): module, member, new_value = args[idx : idx", "def patch(*args): \"\"\"Manager a patch in a contextmanager\"\"\" tokens = [] for idx", "old_member = getattr(module, member) setattr(module, member, new_value) return module, member, old_member def end_patch(token):", "member, new_value): if isinstance(module, str): if module not in sys.modules: return None module", "= token if old_member is None: delattr(module, member) else: setattr(module, member, old_member) @contextlib.contextmanager", "module not in sys.modules: return None module = sys.modules[module] if not hasattr(module, member):" ]
[ "stack def insert_element_in_ordered_stack(stack, value): if len(stack) == 0 or stack[-1] <= value: stack.append(value)", "0: return stack top = stack.pop() sort_array(stack) insert_element_in_ordered_stack(stack, top) return stack def insert_element_in_ordered_stack(stack,", "insert_element_in_ordered_stack(stack, value) stack.append(top) if __name__ == '__main__': print(sort_array([-5, 2, -2, 4, 3, 1]))", "return top = stack.pop() insert_element_in_ordered_stack(stack, value) stack.append(top) if __name__ == '__main__': print(sort_array([-5, 2,", "stack[-1] <= value: stack.append(value) return top = stack.pop() insert_element_in_ordered_stack(stack, value) stack.append(top) if __name__", "insert_element_in_ordered_stack(stack, value): if len(stack) == 0 or stack[-1] <= value: stack.append(value) return top", "return stack def insert_element_in_ordered_stack(stack, value): if len(stack) == 0 or stack[-1] <= value:", "top = stack.pop() sort_array(stack) insert_element_in_ordered_stack(stack, top) return stack def insert_element_in_ordered_stack(stack, value): if len(stack)", "<= value: stack.append(value) return top = stack.pop() insert_element_in_ordered_stack(stack, value) stack.append(top) if __name__ ==", "if len(stack) == 0: return stack top = stack.pop() sort_array(stack) insert_element_in_ordered_stack(stack, top) return", "sort_array(stack) insert_element_in_ordered_stack(stack, top) return stack def insert_element_in_ordered_stack(stack, value): if len(stack) == 0 or", "len(stack) == 0 or stack[-1] <= value: stack.append(value) return top = stack.pop() insert_element_in_ordered_stack(stack,", "len(stack) == 0: return stack top = stack.pop() sort_array(stack) insert_element_in_ordered_stack(stack, top) return stack", "== 0 or stack[-1] <= value: stack.append(value) return top = stack.pop() insert_element_in_ordered_stack(stack, value)", "or stack[-1] <= value: stack.append(value) return top = stack.pop() insert_element_in_ordered_stack(stack, value) 
stack.append(top) if", "stack top = stack.pop() sort_array(stack) insert_element_in_ordered_stack(stack, top) return stack def insert_element_in_ordered_stack(stack, value): if", "= stack.pop() sort_array(stack) insert_element_in_ordered_stack(stack, top) return stack def insert_element_in_ordered_stack(stack, value): if len(stack) ==", "value): if len(stack) == 0 or stack[-1] <= value: stack.append(value) return top =", "== 0: return stack top = stack.pop() sort_array(stack) insert_element_in_ordered_stack(stack, top) return stack def", "0 or stack[-1] <= value: stack.append(value) return top = stack.pop() insert_element_in_ordered_stack(stack, value) stack.append(top)", "def insert_element_in_ordered_stack(stack, value): if len(stack) == 0 or stack[-1] <= value: stack.append(value) return", "sort_array(stack): if len(stack) == 0: return stack top = stack.pop() sort_array(stack) insert_element_in_ordered_stack(stack, top)", "def sort_array(stack): if len(stack) == 0: return stack top = stack.pop() sort_array(stack) insert_element_in_ordered_stack(stack,", "stack.pop() insert_element_in_ordered_stack(stack, value) stack.append(top) if __name__ == '__main__': print(sort_array([-5, 2, -2, 4, 3,", "top) return stack def insert_element_in_ordered_stack(stack, value): if len(stack) == 0 or stack[-1] <=", "return stack top = stack.pop() sort_array(stack) insert_element_in_ordered_stack(stack, top) return stack def insert_element_in_ordered_stack(stack, value):", "stack.pop() sort_array(stack) insert_element_in_ordered_stack(stack, top) return stack def insert_element_in_ordered_stack(stack, value): if len(stack) == 0", "if len(stack) == 0 or stack[-1] <= value: stack.append(value) return top = stack.pop()", "insert_element_in_ordered_stack(stack, top) return stack def insert_element_in_ordered_stack(stack, value): if len(stack) == 0 or stack[-1]", "top = stack.pop() insert_element_in_ordered_stack(stack, value) stack.append(top) if __name__ == '__main__': 
print(sort_array([-5, 2, -2,", "= stack.pop() insert_element_in_ordered_stack(stack, value) stack.append(top) if __name__ == '__main__': print(sort_array([-5, 2, -2, 4,", "value: stack.append(value) return top = stack.pop() insert_element_in_ordered_stack(stack, value) stack.append(top) if __name__ == '__main__':", "stack.append(value) return top = stack.pop() insert_element_in_ordered_stack(stack, value) stack.append(top) if __name__ == '__main__': print(sort_array([-5," ]
[ "PipelineData, PipelineEndpoint) from azureml.pipeline.core._restclients.aeva.models.error_response import \\ ErrorResponseException from azureml.pipeline.steps import (ParallelRunConfig, ParallelRunStep, PythonScriptStep)", "pipeline_endpoint = PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name) pipeline_endpoint.add_default(published_pipeline) except ErrorResponseException: pipeline_endpoint = PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name, pipeline=published_pipeline, description=env.pipeline_endpoint_name)", "from azureml.pipeline.core import (Pipeline, PipelineData, PipelineEndpoint) from azureml.pipeline.core._restclients.aeva.models.error_response import \\ ErrorResponseException from azureml.pipeline.steps", "getLogger from azureml.core import Environment, Workspace from azureml.core.conda_dependencies import CondaDependencies from azureml.core.runconfig import", "source_directory=env.sources_directory_train, script_name=env.preparation_step_script_path, outputs=[preparation_pipelinedata], arguments=[ \"--input_path\", env.input_dir, \"--output_path\", preparation_pipelinedata, \"--datastore_name\", env.blob_datastore_name ], runconfig=run_config )", "= ParallelRunConfig( source_directory=env.sources_directory_train, entry_script=env.extraction_step_script_path, mini_batch_size=env.mini_batch_size, error_threshold=env.error_threshold, output_action=\"append_row\", environment=environment, compute_target=aml_compute, node_count=env.node_count, run_invocation_timeout=env.run_invocation_timeout, process_count_per_node=env.process_count_per_node, append_row_file_name=\"extraction_output.txt\")", "cluster: {aml_compute}\") # Azure ML environment environment = Environment(name=env.aml_env_name) conda_dep = CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\") environment.python.conda_dependencies", "pipeline.publish(env.pipeline_name, 
description=env.pipeline_name, version=datetime.utcnow().isoformat()) try: pipeline_endpoint = PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name) pipeline_endpoint.add_default(published_pipeline) except ErrorResponseException: pipeline_endpoint =", "from azureml.pipeline.steps import (ParallelRunConfig, ParallelRunStep, PythonScriptStep) from ml_service.util.env_variables import Env from ml_service.util.pipeline_utils import", "import (Pipeline, PipelineData, PipelineEndpoint) from azureml.pipeline.core._restclients.aeva.models.error_response import \\ ErrorResponseException from azureml.pipeline.steps import (ParallelRunConfig,", "extraction_pipelinedata, \"--output_dir\", training_pipelinedata ], runconfig=run_config ) step_list.append(training_step) # Build pipeline pipeline = Pipeline(workspace=aml_workspace,", "Environment variables env = Env() # Azure ML workspace aml_workspace = Workspace.get( name=env.workspace_name,", "pipeline steps step_list = list() preparation_step = PythonScriptStep( name=\"preparation-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.preparation_step_script_path, outputs=[preparation_pipelinedata],", "Azure ML compute cluster aml_compute = get_compute(aml_workspace, env.compute_name) logger.info(f\"Aazure ML compute cluster: {aml_compute}\")", "environment=environment, compute_target=aml_compute, node_count=env.node_count, run_invocation_timeout=env.run_invocation_timeout, process_count_per_node=env.process_count_per_node, append_row_file_name=\"extraction_output.txt\") extraction_step = ParallelRunStep( name=\"extraction-step\", inputs=[preparation_pipelinedata], output=extraction_pipelinedata, arguments=[", "Env() # Azure ML workspace aml_workspace = Workspace.get( name=env.workspace_name, subscription_id=env.subscription_id, resource_group=env.resource_group, ) logger.info(f\"Azure", "(ParallelRunConfig, ParallelRunStep, PythonScriptStep) from 
ml_service.util.env_variables import Env from ml_service.util.pipeline_utils import get_compute def main():", "azureml.pipeline.core._restclients.aeva.models.error_response import \\ ErrorResponseException from azureml.pipeline.steps import (ParallelRunConfig, ParallelRunStep, PythonScriptStep) from ml_service.util.env_variables import", "pipeline = Pipeline(workspace=aml_workspace, steps=step_list) pipeline.validate() logger.info(f\"Built pipeline {pipeline}\") # Publish pipeline published_pipeline =", "is_directory=True) # List of pipeline steps step_list = list() preparation_step = PythonScriptStep( name=\"preparation-step\",", "published_pipeline = pipeline.publish(env.pipeline_name, description=env.pipeline_name, version=datetime.utcnow().isoformat()) try: pipeline_endpoint = PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name) pipeline_endpoint.add_default(published_pipeline) except ErrorResponseException:", "compute_target=aml_compute, node_count=env.node_count, run_invocation_timeout=env.run_invocation_timeout, process_count_per_node=env.process_count_per_node, append_row_file_name=\"extraction_output.txt\") extraction_step = ParallelRunStep( name=\"extraction-step\", inputs=[preparation_pipelinedata], output=extraction_pipelinedata, arguments=[ \"--output_dir\",", "entry_script=env.extraction_step_script_path, mini_batch_size=env.mini_batch_size, error_threshold=env.error_threshold, output_action=\"append_row\", environment=environment, compute_target=aml_compute, node_count=env.node_count, run_invocation_timeout=env.run_invocation_timeout, process_count_per_node=env.process_count_per_node, append_row_file_name=\"extraction_output.txt\") extraction_step = ParallelRunStep(", "# Azure ML environment environment = Environment(name=env.aml_env_name) conda_dep = CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\") environment.python.conda_dependencies = conda_dep", "getLogger(__name__) 
logger.setLevel(INFO) logger.propagate = False sh = StreamHandler() sh.setFormatter(Formatter(\"%(asctime)s - %(name)s - %(levelname)s", "PythonScriptStep( name=\"preparation-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.preparation_step_script_path, outputs=[preparation_pipelinedata], arguments=[ \"--input_path\", env.input_dir, \"--output_path\", preparation_pipelinedata, \"--datastore_name\", env.blob_datastore_name", "except ErrorResponseException: pipeline_endpoint = PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name, pipeline=published_pipeline, description=env.pipeline_endpoint_name) if __name__ == \"__main__\": logger", "(Pipeline, PipelineData, PipelineEndpoint) from azureml.pipeline.core._restclients.aeva.models.error_response import \\ ErrorResponseException from azureml.pipeline.steps import (ParallelRunConfig, ParallelRunStep,", "resource_group=env.resource_group, ) logger.info(f\"Azure ML workspace: {aml_workspace}\") # Azure ML compute cluster aml_compute =", "pipeline {pipeline}\") # Publish pipeline published_pipeline = pipeline.publish(env.pipeline_name, description=env.pipeline_name, version=datetime.utcnow().isoformat()) try: pipeline_endpoint =", "preparation_pipelinedata, \"--datastore_name\", env.blob_datastore_name ], runconfig=run_config ) step_list.append(preparation_step) parallel_run_config = ParallelRunConfig( source_directory=env.sources_directory_train, entry_script=env.extraction_step_script_path, mini_batch_size=env.mini_batch_size,", "list() preparation_step = PythonScriptStep( name=\"preparation-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.preparation_step_script_path, outputs=[preparation_pipelinedata], arguments=[ \"--input_path\", env.input_dir, \"--output_path\",", "parallel_run_config = ParallelRunConfig( source_directory=env.sources_directory_train, 
entry_script=env.extraction_step_script_path, mini_batch_size=env.mini_batch_size, error_threshold=env.error_threshold, output_action=\"append_row\", environment=environment, compute_target=aml_compute, node_count=env.node_count, run_invocation_timeout=env.run_invocation_timeout, process_count_per_node=env.process_count_per_node,", "step_list.append(extraction_step) training_step = PythonScriptStep( name=\"traning-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.training_step_script_path, inputs=[extraction_pipelinedata], outputs=[training_pipelinedata], arguments=[ \"--input_dir\", extraction_pipelinedata,", "training_pipelinedata ], runconfig=run_config ) step_list.append(training_step) # Build pipeline pipeline = Pipeline(workspace=aml_workspace, steps=step_list) pipeline.validate()", "cluster aml_compute = get_compute(aml_workspace, env.compute_name) logger.info(f\"Aazure ML compute cluster: {aml_compute}\") # Azure ML", "PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name, pipeline=published_pipeline, description=env.pipeline_endpoint_name) if __name__ == \"__main__\": logger = getLogger(__name__) logger.setLevel(INFO) logger.propagate", "azureml.core.runconfig import RunConfiguration from azureml.pipeline.core import (Pipeline, PipelineData, PipelineEndpoint) from azureml.pipeline.core._restclients.aeva.models.error_response import \\", "try: pipeline_endpoint = PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name) pipeline_endpoint.add_default(published_pipeline) except ErrorResponseException: pipeline_endpoint = PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name, pipeline=published_pipeline,", "def main(): \"\"\"Build pipeline.\"\"\" # Environment variables env = Env() # Azure ML", "<filename>samples/parallel-processing-california-housing-data/ml_service/pipelines/build_pipeline.py \"\"\"Build pipeline.\"\"\" from datetime 
import datetime from logging import INFO, Formatter, StreamHandler,", "name=\"traning-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.training_step_script_path, inputs=[extraction_pipelinedata], outputs=[training_pipelinedata], arguments=[ \"--input_dir\", extraction_pipelinedata, \"--output_dir\", training_pipelinedata ], runconfig=run_config", "pipeline.validate() logger.info(f\"Built pipeline {pipeline}\") # Publish pipeline published_pipeline = pipeline.publish(env.pipeline_name, description=env.pipeline_name, version=datetime.utcnow().isoformat()) try:", "script_name=env.training_step_script_path, inputs=[extraction_pipelinedata], outputs=[training_pipelinedata], arguments=[ \"--input_dir\", extraction_pipelinedata, \"--output_dir\", training_pipelinedata ], runconfig=run_config ) step_list.append(training_step) #", "PipelineData(\"preparation_pipelinedata\", is_directory=True).as_dataset() extraction_pipelinedata = PipelineData(\"extraction_pipelinedata\", is_directory=True) training_pipelinedata = PipelineData(\"training_pipelinedata\", is_directory=True) # List of", "# List of pipeline steps step_list = list() preparation_step = PythonScriptStep( name=\"preparation-step\", compute_target=aml_compute,", "PythonScriptStep( name=\"traning-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.training_step_script_path, inputs=[extraction_pipelinedata], outputs=[training_pipelinedata], arguments=[ \"--input_dir\", extraction_pipelinedata, \"--output_dir\", training_pipelinedata ],", "logging import INFO, Formatter, StreamHandler, getLogger from azureml.core import Environment, Workspace from azureml.core.conda_dependencies", "steps step_list = list() preparation_step = PythonScriptStep( name=\"preparation-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.preparation_step_script_path, outputs=[preparation_pipelinedata], 
arguments=[", "version=datetime.utcnow().isoformat()) try: pipeline_endpoint = PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name) pipeline_endpoint.add_default(published_pipeline) except ErrorResponseException: pipeline_endpoint = PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name,", "= PythonScriptStep( name=\"preparation-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.preparation_step_script_path, outputs=[preparation_pipelinedata], arguments=[ \"--input_path\", env.input_dir, \"--output_path\", preparation_pipelinedata, \"--datastore_name\",", "\"--output_dir\", training_pipelinedata ], runconfig=run_config ) step_list.append(training_step) # Build pipeline pipeline = Pipeline(workspace=aml_workspace, steps=step_list)", "Azure ML workspace aml_workspace = Workspace.get( name=env.workspace_name, subscription_id=env.subscription_id, resource_group=env.resource_group, ) logger.info(f\"Azure ML workspace:", "append_row_file_name=\"extraction_output.txt\") extraction_step = ParallelRunStep( name=\"extraction-step\", inputs=[preparation_pipelinedata], output=extraction_pipelinedata, arguments=[ \"--output_dir\", extraction_pipelinedata ], parallel_run_config=parallel_run_config )", "inputs=[preparation_pipelinedata], output=extraction_pipelinedata, arguments=[ \"--output_dir\", extraction_pipelinedata ], parallel_run_config=parallel_run_config ) step_list.append(extraction_step) training_step = PythonScriptStep( name=\"traning-step\",", "Data preparation_pipelinedata = PipelineData(\"preparation_pipelinedata\", is_directory=True).as_dataset() extraction_pipelinedata = PipelineData(\"extraction_pipelinedata\", is_directory=True) training_pipelinedata = PipelineData(\"training_pipelinedata\", is_directory=True)", "import INFO, Formatter, StreamHandler, getLogger from azureml.core import Environment, Workspace from azureml.core.conda_dependencies import", "# 
Pipeline Data preparation_pipelinedata = PipelineData(\"preparation_pipelinedata\", is_directory=True).as_dataset() extraction_pipelinedata = PipelineData(\"extraction_pipelinedata\", is_directory=True) training_pipelinedata =", "False sh = StreamHandler() sh.setFormatter(Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")) logger.addHandler(sh) main()", "from ml_service.util.env_variables import Env from ml_service.util.pipeline_utils import get_compute def main(): \"\"\"Build pipeline.\"\"\" #", "{pipeline}\") # Publish pipeline published_pipeline = pipeline.publish(env.pipeline_name, description=env.pipeline_name, version=datetime.utcnow().isoformat()) try: pipeline_endpoint = PipelineEndpoint.get(workspace=aml_workspace,", "environment # Pipeline Data preparation_pipelinedata = PipelineData(\"preparation_pipelinedata\", is_directory=True).as_dataset() extraction_pipelinedata = PipelineData(\"extraction_pipelinedata\", is_directory=True) training_pipelinedata", "import get_compute def main(): \"\"\"Build pipeline.\"\"\" # Environment variables env = Env() #", "ML environment environment = Environment(name=env.aml_env_name) conda_dep = CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\") environment.python.conda_dependencies = conda_dep run_config =", "= CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\") environment.python.conda_dependencies = conda_dep run_config = RunConfiguration() run_config.environment = environment # Pipeline", "pipeline.\"\"\" # Environment variables env = Env() # Azure ML workspace aml_workspace =", "name=env.pipeline_endpoint_name, pipeline=published_pipeline, description=env.pipeline_endpoint_name) if __name__ == \"__main__\": logger = getLogger(__name__) logger.setLevel(INFO) logger.propagate =", "\"\"\"Build pipeline.\"\"\" from datetime import datetime from logging import INFO, Formatter, StreamHandler, getLogger", "logger.info(f\"Azure 
ML workspace: {aml_workspace}\") # Azure ML compute cluster aml_compute = get_compute(aml_workspace, env.compute_name)", "error_threshold=env.error_threshold, output_action=\"append_row\", environment=environment, compute_target=aml_compute, node_count=env.node_count, run_invocation_timeout=env.run_invocation_timeout, process_count_per_node=env.process_count_per_node, append_row_file_name=\"extraction_output.txt\") extraction_step = ParallelRunStep( name=\"extraction-step\", inputs=[preparation_pipelinedata],", "outputs=[training_pipelinedata], arguments=[ \"--input_dir\", extraction_pipelinedata, \"--output_dir\", training_pipelinedata ], runconfig=run_config ) step_list.append(training_step) # Build pipeline", "Environment, Workspace from azureml.core.conda_dependencies import CondaDependencies from azureml.core.runconfig import RunConfiguration from azureml.pipeline.core import", "from ml_service.util.pipeline_utils import get_compute def main(): \"\"\"Build pipeline.\"\"\" # Environment variables env =", "= PythonScriptStep( name=\"traning-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.training_step_script_path, inputs=[extraction_pipelinedata], outputs=[training_pipelinedata], arguments=[ \"--input_dir\", extraction_pipelinedata, \"--output_dir\", training_pipelinedata", "pipeline=published_pipeline, description=env.pipeline_endpoint_name) if __name__ == \"__main__\": logger = getLogger(__name__) logger.setLevel(INFO) logger.propagate = False", "List of pipeline steps step_list = list() preparation_step = PythonScriptStep( name=\"preparation-step\", compute_target=aml_compute, source_directory=env.sources_directory_train,", "# Environment variables env = Env() # Azure ML workspace aml_workspace = Workspace.get(", "azureml.pipeline.steps import (ParallelRunConfig, ParallelRunStep, PythonScriptStep) from ml_service.util.env_variables import Env from ml_service.util.pipeline_utils import get_compute", "compute 
cluster: {aml_compute}\") # Azure ML environment environment = Environment(name=env.aml_env_name) conda_dep = CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\")", "logger.info(f\"Built pipeline {pipeline}\") # Publish pipeline published_pipeline = pipeline.publish(env.pipeline_name, description=env.pipeline_name, version=datetime.utcnow().isoformat()) try: pipeline_endpoint", "from logging import INFO, Formatter, StreamHandler, getLogger from azureml.core import Environment, Workspace from", "PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name) pipeline_endpoint.add_default(published_pipeline) except ErrorResponseException: pipeline_endpoint = PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name, pipeline=published_pipeline, description=env.pipeline_endpoint_name) if __name__", "run_invocation_timeout=env.run_invocation_timeout, process_count_per_node=env.process_count_per_node, append_row_file_name=\"extraction_output.txt\") extraction_step = ParallelRunStep( name=\"extraction-step\", inputs=[preparation_pipelinedata], output=extraction_pipelinedata, arguments=[ \"--output_dir\", extraction_pipelinedata ],", "= Workspace.get( name=env.workspace_name, subscription_id=env.subscription_id, resource_group=env.resource_group, ) logger.info(f\"Azure ML workspace: {aml_workspace}\") # Azure ML", "], runconfig=run_config ) step_list.append(training_step) # Build pipeline pipeline = Pipeline(workspace=aml_workspace, steps=step_list) pipeline.validate() logger.info(f\"Built", "from datetime import datetime from logging import INFO, Formatter, StreamHandler, getLogger from azureml.core", "compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.training_step_script_path, inputs=[extraction_pipelinedata], outputs=[training_pipelinedata], arguments=[ \"--input_dir\", extraction_pipelinedata, \"--output_dir\", training_pipelinedata ], 
runconfig=run_config )", "# Azure ML workspace aml_workspace = Workspace.get( name=env.workspace_name, subscription_id=env.subscription_id, resource_group=env.resource_group, ) logger.info(f\"Azure ML", "from azureml.core.conda_dependencies import CondaDependencies from azureml.core.runconfig import RunConfiguration from azureml.pipeline.core import (Pipeline, PipelineData,", "get_compute(aml_workspace, env.compute_name) logger.info(f\"Aazure ML compute cluster: {aml_compute}\") # Azure ML environment environment =", "= PipelineData(\"extraction_pipelinedata\", is_directory=True) training_pipelinedata = PipelineData(\"training_pipelinedata\", is_directory=True) # List of pipeline steps step_list", "conda_dep run_config = RunConfiguration() run_config.environment = environment # Pipeline Data preparation_pipelinedata = PipelineData(\"preparation_pipelinedata\",", "RunConfiguration from azureml.pipeline.core import (Pipeline, PipelineData, PipelineEndpoint) from azureml.pipeline.core._restclients.aeva.models.error_response import \\ ErrorResponseException from", "env.blob_datastore_name ], runconfig=run_config ) step_list.append(preparation_step) parallel_run_config = ParallelRunConfig( source_directory=env.sources_directory_train, entry_script=env.extraction_step_script_path, mini_batch_size=env.mini_batch_size, error_threshold=env.error_threshold, output_action=\"append_row\",", "pipeline_endpoint = PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name, pipeline=published_pipeline, description=env.pipeline_endpoint_name) if __name__ == \"__main__\": logger = getLogger(__name__)", "from azureml.core.runconfig import RunConfiguration from azureml.pipeline.core import (Pipeline, PipelineData, PipelineEndpoint) from azureml.pipeline.core._restclients.aeva.models.error_response import", "= Environment(name=env.aml_env_name) conda_dep = CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\") 
environment.python.conda_dependencies = conda_dep run_config = RunConfiguration() run_config.environment =", "= Env() # Azure ML workspace aml_workspace = Workspace.get( name=env.workspace_name, subscription_id=env.subscription_id, resource_group=env.resource_group, )", "workspace: {aml_workspace}\") # Azure ML compute cluster aml_compute = get_compute(aml_workspace, env.compute_name) logger.info(f\"Aazure ML", "= conda_dep run_config = RunConfiguration() run_config.environment = environment # Pipeline Data preparation_pipelinedata =", "Workspace from azureml.core.conda_dependencies import CondaDependencies from azureml.core.runconfig import RunConfiguration from azureml.pipeline.core import (Pipeline,", "\"--output_path\", preparation_pipelinedata, \"--datastore_name\", env.blob_datastore_name ], runconfig=run_config ) step_list.append(preparation_step) parallel_run_config = ParallelRunConfig( source_directory=env.sources_directory_train, entry_script=env.extraction_step_script_path,", "= Pipeline(workspace=aml_workspace, steps=step_list) pipeline.validate() logger.info(f\"Built pipeline {pipeline}\") # Publish pipeline published_pipeline = pipeline.publish(env.pipeline_name,", "logger.setLevel(INFO) logger.propagate = False sh = StreamHandler() sh.setFormatter(Formatter(\"%(asctime)s - %(name)s - %(levelname)s -", "Publish pipeline published_pipeline = pipeline.publish(env.pipeline_name, description=env.pipeline_name, version=datetime.utcnow().isoformat()) try: pipeline_endpoint = PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name) pipeline_endpoint.add_default(published_pipeline)", "\"__main__\": logger = getLogger(__name__) logger.setLevel(INFO) logger.propagate = False sh = StreamHandler() sh.setFormatter(Formatter(\"%(asctime)s -", "CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\") environment.python.conda_dependencies = conda_dep run_config = RunConfiguration() run_config.environment = 
environment # Pipeline Data", "Pipeline(workspace=aml_workspace, steps=step_list) pipeline.validate() logger.info(f\"Built pipeline {pipeline}\") # Publish pipeline published_pipeline = pipeline.publish(env.pipeline_name, description=env.pipeline_name,", "ml_service.util.env_variables import Env from ml_service.util.pipeline_utils import get_compute def main(): \"\"\"Build pipeline.\"\"\" # Environment", "step_list = list() preparation_step = PythonScriptStep( name=\"preparation-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.preparation_step_script_path, outputs=[preparation_pipelinedata], arguments=[ \"--input_path\",", "description=env.pipeline_name, version=datetime.utcnow().isoformat()) try: pipeline_endpoint = PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name) pipeline_endpoint.add_default(published_pipeline) except ErrorResponseException: pipeline_endpoint = PipelineEndpoint.publish(workspace=aml_workspace,", "logger = getLogger(__name__) logger.setLevel(INFO) logger.propagate = False sh = StreamHandler() sh.setFormatter(Formatter(\"%(asctime)s - %(name)s", "ML compute cluster aml_compute = get_compute(aml_workspace, env.compute_name) logger.info(f\"Aazure ML compute cluster: {aml_compute}\") #", "logger.info(f\"Aazure ML compute cluster: {aml_compute}\") # Azure ML environment environment = Environment(name=env.aml_env_name) conda_dep", "ParallelRunConfig( source_directory=env.sources_directory_train, entry_script=env.extraction_step_script_path, mini_batch_size=env.mini_batch_size, error_threshold=env.error_threshold, output_action=\"append_row\", environment=environment, compute_target=aml_compute, node_count=env.node_count, run_invocation_timeout=env.run_invocation_timeout, process_count_per_node=env.process_count_per_node, append_row_file_name=\"extraction_output.txt\") extraction_step", "azureml.core import Environment, Workspace from azureml.core.conda_dependencies import 
CondaDependencies from azureml.core.runconfig import RunConfiguration from", "# Publish pipeline published_pipeline = pipeline.publish(env.pipeline_name, description=env.pipeline_name, version=datetime.utcnow().isoformat()) try: pipeline_endpoint = PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name)", "run_config.environment = environment # Pipeline Data preparation_pipelinedata = PipelineData(\"preparation_pipelinedata\", is_directory=True).as_dataset() extraction_pipelinedata = PipelineData(\"extraction_pipelinedata\",", "= PipelineData(\"training_pipelinedata\", is_directory=True) # List of pipeline steps step_list = list() preparation_step =", "Workspace.get( name=env.workspace_name, subscription_id=env.subscription_id, resource_group=env.resource_group, ) logger.info(f\"Azure ML workspace: {aml_workspace}\") # Azure ML compute", "description=env.pipeline_endpoint_name) if __name__ == \"__main__\": logger = getLogger(__name__) logger.setLevel(INFO) logger.propagate = False sh", "name=\"preparation-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.preparation_step_script_path, outputs=[preparation_pipelinedata], arguments=[ \"--input_path\", env.input_dir, \"--output_path\", preparation_pipelinedata, \"--datastore_name\", env.blob_datastore_name ],", "pipeline pipeline = Pipeline(workspace=aml_workspace, steps=step_list) pipeline.validate() logger.info(f\"Built pipeline {pipeline}\") # Publish pipeline published_pipeline", "environment.python.conda_dependencies = conda_dep run_config = RunConfiguration() run_config.environment = environment # Pipeline Data preparation_pipelinedata", "import Env from ml_service.util.pipeline_utils import get_compute def main(): \"\"\"Build pipeline.\"\"\" # Environment variables", "name=env.workspace_name, subscription_id=env.subscription_id, resource_group=env.resource_group, ) logger.info(f\"Azure ML workspace: {aml_workspace}\") # Azure ML compute 
cluster", "pipeline_endpoint.add_default(published_pipeline) except ErrorResponseException: pipeline_endpoint = PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name, pipeline=published_pipeline, description=env.pipeline_endpoint_name) if __name__ == \"__main__\":", "Build pipeline pipeline = Pipeline(workspace=aml_workspace, steps=step_list) pipeline.validate() logger.info(f\"Built pipeline {pipeline}\") # Publish pipeline", "Environment(name=env.aml_env_name) conda_dep = CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\") environment.python.conda_dependencies = conda_dep run_config = RunConfiguration() run_config.environment = environment", "mini_batch_size=env.mini_batch_size, error_threshold=env.error_threshold, output_action=\"append_row\", environment=environment, compute_target=aml_compute, node_count=env.node_count, run_invocation_timeout=env.run_invocation_timeout, process_count_per_node=env.process_count_per_node, append_row_file_name=\"extraction_output.txt\") extraction_step = ParallelRunStep( name=\"extraction-step\",", ") logger.info(f\"Azure ML workspace: {aml_workspace}\") # Azure ML compute cluster aml_compute = get_compute(aml_workspace,", "Pipeline Data preparation_pipelinedata = PipelineData(\"preparation_pipelinedata\", is_directory=True).as_dataset() extraction_pipelinedata = PipelineData(\"extraction_pipelinedata\", is_directory=True) training_pipelinedata = PipelineData(\"training_pipelinedata\",", "runconfig=run_config ) step_list.append(training_step) # Build pipeline pipeline = Pipeline(workspace=aml_workspace, steps=step_list) pipeline.validate() logger.info(f\"Built pipeline", "conda_dep = CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\") environment.python.conda_dependencies = conda_dep run_config = RunConfiguration() run_config.environment = environment #", "inputs=[extraction_pipelinedata], outputs=[training_pipelinedata], 
arguments=[ \"--input_dir\", extraction_pipelinedata, \"--output_dir\", training_pipelinedata ], runconfig=run_config ) step_list.append(training_step) # Build", "ML workspace: {aml_workspace}\") # Azure ML compute cluster aml_compute = get_compute(aml_workspace, env.compute_name) logger.info(f\"Aazure", "= environment # Pipeline Data preparation_pipelinedata = PipelineData(\"preparation_pipelinedata\", is_directory=True).as_dataset() extraction_pipelinedata = PipelineData(\"extraction_pipelinedata\", is_directory=True)", "source_directory=env.sources_directory_train, script_name=env.training_step_script_path, inputs=[extraction_pipelinedata], outputs=[training_pipelinedata], arguments=[ \"--input_dir\", extraction_pipelinedata, \"--output_dir\", training_pipelinedata ], runconfig=run_config ) step_list.append(training_step)", "runconfig=run_config ) step_list.append(preparation_step) parallel_run_config = ParallelRunConfig( source_directory=env.sources_directory_train, entry_script=env.extraction_step_script_path, mini_batch_size=env.mini_batch_size, error_threshold=env.error_threshold, output_action=\"append_row\", environment=environment, compute_target=aml_compute,", "output=extraction_pipelinedata, arguments=[ \"--output_dir\", extraction_pipelinedata ], parallel_run_config=parallel_run_config ) step_list.append(extraction_step) training_step = PythonScriptStep( name=\"traning-step\", compute_target=aml_compute,", "ParallelRunStep, PythonScriptStep) from ml_service.util.env_variables import Env from ml_service.util.pipeline_utils import get_compute def main(): \"\"\"Build", "env.compute_name) logger.info(f\"Aazure ML compute cluster: {aml_compute}\") # Azure ML environment environment = Environment(name=env.aml_env_name)", "\"--datastore_name\", env.blob_datastore_name ], runconfig=run_config ) step_list.append(preparation_step) parallel_run_config = ParallelRunConfig( source_directory=env.sources_directory_train, entry_script=env.extraction_step_script_path, 
mini_batch_size=env.mini_batch_size, error_threshold=env.error_threshold,", "compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.preparation_step_script_path, outputs=[preparation_pipelinedata], arguments=[ \"--input_path\", env.input_dir, \"--output_path\", preparation_pipelinedata, \"--datastore_name\", env.blob_datastore_name ], runconfig=run_config", "= getLogger(__name__) logger.setLevel(INFO) logger.propagate = False sh = StreamHandler() sh.setFormatter(Formatter(\"%(asctime)s - %(name)s -", "ErrorResponseException from azureml.pipeline.steps import (ParallelRunConfig, ParallelRunStep, PythonScriptStep) from ml_service.util.env_variables import Env from ml_service.util.pipeline_utils", "PipelineEndpoint) from azureml.pipeline.core._restclients.aeva.models.error_response import \\ ErrorResponseException from azureml.pipeline.steps import (ParallelRunConfig, ParallelRunStep, PythonScriptStep) from", "import CondaDependencies from azureml.core.runconfig import RunConfiguration from azureml.pipeline.core import (Pipeline, PipelineData, PipelineEndpoint) from", ") step_list.append(training_step) # Build pipeline pipeline = Pipeline(workspace=aml_workspace, steps=step_list) pipeline.validate() logger.info(f\"Built pipeline {pipeline}\")", "Env from ml_service.util.pipeline_utils import get_compute def main(): \"\"\"Build pipeline.\"\"\" # Environment variables env", "import \\ ErrorResponseException from azureml.pipeline.steps import (ParallelRunConfig, ParallelRunStep, PythonScriptStep) from ml_service.util.env_variables import Env", "azureml.pipeline.core import (Pipeline, PipelineData, PipelineEndpoint) from azureml.pipeline.core._restclients.aeva.models.error_response import \\ ErrorResponseException from azureml.pipeline.steps import", "env = Env() # Azure ML workspace aml_workspace = Workspace.get( name=env.workspace_name, subscription_id=env.subscription_id, resource_group=env.resource_group,", "= list() preparation_step 
= PythonScriptStep( name=\"preparation-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.preparation_step_script_path, outputs=[preparation_pipelinedata], arguments=[ \"--input_path\", env.input_dir,", "logger.propagate = False sh = StreamHandler() sh.setFormatter(Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"))", "output_action=\"append_row\", environment=environment, compute_target=aml_compute, node_count=env.node_count, run_invocation_timeout=env.run_invocation_timeout, process_count_per_node=env.process_count_per_node, append_row_file_name=\"extraction_output.txt\") extraction_step = ParallelRunStep( name=\"extraction-step\", inputs=[preparation_pipelinedata], output=extraction_pipelinedata,", "arguments=[ \"--input_path\", env.input_dir, \"--output_path\", preparation_pipelinedata, \"--datastore_name\", env.blob_datastore_name ], runconfig=run_config ) step_list.append(preparation_step) parallel_run_config =", "= PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name) pipeline_endpoint.add_default(published_pipeline) except ErrorResponseException: pipeline_endpoint = PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name, pipeline=published_pipeline, description=env.pipeline_endpoint_name) if", "= PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name, pipeline=published_pipeline, description=env.pipeline_endpoint_name) if __name__ == \"__main__\": logger = getLogger(__name__) logger.setLevel(INFO)", ") step_list.append(preparation_step) parallel_run_config = ParallelRunConfig( source_directory=env.sources_directory_train, entry_script=env.extraction_step_script_path, mini_batch_size=env.mini_batch_size, error_threshold=env.error_threshold, output_action=\"append_row\", environment=environment, compute_target=aml_compute, node_count=env.node_count,", "datetime import datetime from logging import INFO, Formatter, 
StreamHandler, getLogger from azureml.core import", "env.input_dir, \"--output_path\", preparation_pipelinedata, \"--datastore_name\", env.blob_datastore_name ], runconfig=run_config ) step_list.append(preparation_step) parallel_run_config = ParallelRunConfig( source_directory=env.sources_directory_train,", "\"--input_path\", env.input_dir, \"--output_path\", preparation_pipelinedata, \"--datastore_name\", env.blob_datastore_name ], runconfig=run_config ) step_list.append(preparation_step) parallel_run_config = ParallelRunConfig(", "ErrorResponseException: pipeline_endpoint = PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name, pipeline=published_pipeline, description=env.pipeline_endpoint_name) if __name__ == \"__main__\": logger =", "script_name=env.preparation_step_script_path, outputs=[preparation_pipelinedata], arguments=[ \"--input_path\", env.input_dir, \"--output_path\", preparation_pipelinedata, \"--datastore_name\", env.blob_datastore_name ], runconfig=run_config ) step_list.append(preparation_step)", "from azureml.core import Environment, Workspace from azureml.core.conda_dependencies import CondaDependencies from azureml.core.runconfig import RunConfiguration", "from azureml.pipeline.core._restclients.aeva.models.error_response import \\ ErrorResponseException from azureml.pipeline.steps import (ParallelRunConfig, ParallelRunStep, PythonScriptStep) from ml_service.util.env_variables", "{aml_compute}\") # Azure ML environment environment = Environment(name=env.aml_env_name) conda_dep = CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\") environment.python.conda_dependencies =", "= ParallelRunStep( name=\"extraction-step\", inputs=[preparation_pipelinedata], output=extraction_pipelinedata, arguments=[ \"--output_dir\", extraction_pipelinedata ], parallel_run_config=parallel_run_config ) step_list.append(extraction_step) training_step", "# Azure ML compute cluster aml_compute = 
get_compute(aml_workspace, env.compute_name) logger.info(f\"Aazure ML compute cluster:", "step_list.append(preparation_step) parallel_run_config = ParallelRunConfig( source_directory=env.sources_directory_train, entry_script=env.extraction_step_script_path, mini_batch_size=env.mini_batch_size, error_threshold=env.error_threshold, output_action=\"append_row\", environment=environment, compute_target=aml_compute, node_count=env.node_count, run_invocation_timeout=env.run_invocation_timeout,", "name=\"extraction-step\", inputs=[preparation_pipelinedata], output=extraction_pipelinedata, arguments=[ \"--output_dir\", extraction_pipelinedata ], parallel_run_config=parallel_run_config ) step_list.append(extraction_step) training_step = PythonScriptStep(", "process_count_per_node=env.process_count_per_node, append_row_file_name=\"extraction_output.txt\") extraction_step = ParallelRunStep( name=\"extraction-step\", inputs=[preparation_pipelinedata], output=extraction_pipelinedata, arguments=[ \"--output_dir\", extraction_pipelinedata ], parallel_run_config=parallel_run_config", "= pipeline.publish(env.pipeline_name, description=env.pipeline_name, version=datetime.utcnow().isoformat()) try: pipeline_endpoint = PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name) pipeline_endpoint.add_default(published_pipeline) except ErrorResponseException: pipeline_endpoint", "training_step = PythonScriptStep( name=\"traning-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.training_step_script_path, inputs=[extraction_pipelinedata], outputs=[training_pipelinedata], arguments=[ \"--input_dir\", extraction_pipelinedata, \"--output_dir\",", "{aml_workspace}\") # Azure ML compute cluster aml_compute = get_compute(aml_workspace, env.compute_name) logger.info(f\"Aazure ML compute", "arguments=[ \"--input_dir\", extraction_pipelinedata, \"--output_dir\", training_pipelinedata ], runconfig=run_config ) 
step_list.append(training_step) # Build pipeline pipeline", "training_pipelinedata = PipelineData(\"training_pipelinedata\", is_directory=True) # List of pipeline steps step_list = list() preparation_step", "if __name__ == \"__main__\": logger = getLogger(__name__) logger.setLevel(INFO) logger.propagate = False sh =", "import RunConfiguration from azureml.pipeline.core import (Pipeline, PipelineData, PipelineEndpoint) from azureml.pipeline.core._restclients.aeva.models.error_response import \\ ErrorResponseException", "ml_service.util.pipeline_utils import get_compute def main(): \"\"\"Build pipeline.\"\"\" # Environment variables env = Env()", "ParallelRunStep( name=\"extraction-step\", inputs=[preparation_pipelinedata], output=extraction_pipelinedata, arguments=[ \"--output_dir\", extraction_pipelinedata ], parallel_run_config=parallel_run_config ) step_list.append(extraction_step) training_step =", "arguments=[ \"--output_dir\", extraction_pipelinedata ], parallel_run_config=parallel_run_config ) step_list.append(extraction_step) training_step = PythonScriptStep( name=\"traning-step\", compute_target=aml_compute, source_directory=env.sources_directory_train,", "extraction_pipelinedata ], parallel_run_config=parallel_run_config ) step_list.append(extraction_step) training_step = PythonScriptStep( name=\"traning-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.training_step_script_path, inputs=[extraction_pipelinedata],", "ML compute cluster: {aml_compute}\") # Azure ML environment environment = Environment(name=env.aml_env_name) conda_dep =", "preparation_step = PythonScriptStep( name=\"preparation-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.preparation_step_script_path, outputs=[preparation_pipelinedata], arguments=[ \"--input_path\", env.input_dir, \"--output_path\", preparation_pipelinedata,", "environment environment = Environment(name=env.aml_env_name) 
conda_dep = CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\") environment.python.conda_dependencies = conda_dep run_config = RunConfiguration()", "StreamHandler, getLogger from azureml.core import Environment, Workspace from azureml.core.conda_dependencies import CondaDependencies from azureml.core.runconfig", "compute cluster aml_compute = get_compute(aml_workspace, env.compute_name) logger.info(f\"Aazure ML compute cluster: {aml_compute}\") # Azure", "], parallel_run_config=parallel_run_config ) step_list.append(extraction_step) training_step = PythonScriptStep( name=\"traning-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.training_step_script_path, inputs=[extraction_pipelinedata], outputs=[training_pipelinedata],", "steps=step_list) pipeline.validate() logger.info(f\"Built pipeline {pipeline}\") # Publish pipeline published_pipeline = pipeline.publish(env.pipeline_name, description=env.pipeline_name, version=datetime.utcnow().isoformat())", "preparation_pipelinedata = PipelineData(\"preparation_pipelinedata\", is_directory=True).as_dataset() extraction_pipelinedata = PipelineData(\"extraction_pipelinedata\", is_directory=True) training_pipelinedata = PipelineData(\"training_pipelinedata\", is_directory=True) #", "\"\"\"Build pipeline.\"\"\" # Environment variables env = Env() # Azure ML workspace aml_workspace", "aml_compute = get_compute(aml_workspace, env.compute_name) logger.info(f\"Aazure ML compute cluster: {aml_compute}\") # Azure ML environment", "\\ ErrorResponseException from azureml.pipeline.steps import (ParallelRunConfig, ParallelRunStep, PythonScriptStep) from ml_service.util.env_variables import Env from", "aml_workspace = Workspace.get( name=env.workspace_name, subscription_id=env.subscription_id, resource_group=env.resource_group, ) logger.info(f\"Azure ML workspace: {aml_workspace}\") # Azure", "subscription_id=env.subscription_id, 
resource_group=env.resource_group, ) logger.info(f\"Azure ML workspace: {aml_workspace}\") # Azure ML compute cluster aml_compute", "= RunConfiguration() run_config.environment = environment # Pipeline Data preparation_pipelinedata = PipelineData(\"preparation_pipelinedata\", is_directory=True).as_dataset() extraction_pipelinedata", "extraction_pipelinedata = PipelineData(\"extraction_pipelinedata\", is_directory=True) training_pipelinedata = PipelineData(\"training_pipelinedata\", is_directory=True) # List of pipeline steps", "name=env.pipeline_endpoint_name) pipeline_endpoint.add_default(published_pipeline) except ErrorResponseException: pipeline_endpoint = PipelineEndpoint.publish(workspace=aml_workspace, name=env.pipeline_endpoint_name, pipeline=published_pipeline, description=env.pipeline_endpoint_name) if __name__ ==", "parallel_run_config=parallel_run_config ) step_list.append(extraction_step) training_step = PythonScriptStep( name=\"traning-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.training_step_script_path, inputs=[extraction_pipelinedata], outputs=[training_pipelinedata], arguments=[", "__name__ == \"__main__\": logger = getLogger(__name__) logger.setLevel(INFO) logger.propagate = False sh = StreamHandler()", "run_config = RunConfiguration() run_config.environment = environment # Pipeline Data preparation_pipelinedata = PipelineData(\"preparation_pipelinedata\", is_directory=True).as_dataset()", "Formatter, StreamHandler, getLogger from azureml.core import Environment, Workspace from azureml.core.conda_dependencies import CondaDependencies from", "= PipelineData(\"preparation_pipelinedata\", is_directory=True).as_dataset() extraction_pipelinedata = PipelineData(\"extraction_pipelinedata\", is_directory=True) training_pipelinedata = PipelineData(\"training_pipelinedata\", is_directory=True) # List", "PipelineData(\"training_pipelinedata\", is_directory=True) # List of pipeline steps step_list = list() 
preparation_step = PythonScriptStep(", "is_directory=True).as_dataset() extraction_pipelinedata = PipelineData(\"extraction_pipelinedata\", is_directory=True) training_pipelinedata = PipelineData(\"training_pipelinedata\", is_directory=True) # List of pipeline", "of pipeline steps step_list = list() preparation_step = PythonScriptStep( name=\"preparation-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.preparation_step_script_path,", "INFO, Formatter, StreamHandler, getLogger from azureml.core import Environment, Workspace from azureml.core.conda_dependencies import CondaDependencies", "outputs=[preparation_pipelinedata], arguments=[ \"--input_path\", env.input_dir, \"--output_path\", preparation_pipelinedata, \"--datastore_name\", env.blob_datastore_name ], runconfig=run_config ) step_list.append(preparation_step) parallel_run_config", "step_list.append(training_step) # Build pipeline pipeline = Pipeline(workspace=aml_workspace, steps=step_list) pipeline.validate() logger.info(f\"Built pipeline {pipeline}\") #", ") step_list.append(extraction_step) training_step = PythonScriptStep( name=\"traning-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.training_step_script_path, inputs=[extraction_pipelinedata], outputs=[training_pipelinedata], arguments=[ \"--input_dir\",", "variables env = Env() # Azure ML workspace aml_workspace = Workspace.get( name=env.workspace_name, subscription_id=env.subscription_id,", "import Environment, Workspace from azureml.core.conda_dependencies import CondaDependencies from azureml.core.runconfig import RunConfiguration from azureml.pipeline.core", "extraction_step = ParallelRunStep( name=\"extraction-step\", inputs=[preparation_pipelinedata], output=extraction_pipelinedata, arguments=[ \"--output_dir\", extraction_pipelinedata ], parallel_run_config=parallel_run_config ) step_list.append(extraction_step)", "], runconfig=run_config ) 
step_list.append(preparation_step) parallel_run_config = ParallelRunConfig( source_directory=env.sources_directory_train, entry_script=env.extraction_step_script_path, mini_batch_size=env.mini_batch_size, error_threshold=env.error_threshold, output_action=\"append_row\", environment=environment,", "RunConfiguration() run_config.environment = environment # Pipeline Data preparation_pipelinedata = PipelineData(\"preparation_pipelinedata\", is_directory=True).as_dataset() extraction_pipelinedata =", "\"--input_dir\", extraction_pipelinedata, \"--output_dir\", training_pipelinedata ], runconfig=run_config ) step_list.append(training_step) # Build pipeline pipeline =", "pipeline.\"\"\" from datetime import datetime from logging import INFO, Formatter, StreamHandler, getLogger from", "== \"__main__\": logger = getLogger(__name__) logger.setLevel(INFO) logger.propagate = False sh = StreamHandler() sh.setFormatter(Formatter(\"%(asctime)s", "azureml.core.conda_dependencies import CondaDependencies from azureml.core.runconfig import RunConfiguration from azureml.pipeline.core import (Pipeline, PipelineData, PipelineEndpoint)", "pipeline published_pipeline = pipeline.publish(env.pipeline_name, description=env.pipeline_name, version=datetime.utcnow().isoformat()) try: pipeline_endpoint = PipelineEndpoint.get(workspace=aml_workspace, name=env.pipeline_endpoint_name) pipeline_endpoint.add_default(published_pipeline) except", "main(): \"\"\"Build pipeline.\"\"\" # Environment variables env = Env() # Azure ML workspace", "environment = Environment(name=env.aml_env_name) conda_dep = CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\") environment.python.conda_dependencies = conda_dep run_config = RunConfiguration() run_config.environment", "= False sh = StreamHandler() sh.setFormatter(Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")) logger.addHandler(sh)", "# Build pipeline pipeline = Pipeline(workspace=aml_workspace, 
steps=step_list) pipeline.validate() logger.info(f\"Built pipeline {pipeline}\") # Publish", "= get_compute(aml_workspace, env.compute_name) logger.info(f\"Aazure ML compute cluster: {aml_compute}\") # Azure ML environment environment", "datetime from logging import INFO, Formatter, StreamHandler, getLogger from azureml.core import Environment, Workspace", "Azure ML environment environment = Environment(name=env.aml_env_name) conda_dep = CondaDependencies(conda_dependencies_file_path=\"./local_development/dev_dependencies.yml\") environment.python.conda_dependencies = conda_dep run_config", "node_count=env.node_count, run_invocation_timeout=env.run_invocation_timeout, process_count_per_node=env.process_count_per_node, append_row_file_name=\"extraction_output.txt\") extraction_step = ParallelRunStep( name=\"extraction-step\", inputs=[preparation_pipelinedata], output=extraction_pipelinedata, arguments=[ \"--output_dir\", extraction_pipelinedata", "is_directory=True) training_pipelinedata = PipelineData(\"training_pipelinedata\", is_directory=True) # List of pipeline steps step_list = list()", "\"--output_dir\", extraction_pipelinedata ], parallel_run_config=parallel_run_config ) step_list.append(extraction_step) training_step = PythonScriptStep( name=\"traning-step\", compute_target=aml_compute, source_directory=env.sources_directory_train, script_name=env.training_step_script_path,", "PythonScriptStep) from ml_service.util.env_variables import Env from ml_service.util.pipeline_utils import get_compute def main(): \"\"\"Build pipeline.\"\"\"", "CondaDependencies from azureml.core.runconfig import RunConfiguration from azureml.pipeline.core import (Pipeline, PipelineData, PipelineEndpoint) from azureml.pipeline.core._restclients.aeva.models.error_response", "import (ParallelRunConfig, ParallelRunStep, PythonScriptStep) from ml_service.util.env_variables import Env from ml_service.util.pipeline_utils import get_compute def", 
"source_directory=env.sources_directory_train, entry_script=env.extraction_step_script_path, mini_batch_size=env.mini_batch_size, error_threshold=env.error_threshold, output_action=\"append_row\", environment=environment, compute_target=aml_compute, node_count=env.node_count, run_invocation_timeout=env.run_invocation_timeout, process_count_per_node=env.process_count_per_node, append_row_file_name=\"extraction_output.txt\") extraction_step =", "PipelineData(\"extraction_pipelinedata\", is_directory=True) training_pipelinedata = PipelineData(\"training_pipelinedata\", is_directory=True) # List of pipeline steps step_list =", "workspace aml_workspace = Workspace.get( name=env.workspace_name, subscription_id=env.subscription_id, resource_group=env.resource_group, ) logger.info(f\"Azure ML workspace: {aml_workspace}\") #", "ML workspace aml_workspace = Workspace.get( name=env.workspace_name, subscription_id=env.subscription_id, resource_group=env.resource_group, ) logger.info(f\"Azure ML workspace: {aml_workspace}\")", "import datetime from logging import INFO, Formatter, StreamHandler, getLogger from azureml.core import Environment,", "get_compute def main(): \"\"\"Build pipeline.\"\"\" # Environment variables env = Env() # Azure" ]
[ "\"forward 2\"] def test_part1(): assert part1(test_input) == 150 def test_part2(): assert part2(test_input) ==", "\"up 3\", \"down 8\", \"forward 2\"] def test_part1(): assert part1(test_input) == 150 def", "day2 import part1, part2 test_input = [\"forward 5\", \"down 5\", \"forward 8\", \"up", "[\"forward 5\", \"down 5\", \"forward 8\", \"up 3\", \"down 8\", \"forward 2\"] def", "from day2 import part1, part2 test_input = [\"forward 5\", \"down 5\", \"forward 8\",", "5\", \"down 5\", \"forward 8\", \"up 3\", \"down 8\", \"forward 2\"] def test_part1():", "8\", \"up 3\", \"down 8\", \"forward 2\"] def test_part1(): assert part1(test_input) == 150", "import part1, part2 test_input = [\"forward 5\", \"down 5\", \"forward 8\", \"up 3\",", "5\", \"forward 8\", \"up 3\", \"down 8\", \"forward 2\"] def test_part1(): assert part1(test_input)", "<reponame>jonsth131/aoc from day2 import part1, part2 test_input = [\"forward 5\", \"down 5\", \"forward", "part1, part2 test_input = [\"forward 5\", \"down 5\", \"forward 8\", \"up 3\", \"down", "\"forward 8\", \"up 3\", \"down 8\", \"forward 2\"] def test_part1(): assert part1(test_input) ==", "= [\"forward 5\", \"down 5\", \"forward 8\", \"up 3\", \"down 8\", \"forward 2\"]", "2\"] def test_part1(): assert part1(test_input) == 150 def test_part2(): assert part2(test_input) == 900", "\"down 5\", \"forward 8\", \"up 3\", \"down 8\", \"forward 2\"] def test_part1(): assert", "part2 test_input = [\"forward 5\", \"down 5\", \"forward 8\", \"up 3\", \"down 8\",", "8\", \"forward 2\"] def test_part1(): assert part1(test_input) == 150 def test_part2(): assert part2(test_input)", "\"down 8\", \"forward 2\"] def test_part1(): assert part1(test_input) == 150 def test_part2(): assert", "3\", \"down 8\", \"forward 2\"] def test_part1(): assert part1(test_input) == 150 def test_part2():", "test_input = [\"forward 5\", \"down 5\", \"forward 8\", \"up 3\", \"down 8\", \"forward" ]
[ "rpsls from cStringIO import StringIO class RPSLSTest(unittest.TestCase): def setUp(self): sys.stdout = StringIO() def", "def test_rock_is_zero(self): self.assertEqual(rpsls.name_to_number(\"rock\"), 0) def test_Spock_is_one(self): self.assertEqual(rpsls.name_to_number(\"Spock\"), 1) def test_spock_is_invalid(self): self.assertEqual(rpsls.name_to_number(\"spock\"), None) def", "output = sys.stdout.getvalue() self.assertEquals(output[:14], \"Player chooses\") def test_rpsls_blank_line_if_consecutive_games(self): rpsls.rpsls_counter = 0 #First Execution", "crushes Lizard Lizard poisons Spock Spock smashes Scissors Scissors decapitates Lizard Lizard eats", "Paper Paper disproves Spock Spock vaporizes Rock (and as it always has) Rock", "Scissors Scissors decapitates Lizard Lizard eats Paper Paper disproves Spock Spock vaporizes Rock", "\"rock\", \"Computer wins!\\n\"]] for r in rulesList: self.assertWinningRule(r[0], r[1], r[2]) def assertWinningRule(self, player_choice,", "test_invalid_number_returns_None(self): self.assertEqual(rpsls.number_to_name(-1), None) def test_first_execution_print_player_chooses(self): rpsls.rpsls_counter = 0 rpsls.rpsls(\"Spock\") output = sys.stdout.getvalue() self.assertEquals(output[:14],", "\"Player wins!\\n\"], [\"rock\", \"lizard\", \"Player wins!\\n\"], [\"Spock\", \"lizard\", \"Computer wins!\\n\"], [\"Spock\", \"scissors\", \"Player", "= StringIO() #Second execution rpsls.rpsls(\"Spock\") first_line = sys.stdout.getvalue() self.assertEquals(first_line[-2:], \"\\n\\n\") def test_victory_rules(self): \"\"\"", "import StringIO class RPSLSTest(unittest.TestCase): def setUp(self): sys.stdout = StringIO() def tearDown(self): sys.stdout.close() sys.stdout", "= sys.__stdout__ def test_can_call_rpsls_module(self): rpsls.name_to_number(\"Test\") def test_rock_is_zero(self): self.assertEqual(rpsls.name_to_number(\"rock\"), 0) def test_Spock_is_one(self): self.assertEqual(rpsls.name_to_number(\"Spock\"), 1)", "#Second execution 
rpsls.rpsls(\"Spock\") first_line = sys.stdout.getvalue() self.assertEquals(first_line[-2:], \"\\n\\n\") def test_victory_rules(self): \"\"\" Paper covers", "sys.__stdout__ def test_can_call_rpsls_module(self): rpsls.name_to_number(\"Test\") def test_rock_is_zero(self): self.assertEqual(rpsls.name_to_number(\"rock\"), 0) def test_Spock_is_one(self): self.assertEqual(rpsls.name_to_number(\"Spock\"), 1) def", "\"paper\", \"Player wins!\\n\"], [\"rock\", \"lizard\", \"Player wins!\\n\"], [\"Spock\", \"lizard\", \"Computer wins!\\n\"], [\"Spock\", \"scissors\",", "winner): self.tearDown() self.setUp() player_number = rpsls.name_to_number(player_choice) cpu_number = rpsls.name_to_number(cpu_choice) rpsls.rpsls_compute_winner(player_number, cpu_number) response =", "= 0 rpsls.rpsls(\"Spock\") output = sys.stdout.getvalue() self.assertEquals(output[:14], \"Player chooses\") def test_rpsls_blank_line_if_consecutive_games(self): rpsls.rpsls_counter =", "cStringIO import StringIO class RPSLSTest(unittest.TestCase): def setUp(self): sys.stdout = StringIO() def tearDown(self): sys.stdout.close()", "self.assertEqual(\"Spock\", rpsls.number_to_name(rpsls.name_to_number(\"Spock\"))) self.assertEqual(\"paper\", rpsls.number_to_name(rpsls.name_to_number(\"paper\"))) self.assertEqual(\"lizard\", rpsls.number_to_name(rpsls.name_to_number(\"lizard\"))) self.assertEqual(\"scissors\", rpsls.number_to_name(rpsls.name_to_number(\"scissors\"))) def test_invalid_number_returns_None(self): self.assertEqual(rpsls.number_to_name(-1), None) def", "sys.stdout = StringIO() def tearDown(self): sys.stdout.close() sys.stdout = sys.__stdout__ def test_can_call_rpsls_module(self): rpsls.name_to_number(\"Test\") def", "chooses\") def test_rpsls_blank_line_if_consecutive_games(self): rpsls.rpsls_counter = 0 #First Execution rpsls.rpsls(\"Test\") sys.stdout.getvalue() sys.stdout.close() sys.stdout =", "import sys from rpsls import rpsls from cStringIO import StringIO class RPSLSTest(unittest.TestCase): 
def", "rpsls.number_to_name(rpsls.name_to_number(\"rock\"))) self.assertEqual(\"Spock\", rpsls.number_to_name(rpsls.name_to_number(\"Spock\"))) self.assertEqual(\"paper\", rpsls.number_to_name(rpsls.name_to_number(\"paper\"))) self.assertEqual(\"lizard\", rpsls.number_to_name(rpsls.name_to_number(\"lizard\"))) self.assertEqual(\"scissors\", rpsls.number_to_name(rpsls.name_to_number(\"scissors\"))) def test_invalid_number_returns_None(self): self.assertEqual(rpsls.number_to_name(-1), None)", "from cStringIO import StringIO class RPSLSTest(unittest.TestCase): def setUp(self): sys.stdout = StringIO() def tearDown(self):", "Rock crushes Lizard Lizard poisons Spock Spock smashes Scissors Scissors decapitates Lizard Lizard", "def test_paper_is_two(self): self.assertEqual(rpsls.name_to_number(\"paper\"), 2) def test_lizard_is_three(self): self.assertEqual(rpsls.name_to_number(\"lizard\"), 3) def test_scissors_is_four(self): self.assertEqual(rpsls.name_to_number(\"scissors\"), 4) def", "def test_Spock_is_one(self): self.assertEqual(rpsls.name_to_number(\"Spock\"), 1) def test_spock_is_invalid(self): self.assertEqual(rpsls.name_to_number(\"spock\"), None) def test_paper_is_two(self): self.assertEqual(rpsls.name_to_number(\"paper\"), 2) def", "Rock (and as it always has) Rock crushes scissors \"\"\" rulesList = [", "\"scissors\", \"Computer wins!\\n\"], [\"paper\", \"Spock\", \"Player wins!\\n\"], [\"rock\", \"Spock\", \"Computer wins!\\n\"], [\"rock\", \"rock\",", "\"Spock\", \"Player wins!\\n\"], [\"rock\", \"Spock\", \"Computer wins!\\n\"], [\"rock\", \"rock\", \"Player and computer tie!\\n\"],", "rulesList = [ [\"paper\", \"scissors\", \"Computer wins!\\n\"], [\"scissors\", \"paper\", \"Player wins!\\n\"], [\"rock\", \"lizard\",", "StringIO() #Second execution rpsls.rpsls(\"Spock\") first_line = sys.stdout.getvalue() self.assertEquals(first_line[-2:], \"\\n\\n\") def test_victory_rules(self): \"\"\" Paper", "self.assertWinningRule(r[0], r[1], r[2]) def 
assertWinningRule(self, player_choice, cpu_choice, winner): self.tearDown() self.setUp() player_number = rpsls.name_to_number(player_choice)", "rpsls.number_to_name(rpsls.name_to_number(\"scissors\"))) def test_invalid_number_returns_None(self): self.assertEqual(rpsls.number_to_name(-1), None) def test_first_execution_print_player_chooses(self): rpsls.rpsls_counter = 0 rpsls.rpsls(\"Spock\") output =", "sys.stdout.getvalue() sys.stdout.close() sys.stdout = StringIO() #Second execution rpsls.rpsls(\"Spock\") first_line = sys.stdout.getvalue() self.assertEquals(first_line[-2:], \"\\n\\n\")", "def test_scissors_is_four(self): self.assertEqual(rpsls.name_to_number(\"scissors\"), 4) def test_number_to_name_restores_name_to_number(self): self.assertEqual(\"rock\", rpsls.number_to_name(rpsls.name_to_number(\"rock\"))) self.assertEqual(\"Spock\", rpsls.number_to_name(rpsls.name_to_number(\"Spock\"))) self.assertEqual(\"paper\", rpsls.number_to_name(rpsls.name_to_number(\"paper\"))) self.assertEqual(\"lizard\",", "Lizard eats Paper Paper disproves Spock Spock vaporizes Rock (and as it always", "2) def test_lizard_is_three(self): self.assertEqual(rpsls.name_to_number(\"lizard\"), 3) def test_scissors_is_four(self): self.assertEqual(rpsls.name_to_number(\"scissors\"), 4) def test_number_to_name_restores_name_to_number(self): self.assertEqual(\"rock\", rpsls.number_to_name(rpsls.name_to_number(\"rock\")))", "= 0 #First Execution rpsls.rpsls(\"Test\") sys.stdout.getvalue() sys.stdout.close() sys.stdout = StringIO() #Second execution rpsls.rpsls(\"Spock\")", "def tearDown(self): sys.stdout.close() sys.stdout = sys.__stdout__ def test_can_call_rpsls_module(self): rpsls.name_to_number(\"Test\") def test_rock_is_zero(self): self.assertEqual(rpsls.name_to_number(\"rock\"), 0)", "sys.stdout.getvalue() self.assertEquals(output[:14], \"Player chooses\") def test_rpsls_blank_line_if_consecutive_games(self): rpsls.rpsls_counter = 0 #First Execution rpsls.rpsls(\"Test\") 
sys.stdout.getvalue()", "as it always has) Rock crushes scissors \"\"\" rulesList = [ [\"paper\", \"scissors\",", "rpsls.rpsls(\"Spock\") output = sys.stdout.getvalue() self.assertEquals(output[:14], \"Player chooses\") def test_rpsls_blank_line_if_consecutive_games(self): rpsls.rpsls_counter = 0 #First", "\"\"\" rulesList = [ [\"paper\", \"scissors\", \"Computer wins!\\n\"], [\"scissors\", \"paper\", \"Player wins!\\n\"], [\"rock\",", "\"Player wins!\\n\"], [\"Spock\", \"lizard\", \"Computer wins!\\n\"], [\"Spock\", \"scissors\", \"Player wins!\\n\"], [\"lizard\", \"scissors\", \"Computer", "Rock crushes scissors \"\"\" rulesList = [ [\"paper\", \"scissors\", \"Computer wins!\\n\"], [\"scissors\", \"paper\",", "[\"scissors\", \"paper\", \"Player wins!\\n\"], [\"rock\", \"lizard\", \"Player wins!\\n\"], [\"Spock\", \"lizard\", \"Computer wins!\\n\"], [\"Spock\",", "r[2]) def assertWinningRule(self, player_choice, cpu_choice, winner): self.tearDown() self.setUp() player_number = rpsls.name_to_number(player_choice) cpu_number =", "tearDown(self): sys.stdout.close() sys.stdout = sys.__stdout__ def test_can_call_rpsls_module(self): rpsls.name_to_number(\"Test\") def test_rock_is_zero(self): self.assertEqual(rpsls.name_to_number(\"rock\"), 0) def", "Paper disproves Spock Spock vaporizes Rock (and as it always has) Rock crushes", "disproves Spock Spock vaporizes Rock (and as it always has) Rock crushes scissors", "test_number_to_name_restores_name_to_number(self): self.assertEqual(\"rock\", rpsls.number_to_name(rpsls.name_to_number(\"rock\"))) self.assertEqual(\"Spock\", rpsls.number_to_name(rpsls.name_to_number(\"Spock\"))) self.assertEqual(\"paper\", rpsls.number_to_name(rpsls.name_to_number(\"paper\"))) self.assertEqual(\"lizard\", rpsls.number_to_name(rpsls.name_to_number(\"lizard\"))) self.assertEqual(\"scissors\", rpsls.number_to_name(rpsls.name_to_number(\"scissors\"))) def test_invalid_number_returns_None(self):", "Lizard Lizard eats Paper Paper disproves Spock 
Spock vaporizes Rock (and as it", "Paper covers Rock Rock crushes Lizard Lizard poisons Spock Spock smashes Scissors Scissors", "def test_can_call_rpsls_module(self): rpsls.name_to_number(\"Test\") def test_rock_is_zero(self): self.assertEqual(rpsls.name_to_number(\"rock\"), 0) def test_Spock_is_one(self): self.assertEqual(rpsls.name_to_number(\"Spock\"), 1) def test_spock_is_invalid(self):", "[\"rock\", \"lizard\", \"Player wins!\\n\"], [\"Spock\", \"lizard\", \"Computer wins!\\n\"], [\"Spock\", \"scissors\", \"Player wins!\\n\"], [\"lizard\",", "0 rpsls.rpsls(\"Spock\") output = sys.stdout.getvalue() self.assertEquals(output[:14], \"Player chooses\") def test_rpsls_blank_line_if_consecutive_games(self): rpsls.rpsls_counter = 0", "Lizard poisons Spock Spock smashes Scissors Scissors decapitates Lizard Lizard eats Paper Paper", "test_Spock_is_one(self): self.assertEqual(rpsls.name_to_number(\"Spock\"), 1) def test_spock_is_invalid(self): self.assertEqual(rpsls.name_to_number(\"spock\"), None) def test_paper_is_two(self): self.assertEqual(rpsls.name_to_number(\"paper\"), 2) def test_lizard_is_three(self):", "has) Rock crushes scissors \"\"\" rulesList = [ [\"paper\", \"scissors\", \"Computer wins!\\n\"], [\"scissors\",", "[\"paper\", \"scissors\", \"Computer wins!\\n\"], [\"scissors\", \"paper\", \"Player wins!\\n\"], [\"rock\", \"lizard\", \"Player wins!\\n\"], [\"Spock\",", "[\"rock\", \"Spock\", \"Computer wins!\\n\"], [\"rock\", \"rock\", \"Player and computer tie!\\n\"], [\"scissors\", \"rock\", \"Computer", "self.assertEqual(rpsls.name_to_number(\"scissors\"), 4) def test_number_to_name_restores_name_to_number(self): self.assertEqual(\"rock\", rpsls.number_to_name(rpsls.name_to_number(\"rock\"))) self.assertEqual(\"Spock\", rpsls.number_to_name(rpsls.name_to_number(\"Spock\"))) self.assertEqual(\"paper\", rpsls.number_to_name(rpsls.name_to_number(\"paper\"))) self.assertEqual(\"lizard\", rpsls.number_to_name(rpsls.name_to_number(\"lizard\"))) 
self.assertEqual(\"scissors\",", "[\"scissors\", \"rock\", \"Computer wins!\\n\"]] for r in rulesList: self.assertWinningRule(r[0], r[1], r[2]) def assertWinningRule(self,", "[\"Spock\", \"scissors\", \"Player wins!\\n\"], [\"lizard\", \"scissors\", \"Computer wins!\\n\"], [\"paper\", \"Spock\", \"Player wins!\\n\"], [\"rock\",", "r[1], r[2]) def assertWinningRule(self, player_choice, cpu_choice, winner): self.tearDown() self.setUp() player_number = rpsls.name_to_number(player_choice) cpu_number", "sys.stdout = sys.__stdout__ def test_can_call_rpsls_module(self): rpsls.name_to_number(\"Test\") def test_rock_is_zero(self): self.assertEqual(rpsls.name_to_number(\"rock\"), 0) def test_Spock_is_one(self): self.assertEqual(rpsls.name_to_number(\"Spock\"),", "test_can_call_rpsls_module(self): rpsls.name_to_number(\"Test\") def test_rock_is_zero(self): self.assertEqual(rpsls.name_to_number(\"rock\"), 0) def test_Spock_is_one(self): self.assertEqual(rpsls.name_to_number(\"Spock\"), 1) def test_spock_is_invalid(self): self.assertEqual(rpsls.name_to_number(\"spock\"),", "from rpsls import rpsls from cStringIO import StringIO class RPSLSTest(unittest.TestCase): def setUp(self): sys.stdout", "self.assertEqual(rpsls.name_to_number(\"lizard\"), 3) def test_scissors_is_four(self): self.assertEqual(rpsls.name_to_number(\"scissors\"), 4) def test_number_to_name_restores_name_to_number(self): self.assertEqual(\"rock\", rpsls.number_to_name(rpsls.name_to_number(\"rock\"))) self.assertEqual(\"Spock\", rpsls.number_to_name(rpsls.name_to_number(\"Spock\"))) self.assertEqual(\"paper\",", "covers Rock Rock crushes Lizard Lizard poisons Spock Spock smashes Scissors Scissors decapitates", "\"Player and computer tie!\\n\"], [\"scissors\", \"rock\", \"Computer wins!\\n\"]] for r in rulesList: self.assertWinningRule(r[0],", "player_choice, cpu_choice, winner): self.tearDown() self.setUp() player_number = rpsls.name_to_number(player_choice) cpu_number = rpsls.name_to_number(cpu_choice) 
rpsls.rpsls_compute_winner(player_number, cpu_number)", "test_victory_rules(self): \"\"\" Paper covers Rock Rock crushes Lizard Lizard poisons Spock Spock smashes", "3) def test_scissors_is_four(self): self.assertEqual(rpsls.name_to_number(\"scissors\"), 4) def test_number_to_name_restores_name_to_number(self): self.assertEqual(\"rock\", rpsls.number_to_name(rpsls.name_to_number(\"rock\"))) self.assertEqual(\"Spock\", rpsls.number_to_name(rpsls.name_to_number(\"Spock\"))) self.assertEqual(\"paper\", rpsls.number_to_name(rpsls.name_to_number(\"paper\")))", "self.assertEqual(rpsls.name_to_number(\"paper\"), 2) def test_lizard_is_three(self): self.assertEqual(rpsls.name_to_number(\"lizard\"), 3) def test_scissors_is_four(self): self.assertEqual(rpsls.name_to_number(\"scissors\"), 4) def test_number_to_name_restores_name_to_number(self): self.assertEqual(\"rock\",", "\"Computer wins!\\n\"], [\"scissors\", \"paper\", \"Player wins!\\n\"], [\"rock\", \"lizard\", \"Player wins!\\n\"], [\"Spock\", \"lizard\", \"Computer", "\"lizard\", \"Computer wins!\\n\"], [\"Spock\", \"scissors\", \"Player wins!\\n\"], [\"lizard\", \"scissors\", \"Computer wins!\\n\"], [\"paper\", \"Spock\",", "sys.stdout.close() sys.stdout = sys.__stdout__ def test_can_call_rpsls_module(self): rpsls.name_to_number(\"Test\") def test_rock_is_zero(self): self.assertEqual(rpsls.name_to_number(\"rock\"), 0) def test_Spock_is_one(self):", "test_rpsls_blank_line_if_consecutive_games(self): rpsls.rpsls_counter = 0 #First Execution rpsls.rpsls(\"Test\") sys.stdout.getvalue() sys.stdout.close() sys.stdout = StringIO() #Second", "rpsls.rpsls(\"Spock\") first_line = sys.stdout.getvalue() self.assertEquals(first_line[-2:], \"\\n\\n\") def test_victory_rules(self): \"\"\" Paper covers Rock Rock", "self.assertEquals(first_line[-2:], \"\\n\\n\") def test_victory_rules(self): \"\"\" Paper covers Rock Rock crushes Lizard Lizard poisons", "\"\"\" Paper covers Rock Rock crushes Lizard Lizard poisons Spock Spock smashes 
Scissors", "None) def test_first_execution_print_player_chooses(self): rpsls.rpsls_counter = 0 rpsls.rpsls(\"Spock\") output = sys.stdout.getvalue() self.assertEquals(output[:14], \"Player chooses\")", "rpsls.number_to_name(rpsls.name_to_number(\"Spock\"))) self.assertEqual(\"paper\", rpsls.number_to_name(rpsls.name_to_number(\"paper\"))) self.assertEqual(\"lizard\", rpsls.number_to_name(rpsls.name_to_number(\"lizard\"))) self.assertEqual(\"scissors\", rpsls.number_to_name(rpsls.name_to_number(\"scissors\"))) def test_invalid_number_returns_None(self): self.assertEqual(rpsls.number_to_name(-1), None) def test_first_execution_print_player_chooses(self):", "Spock vaporizes Rock (and as it always has) Rock crushes scissors \"\"\" rulesList", "[ [\"paper\", \"scissors\", \"Computer wins!\\n\"], [\"scissors\", \"paper\", \"Player wins!\\n\"], [\"rock\", \"lizard\", \"Player wins!\\n\"],", "[\"Spock\", \"lizard\", \"Computer wins!\\n\"], [\"Spock\", \"scissors\", \"Player wins!\\n\"], [\"lizard\", \"scissors\", \"Computer wins!\\n\"], [\"paper\",", "[\"lizard\", \"scissors\", \"Computer wins!\\n\"], [\"paper\", \"Spock\", \"Player wins!\\n\"], [\"rock\", \"Spock\", \"Computer wins!\\n\"], [\"rock\",", "sys.stdout.close() sys.stdout = StringIO() #Second execution rpsls.rpsls(\"Spock\") first_line = sys.stdout.getvalue() self.assertEquals(first_line[-2:], \"\\n\\n\") def", "self.assertEqual(\"paper\", rpsls.number_to_name(rpsls.name_to_number(\"paper\"))) self.assertEqual(\"lizard\", rpsls.number_to_name(rpsls.name_to_number(\"lizard\"))) self.assertEqual(\"scissors\", rpsls.number_to_name(rpsls.name_to_number(\"scissors\"))) def test_invalid_number_returns_None(self): self.assertEqual(rpsls.number_to_name(-1), None) def test_first_execution_print_player_chooses(self): rpsls.rpsls_counter", "Execution rpsls.rpsls(\"Test\") sys.stdout.getvalue() sys.stdout.close() sys.stdout = StringIO() #Second execution rpsls.rpsls(\"Spock\") first_line = sys.stdout.getvalue()", 
"self.tearDown() self.setUp() player_number = rpsls.name_to_number(player_choice) cpu_number = rpsls.name_to_number(cpu_choice) rpsls.rpsls_compute_winner(player_number, cpu_number) response = sys.stdout.getvalue()", "\"Spock\", \"Computer wins!\\n\"], [\"rock\", \"rock\", \"Player and computer tie!\\n\"], [\"scissors\", \"rock\", \"Computer wins!\\n\"]]", "def setUp(self): sys.stdout = StringIO() def tearDown(self): sys.stdout.close() sys.stdout = sys.__stdout__ def test_can_call_rpsls_module(self):", "\"scissors\", \"Computer wins!\\n\"], [\"scissors\", \"paper\", \"Player wins!\\n\"], [\"rock\", \"lizard\", \"Player wins!\\n\"], [\"Spock\", \"lizard\",", "wins!\\n\"], [\"rock\", \"rock\", \"Player and computer tie!\\n\"], [\"scissors\", \"rock\", \"Computer wins!\\n\"]] for r", "0 #First Execution rpsls.rpsls(\"Test\") sys.stdout.getvalue() sys.stdout.close() sys.stdout = StringIO() #Second execution rpsls.rpsls(\"Spock\") first_line", "self.setUp() player_number = rpsls.name_to_number(player_choice) cpu_number = rpsls.name_to_number(cpu_choice) rpsls.rpsls_compute_winner(player_number, cpu_number) response = sys.stdout.getvalue() self.assertEqual(response,", "eats Paper Paper disproves Spock Spock vaporizes Rock (and as it always has)", "wins!\\n\"], [\"Spock\", \"lizard\", \"Computer wins!\\n\"], [\"Spock\", \"scissors\", \"Player wins!\\n\"], [\"lizard\", \"scissors\", \"Computer wins!\\n\"],", "def test_victory_rules(self): \"\"\" Paper covers Rock Rock crushes Lizard Lizard poisons Spock Spock", "def test_invalid_number_returns_None(self): self.assertEqual(rpsls.number_to_name(-1), None) def test_first_execution_print_player_chooses(self): rpsls.rpsls_counter = 0 rpsls.rpsls(\"Spock\") output = sys.stdout.getvalue()", "test_rock_is_zero(self): self.assertEqual(rpsls.name_to_number(\"rock\"), 0) def test_Spock_is_one(self): self.assertEqual(rpsls.name_to_number(\"Spock\"), 1) def test_spock_is_invalid(self): self.assertEqual(rpsls.name_to_number(\"spock\"), 
None) def test_paper_is_two(self):", "import unittest import sys from rpsls import rpsls from cStringIO import StringIO class", "self.assertEqual(\"scissors\", rpsls.number_to_name(rpsls.name_to_number(\"scissors\"))) def test_invalid_number_returns_None(self): self.assertEqual(rpsls.number_to_name(-1), None) def test_first_execution_print_player_chooses(self): rpsls.rpsls_counter = 0 rpsls.rpsls(\"Spock\") output", "sys from rpsls import rpsls from cStringIO import StringIO class RPSLSTest(unittest.TestCase): def setUp(self):", "self.assertEqual(rpsls.number_to_name(-1), None) def test_first_execution_print_player_chooses(self): rpsls.rpsls_counter = 0 rpsls.rpsls(\"Spock\") output = sys.stdout.getvalue() self.assertEquals(output[:14], \"Player", "def test_first_execution_print_player_chooses(self): rpsls.rpsls_counter = 0 rpsls.rpsls(\"Spock\") output = sys.stdout.getvalue() self.assertEquals(output[:14], \"Player chooses\") def", "wins!\\n\"], [\"rock\", \"lizard\", \"Player wins!\\n\"], [\"Spock\", \"lizard\", \"Computer wins!\\n\"], [\"Spock\", \"scissors\", \"Player wins!\\n\"],", "Rock Rock crushes Lizard Lizard poisons Spock Spock smashes Scissors Scissors decapitates Lizard", "\"Player wins!\\n\"], [\"lizard\", \"scissors\", \"Computer wins!\\n\"], [\"paper\", \"Spock\", \"Player wins!\\n\"], [\"rock\", \"Spock\", \"Computer", "\"Computer wins!\\n\"], [\"paper\", \"Spock\", \"Player wins!\\n\"], [\"rock\", \"Spock\", \"Computer wins!\\n\"], [\"rock\", \"rock\", \"Player", "unittest import sys from rpsls import rpsls from cStringIO import StringIO class RPSLSTest(unittest.TestCase):", "decapitates Lizard Lizard eats Paper Paper disproves Spock Spock vaporizes Rock (and as", "[\"paper\", \"Spock\", \"Player wins!\\n\"], [\"rock\", \"Spock\", \"Computer wins!\\n\"], [\"rock\", \"rock\", \"Player and computer", "self.assertEqual(rpsls.name_to_number(\"Spock\"), 1) def test_spock_is_invalid(self): self.assertEqual(rpsls.name_to_number(\"spock\"), None) def 
test_paper_is_two(self): self.assertEqual(rpsls.name_to_number(\"paper\"), 2) def test_lizard_is_three(self): self.assertEqual(rpsls.name_to_number(\"lizard\"),", "wins!\\n\"], [\"paper\", \"Spock\", \"Player wins!\\n\"], [\"rock\", \"Spock\", \"Computer wins!\\n\"], [\"rock\", \"rock\", \"Player and", "[\"rock\", \"rock\", \"Player and computer tie!\\n\"], [\"scissors\", \"rock\", \"Computer wins!\\n\"]] for r in", "test_lizard_is_three(self): self.assertEqual(rpsls.name_to_number(\"lizard\"), 3) def test_scissors_is_four(self): self.assertEqual(rpsls.name_to_number(\"scissors\"), 4) def test_number_to_name_restores_name_to_number(self): self.assertEqual(\"rock\", rpsls.number_to_name(rpsls.name_to_number(\"rock\"))) self.assertEqual(\"Spock\", rpsls.number_to_name(rpsls.name_to_number(\"Spock\")))", "RPSLSTest(unittest.TestCase): def setUp(self): sys.stdout = StringIO() def tearDown(self): sys.stdout.close() sys.stdout = sys.__stdout__ def", "for r in rulesList: self.assertWinningRule(r[0], r[1], r[2]) def assertWinningRule(self, player_choice, cpu_choice, winner): self.tearDown()", "= StringIO() def tearDown(self): sys.stdout.close() sys.stdout = sys.__stdout__ def test_can_call_rpsls_module(self): rpsls.name_to_number(\"Test\") def test_rock_is_zero(self):", "sys.stdout = StringIO() #Second execution rpsls.rpsls(\"Spock\") first_line = sys.stdout.getvalue() self.assertEquals(first_line[-2:], \"\\n\\n\") def test_victory_rules(self):", "\"Player chooses\") def test_rpsls_blank_line_if_consecutive_games(self): rpsls.rpsls_counter = 0 #First Execution rpsls.rpsls(\"Test\") sys.stdout.getvalue() sys.stdout.close() sys.stdout", "StringIO class RPSLSTest(unittest.TestCase): def setUp(self): sys.stdout = StringIO() def tearDown(self): sys.stdout.close() sys.stdout =", "\"\\n\\n\") def test_victory_rules(self): \"\"\" Paper covers Rock Rock crushes Lizard Lizard poisons Spock", "\"scissors\", \"Player wins!\\n\"], [\"lizard\", \"scissors\", \"Computer wins!\\n\"], 
[\"paper\", \"Spock\", \"Player wins!\\n\"], [\"rock\", \"Spock\",", "\"Computer wins!\\n\"]] for r in rulesList: self.assertWinningRule(r[0], r[1], r[2]) def assertWinningRule(self, player_choice, cpu_choice,", "\"Computer wins!\\n\"], [\"Spock\", \"scissors\", \"Player wins!\\n\"], [\"lizard\", \"scissors\", \"Computer wins!\\n\"], [\"paper\", \"Spock\", \"Player", "player_number = rpsls.name_to_number(player_choice) cpu_number = rpsls.name_to_number(cpu_choice) rpsls.rpsls_compute_winner(player_number, cpu_number) response = sys.stdout.getvalue() self.assertEqual(response, winner)", "always has) Rock crushes scissors \"\"\" rulesList = [ [\"paper\", \"scissors\", \"Computer wins!\\n\"],", "test_scissors_is_four(self): self.assertEqual(rpsls.name_to_number(\"scissors\"), 4) def test_number_to_name_restores_name_to_number(self): self.assertEqual(\"rock\", rpsls.number_to_name(rpsls.name_to_number(\"rock\"))) self.assertEqual(\"Spock\", rpsls.number_to_name(rpsls.name_to_number(\"Spock\"))) self.assertEqual(\"paper\", rpsls.number_to_name(rpsls.name_to_number(\"paper\"))) self.assertEqual(\"lizard\", rpsls.number_to_name(rpsls.name_to_number(\"lizard\")))", "rulesList: self.assertWinningRule(r[0], r[1], r[2]) def assertWinningRule(self, player_choice, cpu_choice, winner): self.tearDown() self.setUp() player_number =", "it always has) Rock crushes scissors \"\"\" rulesList = [ [\"paper\", \"scissors\", \"Computer", "import rpsls from cStringIO import StringIO class RPSLSTest(unittest.TestCase): def setUp(self): sys.stdout = StringIO()", "vaporizes Rock (and as it always has) Rock crushes scissors \"\"\" rulesList =", "rpsls.rpsls_counter = 0 rpsls.rpsls(\"Spock\") output = sys.stdout.getvalue() self.assertEquals(output[:14], \"Player chooses\") def test_rpsls_blank_line_if_consecutive_games(self): rpsls.rpsls_counter", "\"lizard\", \"Player wins!\\n\"], [\"Spock\", \"lizard\", \"Computer wins!\\n\"], [\"Spock\", \"scissors\", \"Player wins!\\n\"], [\"lizard\", 
\"scissors\",", "test_paper_is_two(self): self.assertEqual(rpsls.name_to_number(\"paper\"), 2) def test_lizard_is_three(self): self.assertEqual(rpsls.name_to_number(\"lizard\"), 3) def test_scissors_is_four(self): self.assertEqual(rpsls.name_to_number(\"scissors\"), 4) def test_number_to_name_restores_name_to_number(self):", "assertWinningRule(self, player_choice, cpu_choice, winner): self.tearDown() self.setUp() player_number = rpsls.name_to_number(player_choice) cpu_number = rpsls.name_to_number(cpu_choice) rpsls.rpsls_compute_winner(player_number,", "def test_rpsls_blank_line_if_consecutive_games(self): rpsls.rpsls_counter = 0 #First Execution rpsls.rpsls(\"Test\") sys.stdout.getvalue() sys.stdout.close() sys.stdout = StringIO()", "4) def test_number_to_name_restores_name_to_number(self): self.assertEqual(\"rock\", rpsls.number_to_name(rpsls.name_to_number(\"rock\"))) self.assertEqual(\"Spock\", rpsls.number_to_name(rpsls.name_to_number(\"Spock\"))) self.assertEqual(\"paper\", rpsls.number_to_name(rpsls.name_to_number(\"paper\"))) self.assertEqual(\"lizard\", rpsls.number_to_name(rpsls.name_to_number(\"lizard\"))) self.assertEqual(\"scissors\", rpsls.number_to_name(rpsls.name_to_number(\"scissors\")))", "first_line = sys.stdout.getvalue() self.assertEquals(first_line[-2:], \"\\n\\n\") def test_victory_rules(self): \"\"\" Paper covers Rock Rock crushes", "Lizard Lizard poisons Spock Spock smashes Scissors Scissors decapitates Lizard Lizard eats Paper", "#First Execution rpsls.rpsls(\"Test\") sys.stdout.getvalue() sys.stdout.close() sys.stdout = StringIO() #Second execution rpsls.rpsls(\"Spock\") first_line =", "test_spock_is_invalid(self): self.assertEqual(rpsls.name_to_number(\"spock\"), None) def test_paper_is_two(self): self.assertEqual(rpsls.name_to_number(\"paper\"), 2) def test_lizard_is_three(self): self.assertEqual(rpsls.name_to_number(\"lizard\"), 3) def test_scissors_is_four(self):", "= sys.stdout.getvalue() self.assertEquals(first_line[-2:], 
\"\\n\\n\") def test_victory_rules(self): \"\"\" Paper covers Rock Rock crushes Lizard", "StringIO() def tearDown(self): sys.stdout.close() sys.stdout = sys.__stdout__ def test_can_call_rpsls_module(self): rpsls.name_to_number(\"Test\") def test_rock_is_zero(self): self.assertEqual(rpsls.name_to_number(\"rock\"),", "rpsls.number_to_name(rpsls.name_to_number(\"lizard\"))) self.assertEqual(\"scissors\", rpsls.number_to_name(rpsls.name_to_number(\"scissors\"))) def test_invalid_number_returns_None(self): self.assertEqual(rpsls.number_to_name(-1), None) def test_first_execution_print_player_chooses(self): rpsls.rpsls_counter = 0 rpsls.rpsls(\"Spock\")", "in rulesList: self.assertWinningRule(r[0], r[1], r[2]) def assertWinningRule(self, player_choice, cpu_choice, winner): self.tearDown() self.setUp() player_number", "wins!\\n\"], [\"rock\", \"Spock\", \"Computer wins!\\n\"], [\"rock\", \"rock\", \"Player and computer tie!\\n\"], [\"scissors\", \"rock\",", "def test_number_to_name_restores_name_to_number(self): self.assertEqual(\"rock\", rpsls.number_to_name(rpsls.name_to_number(\"rock\"))) self.assertEqual(\"Spock\", rpsls.number_to_name(rpsls.name_to_number(\"Spock\"))) self.assertEqual(\"paper\", rpsls.number_to_name(rpsls.name_to_number(\"paper\"))) self.assertEqual(\"lizard\", rpsls.number_to_name(rpsls.name_to_number(\"lizard\"))) self.assertEqual(\"scissors\", rpsls.number_to_name(rpsls.name_to_number(\"scissors\"))) def", "crushes scissors \"\"\" rulesList = [ [\"paper\", \"scissors\", \"Computer wins!\\n\"], [\"scissors\", \"paper\", \"Player", "self.assertEqual(rpsls.name_to_number(\"spock\"), None) def test_paper_is_two(self): self.assertEqual(rpsls.name_to_number(\"paper\"), 2) def test_lizard_is_three(self): self.assertEqual(rpsls.name_to_number(\"lizard\"), 3) def test_scissors_is_four(self): self.assertEqual(rpsls.name_to_number(\"scissors\"),", "scissors \"\"\" rulesList = [ [\"paper\", \"scissors\", \"Computer wins!\\n\"], [\"scissors\", \"paper\", 
\"Player wins!\\n\"],", "self.assertEquals(output[:14], \"Player chooses\") def test_rpsls_blank_line_if_consecutive_games(self): rpsls.rpsls_counter = 0 #First Execution rpsls.rpsls(\"Test\") sys.stdout.getvalue() sys.stdout.close()", "Scissors decapitates Lizard Lizard eats Paper Paper disproves Spock Spock vaporizes Rock (and", "wins!\\n\"], [\"Spock\", \"scissors\", \"Player wins!\\n\"], [\"lizard\", \"scissors\", \"Computer wins!\\n\"], [\"paper\", \"Spock\", \"Player wins!\\n\"],", "= [ [\"paper\", \"scissors\", \"Computer wins!\\n\"], [\"scissors\", \"paper\", \"Player wins!\\n\"], [\"rock\", \"lizard\", \"Player", "\"Computer wins!\\n\"], [\"rock\", \"rock\", \"Player and computer tie!\\n\"], [\"scissors\", \"rock\", \"Computer wins!\\n\"]] for", "Spock smashes Scissors Scissors decapitates Lizard Lizard eats Paper Paper disproves Spock Spock", "wins!\\n\"]] for r in rulesList: self.assertWinningRule(r[0], r[1], r[2]) def assertWinningRule(self, player_choice, cpu_choice, winner):", "and computer tie!\\n\"], [\"scissors\", \"rock\", \"Computer wins!\\n\"]] for r in rulesList: self.assertWinningRule(r[0], r[1],", "poisons Spock Spock smashes Scissors Scissors decapitates Lizard Lizard eats Paper Paper disproves", "self.assertEqual(rpsls.name_to_number(\"rock\"), 0) def test_Spock_is_one(self): self.assertEqual(rpsls.name_to_number(\"Spock\"), 1) def test_spock_is_invalid(self): self.assertEqual(rpsls.name_to_number(\"spock\"), None) def test_paper_is_two(self): self.assertEqual(rpsls.name_to_number(\"paper\"),", "self.assertEqual(\"rock\", rpsls.number_to_name(rpsls.name_to_number(\"rock\"))) self.assertEqual(\"Spock\", rpsls.number_to_name(rpsls.name_to_number(\"Spock\"))) self.assertEqual(\"paper\", rpsls.number_to_name(rpsls.name_to_number(\"paper\"))) self.assertEqual(\"lizard\", rpsls.number_to_name(rpsls.name_to_number(\"lizard\"))) self.assertEqual(\"scissors\", rpsls.number_to_name(rpsls.name_to_number(\"scissors\"))) def 
test_invalid_number_returns_None(self): self.assertEqual(rpsls.number_to_name(-1),", "smashes Scissors Scissors decapitates Lizard Lizard eats Paper Paper disproves Spock Spock vaporizes", "def test_lizard_is_three(self): self.assertEqual(rpsls.name_to_number(\"lizard\"), 3) def test_scissors_is_four(self): self.assertEqual(rpsls.name_to_number(\"scissors\"), 4) def test_number_to_name_restores_name_to_number(self): self.assertEqual(\"rock\", rpsls.number_to_name(rpsls.name_to_number(\"rock\"))) self.assertEqual(\"Spock\",", "self.assertEqual(\"lizard\", rpsls.number_to_name(rpsls.name_to_number(\"lizard\"))) self.assertEqual(\"scissors\", rpsls.number_to_name(rpsls.name_to_number(\"scissors\"))) def test_invalid_number_returns_None(self): self.assertEqual(rpsls.number_to_name(-1), None) def test_first_execution_print_player_chooses(self): rpsls.rpsls_counter = 0", "1) def test_spock_is_invalid(self): self.assertEqual(rpsls.name_to_number(\"spock\"), None) def test_paper_is_two(self): self.assertEqual(rpsls.name_to_number(\"paper\"), 2) def test_lizard_is_three(self): self.assertEqual(rpsls.name_to_number(\"lizard\"), 3)", "tie!\\n\"], [\"scissors\", \"rock\", \"Computer wins!\\n\"]] for r in rulesList: self.assertWinningRule(r[0], r[1], r[2]) def", "Spock Spock smashes Scissors Scissors decapitates Lizard Lizard eats Paper Paper disproves Spock", "\"rock\", \"Player and computer tie!\\n\"], [\"scissors\", \"rock\", \"Computer wins!\\n\"]] for r in rulesList:", "\"Player wins!\\n\"], [\"rock\", \"Spock\", \"Computer wins!\\n\"], [\"rock\", \"rock\", \"Player and computer tie!\\n\"], [\"scissors\",", "r in rulesList: self.assertWinningRule(r[0], r[1], r[2]) def assertWinningRule(self, player_choice, cpu_choice, winner): self.tearDown() self.setUp()", "wins!\\n\"], [\"scissors\", \"paper\", \"Player wins!\\n\"], [\"rock\", \"lizard\", \"Player wins!\\n\"], [\"Spock\", \"lizard\", \"Computer wins!\\n\"],", "cpu_choice, winner): self.tearDown() self.setUp() 
player_number = rpsls.name_to_number(player_choice) cpu_number = rpsls.name_to_number(cpu_choice) rpsls.rpsls_compute_winner(player_number, cpu_number) response", "test_first_execution_print_player_chooses(self): rpsls.rpsls_counter = 0 rpsls.rpsls(\"Spock\") output = sys.stdout.getvalue() self.assertEquals(output[:14], \"Player chooses\") def test_rpsls_blank_line_if_consecutive_games(self):", "sys.stdout.getvalue() self.assertEquals(first_line[-2:], \"\\n\\n\") def test_victory_rules(self): \"\"\" Paper covers Rock Rock crushes Lizard Lizard", "rpsls.name_to_number(\"Test\") def test_rock_is_zero(self): self.assertEqual(rpsls.name_to_number(\"rock\"), 0) def test_Spock_is_one(self): self.assertEqual(rpsls.name_to_number(\"Spock\"), 1) def test_spock_is_invalid(self): self.assertEqual(rpsls.name_to_number(\"spock\"), None)", "Spock Spock vaporizes Rock (and as it always has) Rock crushes scissors \"\"\"", "rpsls.number_to_name(rpsls.name_to_number(\"paper\"))) self.assertEqual(\"lizard\", rpsls.number_to_name(rpsls.name_to_number(\"lizard\"))) self.assertEqual(\"scissors\", rpsls.number_to_name(rpsls.name_to_number(\"scissors\"))) def test_invalid_number_returns_None(self): self.assertEqual(rpsls.number_to_name(-1), None) def test_first_execution_print_player_chooses(self): rpsls.rpsls_counter =", "(and as it always has) Rock crushes scissors \"\"\" rulesList = [ [\"paper\",", "rpsls.rpsls_counter = 0 #First Execution rpsls.rpsls(\"Test\") sys.stdout.getvalue() sys.stdout.close() sys.stdout = StringIO() #Second execution", "<gh_stars>0 import unittest import sys from rpsls import rpsls from cStringIO import StringIO", "def test_spock_is_invalid(self): self.assertEqual(rpsls.name_to_number(\"spock\"), None) def test_paper_is_two(self): self.assertEqual(rpsls.name_to_number(\"paper\"), 2) def test_lizard_is_three(self): self.assertEqual(rpsls.name_to_number(\"lizard\"), 3) def", "rpsls.rpsls(\"Test\") sys.stdout.getvalue() sys.stdout.close() sys.stdout = StringIO() 
#Second execution rpsls.rpsls(\"Spock\") first_line = sys.stdout.getvalue() self.assertEquals(first_line[-2:],", "wins!\\n\"], [\"lizard\", \"scissors\", \"Computer wins!\\n\"], [\"paper\", \"Spock\", \"Player wins!\\n\"], [\"rock\", \"Spock\", \"Computer wins!\\n\"],", "= sys.stdout.getvalue() self.assertEquals(output[:14], \"Player chooses\") def test_rpsls_blank_line_if_consecutive_games(self): rpsls.rpsls_counter = 0 #First Execution rpsls.rpsls(\"Test\")", "rpsls import rpsls from cStringIO import StringIO class RPSLSTest(unittest.TestCase): def setUp(self): sys.stdout =", "class RPSLSTest(unittest.TestCase): def setUp(self): sys.stdout = StringIO() def tearDown(self): sys.stdout.close() sys.stdout = sys.__stdout__", "0) def test_Spock_is_one(self): self.assertEqual(rpsls.name_to_number(\"Spock\"), 1) def test_spock_is_invalid(self): self.assertEqual(rpsls.name_to_number(\"spock\"), None) def test_paper_is_two(self): self.assertEqual(rpsls.name_to_number(\"paper\"), 2)", "computer tie!\\n\"], [\"scissors\", \"rock\", \"Computer wins!\\n\"]] for r in rulesList: self.assertWinningRule(r[0], r[1], r[2])", "def assertWinningRule(self, player_choice, cpu_choice, winner): self.tearDown() self.setUp() player_number = rpsls.name_to_number(player_choice) cpu_number = rpsls.name_to_number(cpu_choice)", "None) def test_paper_is_two(self): self.assertEqual(rpsls.name_to_number(\"paper\"), 2) def test_lizard_is_three(self): self.assertEqual(rpsls.name_to_number(\"lizard\"), 3) def test_scissors_is_four(self): self.assertEqual(rpsls.name_to_number(\"scissors\"), 4)", "setUp(self): sys.stdout = StringIO() def tearDown(self): sys.stdout.close() sys.stdout = sys.__stdout__ def test_can_call_rpsls_module(self): rpsls.name_to_number(\"Test\")", "execution rpsls.rpsls(\"Spock\") first_line = sys.stdout.getvalue() self.assertEquals(first_line[-2:], \"\\n\\n\") def test_victory_rules(self): \"\"\" Paper covers Rock" ]
[ "una lista debe contener solo los pares y la segunda lista solo debe", "separar en dos listas # una lista debe contener solo los pares y", "solo los pares y la segunda lista solo debe contener los impares lista", "Dada la lista [10,20,30,10,5, 1, 3, 5, 4] separar en dos listas #", "solo debe contener los impares lista = [10,20,30,10,5, 1, 3, 5, 4] Pares=[]", "# Dada la lista [10,20,30,10,5, 1, 3, 5, 4] separar en dos listas", "@author: AMD \"\"\" # Dada la lista [10,20,30,10,5, 1, 3, 5, 4] separar", "[10,20,30,10,5, 1, 3, 5, 4] separar en dos listas # una lista debe", "on Wed Feb 9 07:33:20 2022 @author: AMD \"\"\" # Dada la lista", "lista: if num % 2 == 0: Pares.append(num) else: Impares.append(num) print(lista) print(Pares) print(Impares)", "in lista: if num % 2 == 0: Pares.append(num) else: Impares.append(num) print(lista) print(Pares)", "lista debe contener solo los pares y la segunda lista solo debe contener", "5, 4] separar en dos listas # una lista debe contener solo los", "# una lista debe contener solo los pares y la segunda lista solo", "lista [10,20,30,10,5, 1, 3, 5, 4] separar en dos listas # una lista", "\"\"\" # Dada la lista [10,20,30,10,5, 1, 3, 5, 4] separar en dos", "en dos listas # una lista debe contener solo los pares y la", "2022 @author: AMD \"\"\" # Dada la lista [10,20,30,10,5, 1, 3, 5, 4]", "la segunda lista solo debe contener los impares lista = [10,20,30,10,5, 1, 3,", "lista = [10,20,30,10,5, 1, 3, 5, 4] Pares=[] Impares=[] for num in lista:", "la lista [10,20,30,10,5, 1, 3, 5, 4] separar en dos listas # una", "3, 5, 4] separar en dos listas # una lista debe contener solo", "Created on Wed Feb 9 07:33:20 2022 @author: AMD \"\"\" # Dada la", "utf-8 -*- \"\"\" Created on Wed Feb 9 07:33:20 2022 @author: AMD \"\"\"", "\"\"\" Created on Wed Feb 9 07:33:20 2022 @author: AMD \"\"\" # Dada", "debe contener solo los pares y la segunda lista solo debe contener los", "4] separar en dos listas # una lista debe contener solo los pares", 
"Impares=[] for num in lista: if num % 2 == 0: Pares.append(num) else:", "# -*- coding: utf-8 -*- \"\"\" Created on Wed Feb 9 07:33:20 2022", "contener los impares lista = [10,20,30,10,5, 1, 3, 5, 4] Pares=[] Impares=[] for", "AMD \"\"\" # Dada la lista [10,20,30,10,5, 1, 3, 5, 4] separar en", "1, 3, 5, 4] Pares=[] Impares=[] for num in lista: if num %", "4] Pares=[] Impares=[] for num in lista: if num % 2 == 0:", "Pares=[] Impares=[] for num in lista: if num % 2 == 0: Pares.append(num)", "y la segunda lista solo debe contener los impares lista = [10,20,30,10,5, 1,", "segunda lista solo debe contener los impares lista = [10,20,30,10,5, 1, 3, 5,", "Wed Feb 9 07:33:20 2022 @author: AMD \"\"\" # Dada la lista [10,20,30,10,5,", "listas # una lista debe contener solo los pares y la segunda lista", "9 07:33:20 2022 @author: AMD \"\"\" # Dada la lista [10,20,30,10,5, 1, 3,", "-*- coding: utf-8 -*- \"\"\" Created on Wed Feb 9 07:33:20 2022 @author:", "num in lista: if num % 2 == 0: Pares.append(num) else: Impares.append(num) print(lista)", "coding: utf-8 -*- \"\"\" Created on Wed Feb 9 07:33:20 2022 @author: AMD", "5, 4] Pares=[] Impares=[] for num in lista: if num % 2 ==", "debe contener los impares lista = [10,20,30,10,5, 1, 3, 5, 4] Pares=[] Impares=[]", "for num in lista: if num % 2 == 0: Pares.append(num) else: Impares.append(num)", "-*- \"\"\" Created on Wed Feb 9 07:33:20 2022 @author: AMD \"\"\" #", "los impares lista = [10,20,30,10,5, 1, 3, 5, 4] Pares=[] Impares=[] for num", "[10,20,30,10,5, 1, 3, 5, 4] Pares=[] Impares=[] for num in lista: if num", "3, 5, 4] Pares=[] Impares=[] for num in lista: if num % 2", "lista solo debe contener los impares lista = [10,20,30,10,5, 1, 3, 5, 4]", "dos listas # una lista debe contener solo los pares y la segunda", "los pares y la segunda lista solo debe contener los impares lista =", "= [10,20,30,10,5, 1, 3, 5, 4] Pares=[] Impares=[] for num in lista: if", "1, 3, 5, 4] separar en dos listas # una lista debe contener", 
"contener solo los pares y la segunda lista solo debe contener los impares", "impares lista = [10,20,30,10,5, 1, 3, 5, 4] Pares=[] Impares=[] for num in", "pares y la segunda lista solo debe contener los impares lista = [10,20,30,10,5,", "07:33:20 2022 @author: AMD \"\"\" # Dada la lista [10,20,30,10,5, 1, 3, 5,", "<gh_stars>0 # -*- coding: utf-8 -*- \"\"\" Created on Wed Feb 9 07:33:20", "Feb 9 07:33:20 2022 @author: AMD \"\"\" # Dada la lista [10,20,30,10,5, 1," ]
[]
[ "API', author = '<NAME>', author_email = '<EMAIL>', url = 'https://github.com/Jrsnow8921/SnowNasaPython.git', download_url = 'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz',", "description = 'A python lib to interact with Nasas API', author = '<NAME>',", "url = 'https://github.com/Jrsnow8921/SnowNasaPython.git', download_url = 'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz', keywords = ['Nasa', 'API'], classifiers = [],", "name = 'SnowNasaPython', packages = ['SnowNasaPython'], version = '1.7', description = 'A python", "= ['SnowNasaPython'], version = '1.7', description = 'A python lib to interact with", "['SnowNasaPython'], version = '1.7', description = 'A python lib to interact with Nasas", "version = '1.7', description = 'A python lib to interact with Nasas API',", "Nasas API', author = '<NAME>', author_email = '<EMAIL>', url = 'https://github.com/Jrsnow8921/SnowNasaPython.git', download_url =", "= '<EMAIL>', url = 'https://github.com/Jrsnow8921/SnowNasaPython.git', download_url = 'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz', keywords = ['Nasa', 'API'], classifiers", "from distutils.core import setup setup( name = 'SnowNasaPython', packages = ['SnowNasaPython'], version =", "import setup setup( name = 'SnowNasaPython', packages = ['SnowNasaPython'], version = '1.7', description", "author = '<NAME>', author_email = '<EMAIL>', url = 'https://github.com/Jrsnow8921/SnowNasaPython.git', download_url = 'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz', keywords", "= 'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz', keywords = ['Nasa', 'API'], classifiers = [], py_modules = ['snownasapython'], )", "<gh_stars>0 from distutils.core import setup setup( name = 'SnowNasaPython', packages = ['SnowNasaPython'], version", "to interact with Nasas API', author = '<NAME>', author_email = '<EMAIL>', url =", "= 'https://github.com/Jrsnow8921/SnowNasaPython.git', download_url = 
'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz', keywords = ['Nasa', 'API'], classifiers = [], py_modules", "lib to interact with Nasas API', author = '<NAME>', author_email = '<EMAIL>', url", "with Nasas API', author = '<NAME>', author_email = '<EMAIL>', url = 'https://github.com/Jrsnow8921/SnowNasaPython.git', download_url", "= '<NAME>', author_email = '<EMAIL>', url = 'https://github.com/Jrsnow8921/SnowNasaPython.git', download_url = 'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz', keywords =", "interact with Nasas API', author = '<NAME>', author_email = '<EMAIL>', url = 'https://github.com/Jrsnow8921/SnowNasaPython.git',", "'A python lib to interact with Nasas API', author = '<NAME>', author_email =", "'https://github.com/Jrsnow8921/SnowNasaPython.git', download_url = 'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz', keywords = ['Nasa', 'API'], classifiers = [], py_modules =", "'<EMAIL>', url = 'https://github.com/Jrsnow8921/SnowNasaPython.git', download_url = 'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz', keywords = ['Nasa', 'API'], classifiers =", "setup setup( name = 'SnowNasaPython', packages = ['SnowNasaPython'], version = '1.7', description =", "packages = ['SnowNasaPython'], version = '1.7', description = 'A python lib to interact", "'1.7', description = 'A python lib to interact with Nasas API', author =", "= 'A python lib to interact with Nasas API', author = '<NAME>', author_email", "'<NAME>', author_email = '<EMAIL>', url = 'https://github.com/Jrsnow8921/SnowNasaPython.git', download_url = 'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz', keywords = ['Nasa',", "= '1.7', description = 'A python lib to interact with Nasas API', author", "download_url = 'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz', keywords = ['Nasa', 'API'], classifiers = [], py_modules = ['snownasapython'],", "setup( name = 'SnowNasaPython', packages = ['SnowNasaPython'], 
version = '1.7', description = 'A", "'SnowNasaPython', packages = ['SnowNasaPython'], version = '1.7', description = 'A python lib to", "distutils.core import setup setup( name = 'SnowNasaPython', packages = ['SnowNasaPython'], version = '1.7',", "python lib to interact with Nasas API', author = '<NAME>', author_email = '<EMAIL>',", "author_email = '<EMAIL>', url = 'https://github.com/Jrsnow8921/SnowNasaPython.git', download_url = 'https://github.com/Jrsnow8921/SnowNasaPython/archive/1.7.tar.gz', keywords = ['Nasa', 'API'],", "= 'SnowNasaPython', packages = ['SnowNasaPython'], version = '1.7', description = 'A python lib" ]
[ "Developers', 'Natural Language :: English', 'Programming Language :: Python', 'Programming Language :: Python", "'Programming Language :: Python', 'Programming Language :: Python :: 3.6', 'Programming Language ::", "from setuptools import setup, find_packages setup( name='mclib', version='1.0.0', keywords=['mclib'], description=' mock data for", "description=' mock data for ck ', author='shengtao.yu', author_email='<EMAIL>', packages=find_packages(include=['mclib', 'mclib.*']), install_requires=[ 'faker', 'clickhouse-driver',", "mock data for ck ', author='shengtao.yu', author_email='<EMAIL>', packages=find_packages(include=['mclib', 'mclib.*']), install_requires=[ 'faker', 'clickhouse-driver', ],", "version='1.0.0', keywords=['mclib'], description=' mock data for ck ', author='shengtao.yu', author_email='<EMAIL>', packages=find_packages(include=['mclib', 'mclib.*']), install_requires=[", "find_packages setup( name='mclib', version='1.0.0', keywords=['mclib'], description=' mock data for ck ', author='shengtao.yu', author_email='<EMAIL>',", "- Production/Stable', 'Intended Audience :: Developers', 'Natural Language :: English', 'Programming Language ::", "name='mclib', version='1.0.0', keywords=['mclib'], description=' mock data for ck ', author='shengtao.yu', author_email='<EMAIL>', packages=find_packages(include=['mclib', 'mclib.*']),", "install_requires=[ 'faker', 'clickhouse-driver', ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience", "'Intended Audience :: Developers', 'Natural Language :: English', 'Programming Language :: Python', 'Programming", ":: 5 - Production/Stable', 'Intended Audience :: Developers', 'Natural Language :: English', 'Programming", "setup( name='mclib', version='1.0.0', keywords=['mclib'], description=' mock data for ck ', author='shengtao.yu', author_email='<EMAIL>', packages=find_packages(include=['mclib',", "Language :: Python', 'Programming Language :: Python :: 3.6', 'Programming Language :: 
Python", "import setup, find_packages setup( name='mclib', version='1.0.0', keywords=['mclib'], description=' mock data for ck ',", "5 - Production/Stable', 'Intended Audience :: Developers', 'Natural Language :: English', 'Programming Language", "Audience :: Developers', 'Natural Language :: English', 'Programming Language :: Python', 'Programming Language", "Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Natural Language :: English',", "<gh_stars>0 __author__ = 'lucky' from setuptools import setup, find_packages setup( name='mclib', version='1.0.0', keywords=['mclib'],", ":: English', 'Programming Language :: Python', 'Programming Language :: Python :: 3.6', 'Programming", "3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8',", "keywords=['mclib'], description=' mock data for ck ', author='shengtao.yu', author_email='<EMAIL>', packages=find_packages(include=['mclib', 'mclib.*']), install_requires=[ 'faker',", "author_email='<EMAIL>', packages=find_packages(include=['mclib', 'mclib.*']), install_requires=[ 'faker', 'clickhouse-driver', ], classifiers=[ 'Development Status :: 5 -", "'clickhouse-driver', ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers',", ":: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language ::", "', author='shengtao.yu', author_email='<EMAIL>', packages=find_packages(include=['mclib', 'mclib.*']), install_requires=[ 'faker', 'clickhouse-driver', ], classifiers=[ 'Development Status ::", "'mclib.*']), install_requires=[ 'faker', 'clickhouse-driver', ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended", "ck ', author='shengtao.yu', author_email='<EMAIL>', packages=find_packages(include=['mclib', 'mclib.*']), install_requires=[ 'faker', 'clickhouse-driver', ], classifiers=[ 'Development Status", "setuptools import setup, find_packages setup( name='mclib', version='1.0.0', keywords=['mclib'], 
description=' mock data for ck", "data for ck ', author='shengtao.yu', author_email='<EMAIL>', packages=find_packages(include=['mclib', 'mclib.*']), install_requires=[ 'faker', 'clickhouse-driver', ], classifiers=[", "author='shengtao.yu', author_email='<EMAIL>', packages=find_packages(include=['mclib', 'mclib.*']), install_requires=[ 'faker', 'clickhouse-driver', ], classifiers=[ 'Development Status :: 5", "'Natural Language :: English', 'Programming Language :: Python', 'Programming Language :: Python ::", ":: Python', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python ::", "Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language", "Python', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7',", "Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python", ":: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], python_requires=\">=3.6\", )", "], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Natural", "classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Natural Language", "= 'lucky' from setuptools import setup, find_packages setup( name='mclib', version='1.0.0', keywords=['mclib'], description=' mock", "Language :: English', 'Programming Language :: Python', 'Programming Language :: Python :: 3.6',", ":: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python ::", "English', 'Programming Language :: Python', 'Programming Language :: Python :: 3.6', 'Programming Language", "'faker', 'clickhouse-driver', ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience ::", "for ck ', author='shengtao.yu', author_email='<EMAIL>', packages=find_packages(include=['mclib', 'mclib.*']), install_requires=[ 'faker', 'clickhouse-driver', ], classifiers=[ 'Development", "Language :: Python :: 3.7', 'Programming 
Language :: Python :: 3.8', ], python_requires=\">=3.6\",", "'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming", "'lucky' from setuptools import setup, find_packages setup( name='mclib', version='1.0.0', keywords=['mclib'], description=' mock data", "__author__ = 'lucky' from setuptools import setup, find_packages setup( name='mclib', version='1.0.0', keywords=['mclib'], description='", "Production/Stable', 'Intended Audience :: Developers', 'Natural Language :: English', 'Programming Language :: Python',", ":: Developers', 'Natural Language :: English', 'Programming Language :: Python', 'Programming Language ::", "'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ],", "packages=find_packages(include=['mclib', 'mclib.*']), install_requires=[ 'faker', 'clickhouse-driver', ], classifiers=[ 'Development Status :: 5 - Production/Stable',", "'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Natural Language ::", "setup, find_packages setup( name='mclib', version='1.0.0', keywords=['mclib'], description=' mock data for ck ', author='shengtao.yu'," ]